repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
Infusion-OS/android_kernel_lge_gee | drivers/edac/edac_stub.c | 4967 | 2101 | /*
* common EDAC components that must be in kernel
*
* Author: Dave Jiang <djiang@mvista.com>
*
* 2007 (c) MontaVista Software, Inc.
* 2010 (c) Advanced Micro Devices Inc.
* Borislav Petkov <borislav.petkov@amd.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
*/
#include <linux/module.h>
#include <linux/edac.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <asm/edac.h>
/* Current EDAC operating state (poll/NMI/int); stays EDAC_OPSTATE_INVAL
 * until an EDAC driver selects a mode. */
int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);
/* Count of registered non-polling (e.g. NMI) error handlers. */
atomic_t edac_handlers = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(edac_handlers);
/* Bumped from atomic/NMI context whenever an error is asserted. */
int edac_err_assert = 0;
EXPORT_SYMBOL_GPL(edac_err_assert);
/* Reference count of edac_get_sysfs_subsys() users; non-zero once the
 * "edac" sysfs subsystem has been registered. */
static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
/*
* called to determine if there is an EDAC driver interested in
* knowing an event (such as NMI) occurred
*/
/*
 * edac_handler_set - report whether a non-polling EDAC error handler
 * is registered.
 *
 * Returns 0 when the subsystem runs in polling mode (no NMI-style
 * handler is wanted); otherwise returns the current handler count.
 */
int edac_handler_set(void)
{
	if (edac_op_state != EDAC_OPSTATE_POLL)
		return atomic_read(&edac_handlers);

	return 0;
}
EXPORT_SYMBOL_GPL(edac_handler_set);
/*
* handler for NMI type of interrupts to assert error
*/
/* Assert that an error occurred, callable from NMI/atomic context.
 * A plain (non-atomic) increment is used; the counter only serves as a
 * "did anything happen" indicator, so lost updates are tolerable. */
void edac_atomic_assert_error(void)
{
edac_err_assert++;
}
EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
/*
* sysfs object: /sys/devices/system/edac
* need to export to other files
*/
/* Bus/subsystem object backing /sys/devices/system/edac. */
struct bus_type edac_subsys = {
.name = "edac",
.dev_name = "edac",	/* prefix for device names under this subsystem */
};
EXPORT_SYMBOL_GPL(edac_subsys);
/* return pointer to the 'edac' node in sysfs */
/* Return the 'edac' sysfs subsystem, registering it on first use and
 * taking a reference each call; returns NULL if registration fails.
 * Balance every successful call with edac_put_sysfs_subsys(). */
struct bus_type *edac_get_sysfs_subsys(void)
{
int err = 0;
/* Fast path: already registered, just take another reference. */
if (atomic_read(&edac_subsys_valid))
goto out;
/* create the /sys/devices/system/edac directory */
err = subsys_system_register(&edac_subsys, NULL);
if (err) {
printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
return NULL;
}
out:
/* NOTE(review): the atomic_read()/register sequence is not atomic as a
 * whole; two concurrent first-time callers could both attempt
 * registration. Callers appear to be serialized today -- confirm
 * before using this from a new context. */
atomic_inc(&edac_subsys_valid);
return &edac_subsys;
}
EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
/* Drop a reference taken by edac_get_sysfs_subsys(); the last user
 * unregisters the subsystem from sysfs. */
void edac_put_sysfs_subsys(void)
{
/* last user unregisters it */
if (atomic_dec_and_test(&edac_subsys_valid))
bus_unregister(&edac_subsys);
}
EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys);
| gpl-2.0 |
JustAkan/Oxygen_united_kernel-gproj | drivers/mfd/tc6387xb.c | 4967 | 5619 | /*
* Toshiba TC6387XB support
* Copyright (c) 2005 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains TC6387XB base support.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6387xb.h>
#include <linux/slab.h>
/* Indexes into tc6387xb_cells[]. */
enum {
TC6387XB_CELL_MMC,
};
/* Per-device driver state, stored in platform drvdata. */
struct tc6387xb {
void __iomem *scr;	/* mapped core configuration registers */
struct clk *clk32k;	/* 32kHz clock feeding the MMC cell */
struct resource rscr;	/* resource claimed over the core registers */
};
/* MMC cell resources; offsets are relative to the parent MFD window,
 * and the IRQ slot is filled in by the MFD core at registration. */
static struct resource tc6387xb_mmc_resources[] = {
{
.start = 0x800,
.end = 0x9ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0,
.end = 0,
.flags = IORESOURCE_IRQ,
},
};
/*--------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* Suspend: run the optional platform hook first (while the chip is
 * still clocked), then gate the 32kHz clock. */
static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
if (pdata && pdata->suspend)
pdata->suspend(dev);
clk_disable(tc6387xb->clk32k);
return 0;
}
/* Resume: ungate the clock, run the optional platform hook, then
 * restore the tmio MMC core state (MMC registers live at scr + 0x200). */
static int tc6387xb_resume(struct platform_device *dev)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
clk_enable(tc6387xb->clk32k);
if (pdata && pdata->resume)
pdata->resume(dev);
tmio_core_mmc_resume(tc6387xb->scr + 0x200, 0,
tc6387xb_mmc_resources[0].start & 0xfffe);
return 0;
}
#else
#define tc6387xb_suspend NULL
#define tc6387xb_resume NULL
#endif
/*--------------------------------------------------------------------------*/
/* Forward an MMC power-state change to the tmio core (SCR + 0x200). */
static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
{
	struct platform_device *pdev = to_platform_device(mmc->dev.parent);
	struct tc6387xb *chip = platform_get_drvdata(pdev);

	tmio_core_mmc_pwr(chip->scr + 0x200, 0, state);
}
/* Forward an MMC clock-divider change to the tmio core (SCR + 0x200). */
static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
{
	struct platform_device *pdev = to_platform_device(mmc->dev.parent);
	struct tc6387xb *chip = platform_get_drvdata(pdev);

	tmio_core_mmc_clk_div(chip->scr + 0x200, 0, state);
}
/* Enable the MMC cell: ungate the 32kHz clock and (re)initialise the
 * tmio MMC core at its register window inside the parent's iomem. */
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
	struct tc6387xb *chip =
		platform_get_drvdata(to_platform_device(mmc->dev.parent));

	clk_enable(chip->clk32k);
	tmio_core_mmc_enable(chip->scr + 0x200, 0,
			     tc6387xb_mmc_resources[0].start & 0xfffe);

	return 0;
}
/* Disable the MMC cell by gating its 32kHz clock. */
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
	struct tc6387xb *chip =
		platform_get_drvdata(to_platform_device(mmc->dev.parent));

	clk_disable(chip->clk32k);

	return 0;
}
/* Platform data handed to the tmio-mmc cell driver. */
static struct tmio_mmc_data tc6387xb_mmc_data = {
.hclk = 24000000,	/* MMC host clock, Hz */
.set_pwr = tc6387xb_mmc_pwr,
.set_clk_div = tc6387xb_mmc_clk_div,
};
/*--------------------------------------------------------------------------*/
/* Single MFD cell: the tmio-mmc controller embedded in the TC6387XB. */
static struct mfd_cell tc6387xb_cells[] = {
[TC6387XB_CELL_MMC] = {
.name = "tmio-mmc",
.enable = tc6387xb_mmc_enable,
.disable = tc6387xb_mmc_disable,
.platform_data = &tc6387xb_mmc_data,
.pdata_size = sizeof(tc6387xb_mmc_data),
.num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
.resources = tc6387xb_mmc_resources,
},
};
/* Probe: claim the core register window, map it, enable the board via
 * the optional platform hook, and register the MMC child cell.
 * Resources are released in reverse order through the goto chain. */
static int __devinit tc6387xb_probe(struct platform_device *dev)
{
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
struct resource *iomem, *rscr;
struct clk *clk32k;
struct tc6387xb *tc6387xb;
int irq, ret;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!iomem) {
return -EINVAL;
}
tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL);
if (!tc6387xb)
return -ENOMEM;
/* On failure ret already holds the (negative) platform_get_irq error. */
ret = platform_get_irq(dev, 0);
if (ret >= 0)
irq = ret;
else
goto err_no_irq;
clk32k = clk_get(&dev->dev, "CLK_CK32K");
if (IS_ERR(clk32k)) {
ret = PTR_ERR(clk32k);
goto err_no_clk;
}
/* Carve the 256-byte core register block out of the parent window. */
rscr = &tc6387xb->rscr;
rscr->name = "tc6387xb-core";
rscr->start = iomem->start;
rscr->end = iomem->start + 0xff;
rscr->flags = IORESOURCE_MEM;
ret = request_resource(iomem, rscr);
if (ret)
goto err_resource;
tc6387xb->scr = ioremap(rscr->start, resource_size(rscr));
if (!tc6387xb->scr) {
ret = -ENOMEM;
goto err_ioremap;
}
tc6387xb->clk32k = clk32k;
platform_set_drvdata(dev, tc6387xb);
if (pdata && pdata->enable)
pdata->enable(dev);
printk(KERN_INFO "Toshiba tc6387xb initialised\n");
ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
ARRAY_SIZE(tc6387xb_cells), iomem, irq);
if (!ret)
return 0;
/* mfd_add_devices failed: unwind everything acquired above. */
iounmap(tc6387xb->scr);
err_ioremap:
release_resource(&tc6387xb->rscr);
err_resource:
clk_put(clk32k);
err_no_clk:
err_no_irq:
kfree(tc6387xb);
return ret;
}
/* Remove: tear down children and release resources in reverse probe
 * order. NOTE(review): clk_disable() here assumes the 32kHz clock is
 * enabled at removal time (enables happen in resume/mmc_enable) --
 * confirm the enable/disable counts balance on all paths. */
static int __devexit tc6387xb_remove(struct platform_device *dev)
{
struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
mfd_remove_devices(&dev->dev);
iounmap(tc6387xb->scr);
release_resource(&tc6387xb->rscr);
clk_disable(tc6387xb->clk32k);
clk_put(tc6387xb->clk32k);
platform_set_drvdata(dev, NULL);
kfree(tc6387xb);
return 0;
}
/* Platform driver glue; suspend/resume compile to NULL when !CONFIG_PM. */
static struct platform_driver tc6387xb_platform_driver = {
.driver = {
.name = "tc6387xb",
},
.probe = tc6387xb_probe,
.remove = __devexit_p(tc6387xb_remove),
.suspend = tc6387xb_suspend,
.resume = tc6387xb_resume,
};
module_platform_driver(tc6387xb_platform_driver);
MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton");
MODULE_ALIAS("platform:tc6387xb");
| gpl-2.0 |
TI-OpenLink/kernel-omap | drivers/scsi/aacraid/dpcsup.c | 8039 | 11592 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* dpcsup.c
*
* Abstract: All DPC processing routines for the cyclone board occur here.
*
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>
#include "aacraid.h"
/**
* aac_response_normal - Handle command replies
* @q: Queue to read from
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting. We will
* take a spinlock out on the queue before operating on it.
*/
/* Drain the normal-priority response queue, completing or waking the
 * fib behind each queue entry. Runs with q->lock held, dropping it
 * only while completing an individual fib. */
unsigned int aac_response_normal(struct aac_queue * q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
struct hw_fib * hwfib;
struct fib * fib;
int consumed = 0;
unsigned long flags, mflags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
* back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
{
int fast;
/* entry->addr encodes bit 0 = "fast" completion flag and the fib
 * table index in the upper bits (index >> 2).
 * NOTE(review): bit 1 appears unused -- confirm against firmware docs. */
u32 index = le32_to_cpu(entry->addr);
fast = index & 0x01;
fib = &dev->fibs[index >> 2];
hwfib = fib->hw_fib_va;
aac_consumer_free(dev, q, HostNormRespQueue);
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
/* Drop the queue lock while freeing; complete/free may sleep
 * on or take other locks. */
spin_unlock_irqrestore(q->lock, flags);
aac_fib_complete(fib);
aac_fib_free(fib);
spin_lock_irqsave(q->lock, flags);
continue;
}
spin_unlock_irqrestore(q->lock, flags);
if (fast) {
/*
* Doctor the fib: fast-path completions carry no status, so
* synthesize ST_OK and mark the fib adapter-processed.
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
/* NuFileSystem responses: collapse extended status into ST_OK. */
__le32 *pstatus = (__le32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
/* Synchronous waiter: mark the fib done and release it. */
spin_lock_irqsave(&fib->event_lock, flagv);
if (!fib->done) {
fib->done = 1;
up(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
spin_lock_irqsave(&dev->manage_lock, mflags);
dev->management_fib_count--;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
/* done == 2: the waiter already gave up, so nobody else will
 * free this fib -- complete and free it here. */
if (fib->done == 2) {
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
aac_fib_free(fib);
}
}
consumed++;
spin_lock_irqsave(q->lock, flags);
}
/* Statistics book-keeping. */
if (consumed > aac_config.peak_fibs)
aac_config.peak_fibs = consumed;
if (consumed == 0)
aac_config.zero_fibs++;
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/**
* aac_command_normal - handle commands
* @q: queue to process
*
* This DPC routine will be queued when the adapter interrupts us to
* let us know there is a command on our normal priority queue. We will
* pull off all QE there are and wake up all the waiters before exiting.
* We will take a spinlock out on the queue before operating on it.
*/
/* Drain the normal-priority command queue (adapter-initiated fibs).
 * Entries are queued to the aif thread when it is running; otherwise
 * they are completed inline with ST_OK using an on-stack fib. */
unsigned int aac_command_normal(struct aac_queue *q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
unsigned long flags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
* back to the system.
*/
while(aac_consumer_get(dev, q, &entry))
{
struct fib fibctx;
struct hw_fib * hw_fib;
u32 index;
struct fib *fib = &fibctx;
/* entry->addr is a byte offset into the AIF area; convert to an
 * index into the hw_fib array mapped at aif_base_va. */
index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
hw_fib = &dev->aif_base_va[index];
/*
* Allocate a FIB at all costs. For non queued stuff
* we can just use the stack so we are happy. We need
* a fib object in order to manage the linked lists
*/
if (dev->aif_thread)
if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
/* allocation failed: fall back to the stack fib and
 * complete the entry inline below */
fib = &fibctx;
memset(fib, 0, sizeof(struct fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
if (dev->aif_thread && fib != &fibctx) {
/* Hand off to the aif thread; it owns (and frees) fib now. */
list_add_tail(&fib->fiblink, &q->cmdq);
aac_consumer_free(dev, q, HostNormCmdQueue);
wake_up_interruptible(&q->cmdready);
} else {
aac_consumer_free(dev, q, HostNormCmdQueue);
spin_unlock_irqrestore(q->lock, flags);
/*
* Set the status of this FIB
*/
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
aac_fib_adapter_complete(fib, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
}
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/*
*
* aac_aif_callback
* @context: the context set in the fib - here it is scsi cmd
* @fibptr: pointer to the fib
*
* Handles the AIFs - new method (SRC)
*
*/
static void aac_aif_callback(void *context, struct fib * fibptr)
{
struct fib *fibctx;
struct aac_dev *dev;
struct aac_aifcmd *cmd;
int status;
fibctx = (struct fib *)context;
BUG_ON(fibptr == NULL);
dev = fibptr->dev;
/* Firmware says there is no more AIF data: retire this fib and stop
 * re-arming the request. */
if (fibptr->hw_fib_va->header.XferState &
cpu_to_le32(NoMoreAifDataAvailable)) {
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
return;
}
/* Deliver the received AIF through the common (isAif == 1) path. */
aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
/* Re-arm: send a fresh AifRequest with ourselves as the callback. */
aac_fib_init(fibctx);
cmd = (struct aac_aifcmd *) fib_data(fibctx);
cmd->command = cpu_to_le32(AifReqEvent);
/* NOTE(review): status is stored but never checked -- a send failure
 * silently stops further AIF delivery. */
status = aac_fib_send(AifRequest,
fibctx,
sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
FsaNormal,
0, 1,
(fib_callback)aac_aif_callback, fibctx);
}
/**
* aac_intr_normal - Handle command replies
* @dev: Device
* @index: completion reference
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting.
*/
/* Interrupt-level completion dispatcher.
 * isAif == 1: copy the AIF into fresh buffers and queue it for the aif
 *             thread; isAif == 2: re-arm an SRC-style AIF request;
 * otherwise: complete the normal command fib at dev->fibs[index]. */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
int isAif, int isFastResponse, struct hw_fib *aif_fib)
{
unsigned long mflags;
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
if (isAif == 1) { /* AIF - common */
struct hw_fib * hw_fib;
struct fib * fib;
struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
unsigned long flags;
/*
* Allocate a FIB. For non queued stuff we can just use
* the stack so we are happy. We need a fib object in order to
* manage the linked lists.
*/
if ((!dev->aif_thread)
|| (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
return 1;
if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
kfree (fib);
return 1;
}
/* Copy the AIF payload: either from the caller-supplied hw_fib or
 * from the adapter's register window at the given byte offset. */
if (aif_fib != NULL) {
memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
} else {
memcpy(hw_fib,
(struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
index), sizeof(struct hw_fib));
}
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
/* Queue for the aif thread; it owns fib and hw_fib from here on. */
spin_lock_irqsave(q->lock, flags);
list_add_tail(&fib->fiblink, &q->cmdq);
wake_up_interruptible(&q->cmdready);
spin_unlock_irqrestore(q->lock, flags);
return 1;
} else if (isAif == 2) { /* AIF - new (SRC) */
struct fib *fibctx;
struct aac_aifcmd *cmd;
fibctx = aac_fib_alloc(dev);
if (!fibctx)
return 1;
aac_fib_init(fibctx);
cmd = (struct aac_aifcmd *) fib_data(fibctx);
cmd->command = cpu_to_le32(AifReqEvent);
/* Arm the AIF request; aac_aif_callback keeps re-arming it. */
return aac_fib_send(AifRequest,
fibctx,
sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
FsaNormal,
0, 1,
(fib_callback)aac_aif_callback, fibctx);
} else {
/* Normal command completion; mirrors aac_response_normal() but is
 * entered directly from the interrupt with a plain fib index. */
struct fib *fib = &dev->fibs[index];
struct hw_fib * hwfib = fib->hw_fib_va;
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
aac_fib_complete(fib);
aac_fib_free(fib);
return 0;
}
if (isFastResponse) {
/*
* Doctor the fib: fast responses carry no status, so
* synthesize ST_OK and mark the fib adapter-processed.
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
/* NuFileSystem responses: collapse extended status into ST_OK. */
__le32 *pstatus = (__le32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
/* Synchronous waiter: mark the fib done and release it. */
spin_lock_irqsave(&fib->event_lock, flagv);
if (!fib->done) {
fib->done = 1;
up(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
spin_lock_irqsave(&dev->manage_lock, mflags);
dev->management_fib_count--;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
/* done == 2: the waiter already gave up; free the fib here. */
if (fib->done == 2) {
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
aac_fib_free(fib);
}
}
return 0;
}
}
| gpl-2.0 |
linuxmake/kernel_softwinner_fiber | drivers/scsi/aacraid/dpcsup.c | 8039 | 11592 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* dpcsup.c
*
* Abstract: All DPC processing routines for the cyclone board occur here.
*
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/semaphore.h>
#include "aacraid.h"
/**
* aac_response_normal - Handle command replies
* @q: Queue to read from
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting. We will
* take a spinlock out on the queue before operating on it.
*/
/* Drain the normal-priority response queue, completing or waking the
 * fib behind each queue entry. Runs with q->lock held, dropping it
 * only while completing an individual fib. */
unsigned int aac_response_normal(struct aac_queue * q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
struct hw_fib * hwfib;
struct fib * fib;
int consumed = 0;
unsigned long flags, mflags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
* back to the system. If no response was requested we just
* deallocate the Fib here and continue.
*/
while(aac_consumer_get(dev, q, &entry))
{
int fast;
/* entry->addr encodes bit 0 = "fast" completion flag and the fib
 * table index in the upper bits (index >> 2).
 * NOTE(review): bit 1 appears unused -- confirm against firmware docs. */
u32 index = le32_to_cpu(entry->addr);
fast = index & 0x01;
fib = &dev->fibs[index >> 2];
hwfib = fib->hw_fib_va;
aac_consumer_free(dev, q, HostNormRespQueue);
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
/* Drop the queue lock while freeing; complete/free may sleep
 * on or take other locks. */
spin_unlock_irqrestore(q->lock, flags);
aac_fib_complete(fib);
aac_fib_free(fib);
spin_lock_irqsave(q->lock, flags);
continue;
}
spin_unlock_irqrestore(q->lock, flags);
if (fast) {
/*
* Doctor the fib: fast-path completions carry no status, so
* synthesize ST_OK and mark the fib adapter-processed.
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
/* NuFileSystem responses: collapse extended status into ST_OK. */
__le32 *pstatus = (__le32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
/* Synchronous waiter: mark the fib done and release it. */
spin_lock_irqsave(&fib->event_lock, flagv);
if (!fib->done) {
fib->done = 1;
up(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
spin_lock_irqsave(&dev->manage_lock, mflags);
dev->management_fib_count--;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
/* done == 2: the waiter already gave up, so nobody else will
 * free this fib -- complete and free it here. */
if (fib->done == 2) {
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
aac_fib_free(fib);
}
}
consumed++;
spin_lock_irqsave(q->lock, flags);
}
/* Statistics book-keeping. */
if (consumed > aac_config.peak_fibs)
aac_config.peak_fibs = consumed;
if (consumed == 0)
aac_config.zero_fibs++;
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/**
* aac_command_normal - handle commands
* @q: queue to process
*
* This DPC routine will be queued when the adapter interrupts us to
* let us know there is a command on our normal priority queue. We will
* pull off all QE there are and wake up all the waiters before exiting.
* We will take a spinlock out on the queue before operating on it.
*/
/* Drain the normal-priority command queue (adapter-initiated fibs).
 * Entries are queued to the aif thread when it is running; otherwise
 * they are completed inline with ST_OK using an on-stack fib. */
unsigned int aac_command_normal(struct aac_queue *q)
{
struct aac_dev * dev = q->dev;
struct aac_entry *entry;
unsigned long flags;
spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
* back to the system.
*/
while(aac_consumer_get(dev, q, &entry))
{
struct fib fibctx;
struct hw_fib * hw_fib;
u32 index;
struct fib *fib = &fibctx;
/* entry->addr is a byte offset into the AIF area; convert to an
 * index into the hw_fib array mapped at aif_base_va. */
index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
hw_fib = &dev->aif_base_va[index];
/*
* Allocate a FIB at all costs. For non queued stuff
* we can just use the stack so we are happy. We need
* a fib object in order to manage the linked lists
*/
if (dev->aif_thread)
if((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
/* allocation failed: fall back to the stack fib and
 * complete the entry inline below */
fib = &fibctx;
memset(fib, 0, sizeof(struct fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
if (dev->aif_thread && fib != &fibctx) {
/* Hand off to the aif thread; it owns (and frees) fib now. */
list_add_tail(&fib->fiblink, &q->cmdq);
aac_consumer_free(dev, q, HostNormCmdQueue);
wake_up_interruptible(&q->cmdready);
} else {
aac_consumer_free(dev, q, HostNormCmdQueue);
spin_unlock_irqrestore(q->lock, flags);
/*
* Set the status of this FIB
*/
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
aac_fib_adapter_complete(fib, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
}
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
/*
*
* aac_aif_callback
* @context: the context set in the fib - here it is scsi cmd
* @fibptr: pointer to the fib
*
* Handles the AIFs - new method (SRC)
*
*/
static void aac_aif_callback(void *context, struct fib * fibptr)
{
struct fib *fibctx;
struct aac_dev *dev;
struct aac_aifcmd *cmd;
int status;
fibctx = (struct fib *)context;
BUG_ON(fibptr == NULL);
dev = fibptr->dev;
/* Firmware says there is no more AIF data: retire this fib and stop
 * re-arming the request. */
if (fibptr->hw_fib_va->header.XferState &
cpu_to_le32(NoMoreAifDataAvailable)) {
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
return;
}
/* Deliver the received AIF through the common (isAif == 1) path. */
aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
/* Re-arm: send a fresh AifRequest with ourselves as the callback. */
aac_fib_init(fibctx);
cmd = (struct aac_aifcmd *) fib_data(fibctx);
cmd->command = cpu_to_le32(AifReqEvent);
/* NOTE(review): status is stored but never checked -- a send failure
 * silently stops further AIF delivery. */
status = aac_fib_send(AifRequest,
fibctx,
sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
FsaNormal,
0, 1,
(fib_callback)aac_aif_callback, fibctx);
}
/**
* aac_intr_normal - Handle command replies
* @dev: Device
* @index: completion reference
*
* This DPC routine will be run when the adapter interrupts us to let us
* know there is a response on our normal priority queue. We will pull off
* all QE there are and wake up all the waiters before exiting.
*/
/* Interrupt-level completion dispatcher.
 * isAif == 1: copy the AIF into fresh buffers and queue it for the aif
 *             thread; isAif == 2: re-arm an SRC-style AIF request;
 * otherwise: complete the normal command fib at dev->fibs[index]. */
unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
int isAif, int isFastResponse, struct hw_fib *aif_fib)
{
unsigned long mflags;
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
if (isAif == 1) { /* AIF - common */
struct hw_fib * hw_fib;
struct fib * fib;
struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
unsigned long flags;
/*
* Allocate a FIB. For non queued stuff we can just use
* the stack so we are happy. We need a fib object in order to
* manage the linked lists.
*/
if ((!dev->aif_thread)
|| (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
return 1;
if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
kfree (fib);
return 1;
}
/* Copy the AIF payload: either from the caller-supplied hw_fib or
 * from the adapter's register window at the given byte offset. */
if (aif_fib != NULL) {
memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
} else {
memcpy(hw_fib,
(struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
index), sizeof(struct hw_fib));
}
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
/* Queue for the aif thread; it owns fib and hw_fib from here on. */
spin_lock_irqsave(q->lock, flags);
list_add_tail(&fib->fiblink, &q->cmdq);
wake_up_interruptible(&q->cmdready);
spin_unlock_irqrestore(q->lock, flags);
return 1;
} else if (isAif == 2) { /* AIF - new (SRC) */
struct fib *fibctx;
struct aac_aifcmd *cmd;
fibctx = aac_fib_alloc(dev);
if (!fibctx)
return 1;
aac_fib_init(fibctx);
cmd = (struct aac_aifcmd *) fib_data(fibctx);
cmd->command = cpu_to_le32(AifReqEvent);
/* Arm the AIF request; aac_aif_callback keeps re-arming it. */
return aac_fib_send(AifRequest,
fibctx,
sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
FsaNormal,
0, 1,
(fib_callback)aac_aif_callback, fibctx);
} else {
/* Normal command completion; mirrors aac_response_normal() but is
 * entered directly from the interrupt with a plain fib index. */
struct fib *fib = &dev->fibs[index];
struct hw_fib * hwfib = fib->hw_fib_va;
/*
* Remove this fib from the Outstanding I/O queue.
* But only if it has not already been timed out.
*
* If the fib has been timed out already, then just
* continue. The caller has already been notified that
* the fib timed out.
*/
dev->queues->queue[AdapNormCmdQueue].numpending--;
if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
aac_fib_complete(fib);
aac_fib_free(fib);
return 0;
}
if (isFastResponse) {
/*
* Doctor the fib: fast responses carry no status, so
* synthesize ST_OK and mark the fib adapter-processed.
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
{
/* NuFileSystem responses: collapse extended status into ST_OK. */
__le32 *pstatus = (__le32 *)hwfib->data;
if (*pstatus & cpu_to_le32(0xffff0000))
*pstatus = cpu_to_le32(ST_OK);
}
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
{
if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
else
FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
/*
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
fib->flags = 0;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
/* Synchronous waiter: mark the fib done and release it. */
spin_lock_irqsave(&fib->event_lock, flagv);
if (!fib->done) {
fib->done = 1;
up(&fib->event_wait);
}
spin_unlock_irqrestore(&fib->event_lock, flagv);
spin_lock_irqsave(&dev->manage_lock, mflags);
dev->management_fib_count--;
spin_unlock_irqrestore(&dev->manage_lock, mflags);
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
/* done == 2: the waiter already gave up; free the fib here. */
if (fib->done == 2) {
spin_lock_irqsave(&fib->event_lock, flagv);
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
aac_fib_free(fib);
}
}
return 0;
}
}
| gpl-2.0 |
javelinanddart/android_kernel_htc_pyramid | drivers/media/dvb/siano/smsir.c | 8295 | 3182 | /****************************************************************
Siano Mobile Silicon, Inc.
MDTV receiver kernel modules.
Copyright (C) 2006-2009, Uri Shkolnik
Copyright (c) 2010 - Mauro Carvalho Chehab
- Ported the driver to use rc-core
- IR raw event decoding is now done at rc-core
- Code almost re-written
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
****************************************************************/
#include <linux/types.h>
#include <linux/input.h>
#include "smscoreapi.h"
#include "smsir.h"
#include "sms-cards.h"
#define MODULE_NAME "smsmdtv"
/* Feed raw IR samples from the device into rc-core.
 * buf holds s32 samples: the sign selects pulse vs. space (negative
 * => pulse, see below) and the magnitude is the duration.
 * NOTE(review): the *1000 scaling suggests samples arrive in us and
 * ev.duration is in ns -- confirm against the device protocol docs. */
void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
{
int i;
const s32 *samples = (const void *)buf;
/* len is in bytes; process len/4 32-bit samples */
for (i = 0; i < len >> 2; i++) {
DEFINE_IR_RAW_EVENT(ev);
ev.duration = abs(samples[i]) * 1000; /* Convert to ns */
ev.pulse = (samples[i] > 0) ? false : true;
ir_raw_event_store(coredev->ir.dev, &ev);
}
ir_raw_event_handle(coredev->ir.dev);
}
/* Allocate and register an rc-core raw-IR device for this board.
 * On success coredev->ir.dev holds the registered device; on failure
 * the allocation is released and a negative errno is returned. */
int sms_ir_init(struct smscore_device_t *coredev)
{
int err;
int board_id = smscore_get_board_id(coredev);
struct rc_dev *dev;
sms_log("Allocating rc device");
dev = rc_allocate_device();
if (!dev) {
sms_err("Not enough memory");
return -ENOMEM;
}
coredev->ir.controller = 0; /* Todo: vega/nova SPI number */
coredev->ir.timeout = IR_DEFAULT_TIMEOUT;
sms_log("IR port %d, timeout %d ms",
coredev->ir.controller, coredev->ir.timeout);
/* Build name/phys strings from the board table and device path. */
snprintf(coredev->ir.name, sizeof(coredev->ir.name),
"SMS IR (%s)", sms_get_board(board_id)->name);
strlcpy(coredev->ir.phys, coredev->devpath, sizeof(coredev->ir.phys));
strlcat(coredev->ir.phys, "/ir0", sizeof(coredev->ir.phys));
dev->input_name = coredev->ir.name;
dev->input_phys = coredev->ir.phys;
dev->dev.parent = coredev->device;
#if 0
/* TODO: properly initialize the parameters below */
dev->input_id.bustype = BUS_USB;
dev->input_id.version = 1;
dev->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
dev->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
#endif
dev->priv = coredev;
/* Raw mode: decoding is handled by rc-core's protocol decoders. */
dev->driver_type = RC_DRIVER_IR_RAW;
dev->allowed_protos = RC_TYPE_ALL;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
sms_log("Input device (IR) %s is set for key events", dev->input_name);
err = rc_register_device(dev);
if (err < 0) {
sms_err("Failed to register device");
rc_free_device(dev);
return err;
}
coredev->ir.dev = dev;
return 0;
}
/* Unregister the rc device created by sms_ir_init(), if any. */
void sms_ir_exit(struct smscore_device_t *coredev)
{
	struct rc_dev *rc = coredev->ir.dev;

	if (rc)
		rc_unregister_device(rc);
	sms_log("");
}
| gpl-2.0 |
randomblame/android_kernel_acer_t20-common | arch/m68k/platform/5307/nettel.c | 8551 | 4096 | /***************************************************************************/
/*
* nettel.c -- startup code support for the NETtel boards
*
* Copyright (C) 2009, Greg Ungerer (gerg@snapgear.com)
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/nettel.h>
/***************************************************************************/
/*
* Define the IO and interrupt resources of the 2 SMC9196 interfaces.
*/
#define NETTEL_SMC0_ADDR 0x30600300
#define NETTEL_SMC0_IRQ 29
#define NETTEL_SMC1_ADDR 0x30600000
#define NETTEL_SMC1_IRQ 27
/*
* We need some access into the SMC9196 registers. Define those registers
* we will need here (including the smc91x.h doesn't seem to give us these
* in a simple form).
*/
#define SMC91xx_BANKSELECT 14
#define SMC91xx_BASEADDR 2
#define SMC91xx_BASEMAC 4
/***************************************************************************/
/* MMIO window and interrupt line of the first SMC9196. */
static struct resource nettel_smc91x_0_resources[] = {
	{
		.start		= NETTEL_SMC0_ADDR,
		.end		= NETTEL_SMC0_ADDR + 0x20,
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= NETTEL_SMC0_IRQ,
		.end		= NETTEL_SMC0_IRQ,
		.flags		= IORESOURCE_IRQ,
	},
};

/* MMIO window and interrupt line of the second SMC9196. */
static struct resource nettel_smc91x_1_resources[] = {
	{
		.start		= NETTEL_SMC1_ADDR,
		.end		= NETTEL_SMC1_ADDR + 0x20,
		.flags		= IORESOURCE_MEM,
	},
	{
		.start		= NETTEL_SMC1_IRQ,
		.end		= NETTEL_SMC1_IRQ,
		.flags		= IORESOURCE_IRQ,
	},
};

/* Two smc91x platform devices, distinguished only by id/resources. */
static struct platform_device nettel_smc91x[] = {
	{
		.name		= "smc91x",
		.id		= 0,
		.num_resources	= ARRAY_SIZE(nettel_smc91x_0_resources),
		.resource	= nettel_smc91x_0_resources,
	},
	{
		.name		= "smc91x",
		.id		= 1,
		.num_resources	= ARRAY_SIZE(nettel_smc91x_1_resources),
		.resource	= nettel_smc91x_1_resources,
	},
};

/* Everything registered at arch_initcall time by init_nettel(). */
static struct platform_device *nettel_devices[] __initdata = {
	&nettel_smc91x[0],
	&nettel_smc91x[1],
};

/***************************************************************************/

/* Fallback (Moreton Bay OUI 00:d0:cf) MAC used when flash is blank. */
static u8 nettel_macdefault[] __initdata = {
	0x00, 0xd0, 0xcf, 0x00, 0x00, 0x01,
};
/*
 * Set flash contained MAC address into SMC9196 core.  Make sure the flash
 * MAC address is sane, and not an empty flash.  If no good use the Moreton
 * Bay default MAC address instead.
 */
static void __init nettel_smc91x_setmac(unsigned int ioaddr, unsigned int flashaddr)
{
	u16 *macp;

	macp = (u16 *) flashaddr;
	if ((macp[0] == 0xffff) && (macp[1] == 0xffff) && (macp[2] == 0xffff))
		macp = (u16 *) &nettel_macdefault[0];

	/*
	 * Select register bank 1 on the chip actually being programmed.
	 * The previous code always selected the bank on NETTEL_SMC0_ADDR,
	 * so the second interface's MAC registers were written with its
	 * bank register left in an unknown state.
	 */
	writew(1, ioaddr + SMC91xx_BANKSELECT);
	writew(macp[0], ioaddr + SMC91xx_BASEMAC);
	writew(macp[1], ioaddr + SMC91xx_BASEMAC + 2);
	writew(macp[2], ioaddr + SMC91xx_BASEMAC + 4);
}
/***************************************************************************/
/*
* Re-map the address space of at least one of the SMC ethernet
* parts. Both parts power up decoding the same address, so we
* need to move one of them first, before doing anything else.
*/
static void __init nettel_smc91x_init(void)
{
	/* Drive the parallel-port pin that gates the remap sequence. */
	writew(0x00ec, MCF_MBAR + MCFSIM_PADDR);
	mcf_setppdata(0, 0x0080);
	writew(1, NETTEL_SMC0_ADDR + SMC91xx_BANKSELECT);
	/* Rewrite SMC0's base-address register to move its decode window. */
	writew(0x0067, NETTEL_SMC0_ADDR + SMC91xx_BASEADDR);
	mcf_setppdata(0x0080, 0);

	/* Set correct chip select timing for SMC9196 accesses */
	writew(0x1180, MCF_MBAR + MCFSIM_CSCR3);

	/* Set the SMC interrupts to be auto-vectored */
	mcf_autovector(NETTEL_SMC0_IRQ);
	mcf_autovector(NETTEL_SMC1_IRQ);

	/* Set MAC addresses from flash for both interfaces */
	nettel_smc91x_setmac(NETTEL_SMC0_ADDR, 0xf0006000);
	nettel_smc91x_setmac(NETTEL_SMC1_ADDR, 0xf0006006);
}
/***************************************************************************/
/*
 * Board init: remap/configure the ethernet parts, then register the
 * platform devices.  Returns 0 on success or a negative errno.
 */
static int __init init_nettel(void)
{
	nettel_smc91x_init();
	/* Propagate a registration failure instead of discarding it. */
	return platform_add_devices(nettel_devices, ARRAY_SIZE(nettel_devices));
}
arch_initcall(init_nettel);
/***************************************************************************/
| gpl-2.0 |
Dzenik/kernel-source | drivers/pcmcia/pxa2xx_cm_x270.c | 9831 | 2472 | /*
* linux/drivers/pcmcia/pxa/pxa_cm_x270.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Compulab Ltd., 2003, 2007, 2008
* Mike Rapoport <mike@compulab.co.il>
*
*/
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include "soc_common.h"
#define GPIO_PCMCIA_S0_CD_VALID (84)
#define GPIO_PCMCIA_S0_RDYINT (82)
#define GPIO_PCMCIA_RESET (53)
/*
 * Claim the card-reset GPIO, drive it inactive, and describe the
 * card-detect and ready GPIOs to the soc_common core.
 */
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	int err;

	err = gpio_request(GPIO_PCMCIA_RESET, "PCCard reset");
	if (err)
		return err;
	gpio_direction_output(GPIO_PCMCIA_RESET, 0);

	skt->stat[SOC_STAT_CD].gpio = GPIO_PCMCIA_S0_CD_VALID;
	skt->stat[SOC_STAT_CD].name = "PCMCIA0 CD";
	skt->stat[SOC_STAT_RDY].gpio = GPIO_PCMCIA_S0_RDYINT;
	skt->stat[SOC_STAT_RDY].name = "PCMCIA0 RDY";

	return 0;
}
/* Release the reset GPIO claimed in cmx270_pcmcia_hw_init(). */
static void cmx270_pcmcia_shutdown(struct soc_pcmcia_socket *skt)
{
	gpio_free(GPIO_PCMCIA_RESET);
}
/* Voltage-sense report: both VS flags are always reported as 0 here. */
static void cmx270_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
				       struct pcmcia_state *state)
{
	state->vs_3v = 0;
	state->vs_Xv = 0;
}
/*
 * Apply a socket configuration.  Only socket 0 exists; a requested
 * SS_RESET is implemented as a 10us pulse on the reset GPIO.
 */
static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
					  const socket_state_t *state)
{
	if (skt->nr == 0 && (state->flags & SS_RESET)) {
		gpio_set_value(GPIO_PCMCIA_RESET, 1);
		udelay(10);
		gpio_set_value(GPIO_PCMCIA_RESET, 0);
	}

	return 0;
}
/* Board callbacks handed to the pxa2xx-pcmcia core as platform data
 * (one socket). */
static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
	.owner			= THIS_MODULE,
	.hw_init		= cmx270_pcmcia_hw_init,
	.hw_shutdown		= cmx270_pcmcia_shutdown,
	.socket_state		= cmx270_pcmcia_socket_state,
	.configure_socket	= cmx270_pcmcia_configure_socket,
	.nr			= 1,
};

/* The pxa2xx-pcmcia device registered by cmx270_pcmcia_init(). */
static struct platform_device *cmx270_pcmcia_device;
/*
 * Allocate a "pxa2xx-pcmcia" platform device, attach the board ops as
 * platform data and register it.  On any failure the device reference
 * is dropped and the error returned.
 */
int __init cmx270_pcmcia_init(void)
{
	int err;

	cmx270_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
	if (!cmx270_pcmcia_device)
		return -ENOMEM;

	err = platform_device_add_data(cmx270_pcmcia_device, &cmx270_pcmcia_ops,
				       sizeof(cmx270_pcmcia_ops));
	if (!err) {
		printk(KERN_INFO "Registering cm-x270 PCMCIA interface.\n");
		err = platform_device_add(cmx270_pcmcia_device);
	}

	if (err)
		platform_device_put(cmx270_pcmcia_device);

	return err;
}
/* Unregister (and drop) the device created by cmx270_pcmcia_init(). */
void __exit cmx270_pcmcia_exit(void)
{
	platform_device_unregister(cmx270_pcmcia_device);
}
| gpl-2.0 |
android-armv7a-belalang-tempur/Android_SpeedKernel | drivers/mtd/maps/vmax301.c | 10343 | 5370 | /* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
The VMAx 301 is a SBC based on . It
comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
more flash. Each unit has it's own 8k mapping into a settable region
(0xD8000). There are two 8k mappings for each MTD, the first is always set
to the lower 8k of the device the second is paged. Writing a 16 bit page
value to anywhere in the first 8k will cause the second 8k to page around.
To boot the device a bios extension must be installed into the first 8k
of flash that is smart enough to copy itself down, page in the rest of
itself and begin executing.
##################################################################### */
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#define WINDOW_START 0xd8000
#define WINDOW_LENGTH 0x2000
#define WINDOW_SHIFT 25
#define WINDOW_MASK 0x1FFF
/* Actually we could use two spinlocks, but we'd have to have
more private space in the struct map_info. We lose a little
performance like this, but we'd probably lose more by having
the extra indirection from having one of the map->map_priv
fields pointing to yet another private struct.
*/
static DEFINE_SPINLOCK(vmax301_spin);
/*
 * Page the device's second 8k window to @page by writing the page
 * number anywhere in the first window (which sits WINDOW_LENGTH below
 * map_priv_2), then cache the page number in map_priv_1.
 */
static void __vmax301_page(struct map_info *map, unsigned long page)
{
	writew(page, map->map_priv_2 - WINDOW_LENGTH);
	map->map_priv_1 = page;
}
/* Ensure the window covering offset @ofs is paged in (cached check). */
static inline void vmax301_page(struct map_info *map,
				unsigned long ofs)
{
	unsigned long page = (ofs >> WINDOW_SHIFT);
	if (map->map_priv_1 != page)
		__vmax301_page(map, page);
}
/* Read one byte at device offset @ofs, paging the window under lock. */
static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
{
	map_word val;

	spin_lock(&vmax301_spin);
	vmax301_page(map, ofs);
	val.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
	spin_unlock(&vmax301_spin);

	return val;
}
/*
 * Copy @len bytes from device offset @from into @to, one 8k window at
 * a time: each chunk is clamped so it does not cross a window boundary
 * and the window is re-paged under the lock before every copy.
 *
 * NOTE(review): the source address is map_priv_2 + from without
 * masking 'from' to WINDOW_MASK, unlike vmax301_read8() -- this only
 * stays inside the mapped window for small offsets; verify against
 * working hardware before relying on it.
 */
static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	while(len) {
		unsigned long thislen = len;
		if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
			thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
		spin_lock(&vmax301_spin);
		vmax301_page(map, from);
		memcpy_fromio(to, map->map_priv_2 + from, thislen);
		spin_unlock(&vmax301_spin);
		to += thislen;
		from += thislen;
		len -= thislen;
	}
}
/* Write one byte at device offset @adr, paging the window under lock. */
static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
{
	spin_lock(&vmax301_spin);
	vmax301_page(map, adr);
	writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
	spin_unlock(&vmax301_spin);
}
/*
 * Copy @len bytes from @from to device offset @to, chunked per 8k
 * window exactly like vmax301_copy_from().
 *
 * NOTE(review): as in vmax301_copy_from(), the destination address is
 * map_priv_2 + to without masking 'to' to the window -- confirm this
 * against hardware before trusting writes at large offsets.
 */
static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	while(len) {
		unsigned long thislen = len;
		if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
			thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
		spin_lock(&vmax301_spin);
		vmax301_page(map, to);
		memcpy_toio(map->map_priv_2 + to, from, thislen);
		spin_unlock(&vmax301_spin);
		to += thislen;
		from += thislen;
		len -= thislen;
	}
}
/*
 * The two MTD maps: map 0 is the soldered flash (3 x 2MB), map 1 the
 * expansion socket (size discovered by probing).  map_priv_1 is set to
 * an address here but is overwritten with the current page *number* by
 * __vmax301_page(); map_priv_2 is filled in by init_vmax301() with the
 * virtual address of the device's paged window (0xFFFFFFFF = not yet
 * mapped).
 */
static struct map_info vmax_map[2] = {
	{
		.name = "VMAX301 Internal Flash",
		.phys = NO_XIP,
		.size = 3*2*1024*1024,
		.bankwidth = 1,
		.read = vmax301_read8,
		.copy_from = vmax301_copy_from,
		.write = vmax301_write8,
		.copy_to = vmax301_copy_to,
		.map_priv_1 = WINDOW_START + WINDOW_LENGTH,
		.map_priv_2 = 0xFFFFFFFF
	},
	{
		.name = "VMAX301 Socket",
		.phys = NO_XIP,
		.size = 0,
		.bankwidth = 1,
		.read = vmax301_read8,
		.copy_from = vmax301_copy_from,
		.write = vmax301_write8,
		.copy_to = vmax301_copy_to,
		.map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
		.map_priv_2 = 0xFFFFFFFF
	}
};

/* MTDs found by probing; NULL when the device is absent. */
static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
/*
 * Unregister and destroy any probed MTDs, then unmap the IO region.
 *
 * NOTE(review): map_priv_1 holds an address only until the first
 * __vmax301_page() call, which replaces it with a page number, so the
 * iounmap() argument below is dubious after any device access --
 * verify module unload on real hardware.
 */
static void __exit cleanup_vmax301(void)
{
	int i;

	for (i=0; i<2; i++) {
		if (vmax_mtd[i]) {
			mtd_device_unregister(vmax_mtd[i]);
			map_destroy(vmax_mtd[i]);
		}
	}
	iounmap((void *)vmax_map[0].map_priv_1 - WINDOW_START);
}
/*
 * Map the four 8k VMAX301 windows, record the paged window of each
 * device, and probe both devices for flash/RAM/ROM.  Returns 0 if at
 * least one device was found.
 */
static int __init init_vmax301(void)
{
	int i;
	unsigned long iomapadr;

	/* Print out our little header.. */
	printk("Tempustech VMAX 301 MEM:0x%x-0x%x\n",WINDOW_START,
	       WINDOW_START+4*WINDOW_LENGTH);

	iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH*4);
	if (!iomapadr) {
		printk("Failed to ioremap memory region\n");
		return -EIO;
	}
	/* Put the address in the map's private data area.
	   We store the actual MTD IO address rather than the
	   address of the first half, because it's used more
	   often.

	   Device 0's paged window is the second 8k window of the
	   mapping and device 1's is the fourth, hence offsets in
	   multiples of WINDOW_LENGTH.  (The previous code offset by
	   WINDOW_START, the *physical* base, which points far outside
	   the 32k region returned by ioremap().)
	*/
	vmax_map[0].map_priv_2 = iomapadr + WINDOW_LENGTH;
	vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_LENGTH);

	/* Probe each device: CFI flash, then JEDEC, then RAM, then ROM. */
	for (i=0; i<2; i++) {
		vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
		if (!vmax_mtd[i])
			vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
		if (!vmax_mtd[i])
			vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
		if (!vmax_mtd[i])
			vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
		if (vmax_mtd[i]) {
			vmax_mtd[i]->owner = THIS_MODULE;
			mtd_device_register(vmax_mtd[i], NULL, 0);
		}
	}

	if (!vmax_mtd[0] && !vmax_mtd[1]) {
		iounmap((void *)iomapadr);
		return -ENXIO;
	}

	return 0;
}
module_init(init_vmax301);
module_exit(cleanup_vmax301);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
| gpl-2.0 |
umiddelb/linux-fslc | fs/hfs/attr.c | 13671 | 2666 | /*
* linux/fs/hfs/attr.c
*
* (C) 2003 Ardis Technologies <roman@ardistech.com>
*
* Export hfs data via xattr
*/
#include <linux/fs.h>
#include <linux/xattr.h>
#include "hfs_fs.h"
#include "btree.h"
/*
 * Store the "hfs.type" or "hfs.creator" xattr by rewriting the 4-byte
 * type/creator field directly in the file's catalog record.  Only
 * regular files (not resource forks) are supported; @size must be
 * exactly 4 (-ERANGE otherwise) and unknown names yield -EOPNOTSUPP.
 */
int hfs_setxattr(struct dentry *dentry, const char *name,
		 const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	struct hfs_cat_file *file;
	int res;

	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd);
	if (res)
		return res;
	/* Look up this inode's catalog record in the btree. */
	fd.search_key->cat = HFS_I(inode)->cat_key;
	res = hfs_brec_find(&fd);
	if (res)
		goto out;
	hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			sizeof(struct hfs_cat_file));
	file = &rec.file;

	if (!strcmp(name, "hfs.type")) {
		if (size == 4)
			memcpy(&file->UsrWds.fdType, value, 4);
		else
			res = -ERANGE;
	} else if (!strcmp(name, "hfs.creator")) {
		if (size == 4)
			memcpy(&file->UsrWds.fdCreator, value, 4);
		else
			res = -ERANGE;
	} else
		res = -EOPNOTSUPP;
	if (!res)
		/* Write the modified record back into the btree node. */
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
out:
	hfs_find_exit(&fd);
	return res;
}
/*
 * Read the 4-byte "hfs.type" or "hfs.creator" xattr from the file's
 * catalog record.  With @size == 0 only the attribute length (4) is
 * reported and the btree is not consulted.  Returns -ERANGE when the
 * buffer is too small and -ENODATA for unknown attribute names.
 */
ssize_t hfs_getxattr(struct dentry *dentry, const char *name,
			 void *value, size_t size)
{
	struct inode *inode = dentry->d_inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	struct hfs_cat_file *file;
	ssize_t res = 0;

	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	if (size) {
		/* Only look the record up when data must be copied out. */
		res = hfs_find_init(HFS_SB(inode->i_sb)->cat_tree, &fd);
		if (res)
			return res;
		fd.search_key->cat = HFS_I(inode)->cat_key;
		res = hfs_brec_find(&fd);
		if (res)
			goto out;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
	file = &rec.file;

	if (!strcmp(name, "hfs.type")) {
		if (size >= 4) {
			memcpy(value, &file->UsrWds.fdType, 4);
			res = 4;
		} else
			res = size ? -ERANGE : 4;
	} else if (!strcmp(name, "hfs.creator")) {
		if (size >= 4) {
			memcpy(value, &file->UsrWds.fdCreator, 4);
			res = 4;
		} else
			res = size ? -ERANGE : 4;
	} else
		res = -ENODATA;
out:
	if (size)
		hfs_find_exit(&fd);
	return res;
}
#define HFS_ATTRLIST_SIZE (sizeof("hfs.creator")+sizeof("hfs.type"))
/*
 * List the two supported xattr names ("hfs.type\0hfs.creator\0") into
 * @buffer.  A NULL/zero-sized buffer just reports the required size.
 */
ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct inode *inode = dentry->d_inode;

	if (!S_ISREG(inode->i_mode) || HFS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	if (!buffer || !size)
		return HFS_ATTRLIST_SIZE;
	if (size < HFS_ATTRLIST_SIZE)
		return -ERANGE;

	/* Both names, each including its terminating NUL. */
	memcpy(buffer, "hfs.type", sizeof("hfs.type"));
	memcpy(buffer + sizeof("hfs.type"), "hfs.creator",
	       sizeof("hfs.creator"));
	return HFS_ATTRLIST_SIZE;
}
| gpl-2.0 |
vishal-android-freak/Super_Fusion | fs/ntfs/unistr.c | 14951 | 12445 | /*
* unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2006 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
#include "types.h"
#include "debug.h"
#include "ntfs.h"
/*
* IMPORTANT
* =========
*
* All these routines assume that the Unicode characters are in little endian
* encoding inside the strings!!!
*/
/*
* This is used by the name collation functions to quickly determine what
* characters are (in)valid.
*/
/*
 * One flag byte per character 0x00-0x3f.  ntfs_collate_names() tests
 * bit 3 (value 8) to reject the characters that are illegal in NTFS
 * names: '"', '*', '<', '>' and '?' (entries holding 0x18).
 */
static const u8 legal_ansi_char_array[0x40] = {
	0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
	0x17, 0x07, 0x18, 0x17, 0x17, 0x17, 0x17, 0x17,
	0x17, 0x17, 0x18, 0x16, 0x16, 0x17, 0x07, 0x00,
	0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17, 0x17,
	0x17, 0x17, 0x04, 0x16, 0x18, 0x16, 0x18, 0x18,
};
/**
 * ntfs_are_names_equal - compare two Unicode names for equality
 * @s1:			name to compare to @s2
 * @s1_len:		length in Unicode characters of @s1
 * @s2:			name to compare to @s1
 * @s2_len:		length in Unicode characters of @s2
 * @ic:			ignore case bool
 * @upcase:		upcase table (only if @ic == IGNORE_CASE)
 * @upcase_size:	length in Unicode characters of @upcase (if present)
 *
 * Return 'true' when @s1 and @s2 are identical (case-insensitively so
 * via @upcase when @ic is IGNORE_CASE), 'false' otherwise.  Names of
 * different lengths can never be equal.
 */
bool ntfs_are_names_equal(const ntfschar *s1, size_t s1_len,
		const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic,
		const ntfschar *upcase, const u32 upcase_size)
{
	if (s1_len != s2_len)
		return false;
	if (ic != CASE_SENSITIVE)
		return !ntfs_ucsncasecmp(s1, s2, s1_len, upcase, upcase_size);
	return !ntfs_ucsncmp(s1, s2, s1_len);
}
/**
* ntfs_collate_names - collate two Unicode names
* @name1: first Unicode name to compare
* @name2: second Unicode name to compare
* @err_val: if @name1 contains an invalid character return this value
* @ic: either CASE_SENSITIVE or IGNORE_CASE
* @upcase: upcase table (ignored if @ic is CASE_SENSITIVE)
* @upcase_len: upcase table size (ignored if @ic is CASE_SENSITIVE)
*
* ntfs_collate_names collates two Unicode names and returns:
*
* -1 if the first name collates before the second one,
* 0 if the names match,
* 1 if the second name collates before the first one, or
* @err_val if an invalid character is found in @name1 during the comparison.
*
* The following characters are considered invalid: '"', '*', '<', '>' and '?'.
*/
int ntfs_collate_names(const ntfschar *name1, const u32 name1_len,
		const ntfschar *name2, const u32 name2_len,
		const int err_val, const IGNORE_CASE_BOOL ic,
		const ntfschar *upcase, const u32 upcase_len)
{
	u32 cnt, min_len;
	u16 c1, c2;

	min_len = name1_len;
	if (name1_len > name2_len)
		min_len = name2_len;
	for (cnt = 0; cnt < min_len; ++cnt) {
		c1 = le16_to_cpu(*name1++);
		c2 = le16_to_cpu(*name2++);
		/* Fold case through the upcase table for IGNORE_CASE. */
		if (ic) {
			if (c1 < upcase_len)
				c1 = le16_to_cpu(upcase[c1]);
			if (c2 < upcase_len)
				c2 = le16_to_cpu(upcase[c2]);
		}
		/* Bit 3 in legal_ansi_char_array flags illegal chars. */
		if (c1 < 64 && legal_ansi_char_array[c1] & 8)
			return err_val;
		if (c1 < c2)
			return -1;
		if (c1 > c2)
			return 1;
	}
	/* Common prefix is equal: the shorter name collates first. */
	if (name1_len < name2_len)
		return -1;
	if (name1_len == name2_len)
		return 0;
	/* name1_len > name2_len */
	/* Still validate name1's next character before deciding. */
	c1 = le16_to_cpu(*name1);
	if (c1 < 64 && legal_ansi_char_array[c1] & 8)
		return err_val;
	return 1;
}
/**
* ntfs_ucsncmp - compare two little endian Unicode strings
* @s1: first string
* @s2: second string
* @n: maximum unicode characters to compare
*
* Compare the first @n characters of the Unicode strings @s1 and @s2,
* The strings in little endian format and appropriate le16_to_cpu()
* conversion is performed on non-little endian machines.
*
* The function returns an integer less than, equal to, or greater than zero
* if @s1 (or the first @n Unicode characters thereof) is found, respectively,
* to be less than, to match, or be greater than @s2.
*/
/*
 * Compare at most @n characters of two little endian Unicode strings,
 * stopping early at a NUL.  Returns <0, 0 or >0 like strncmp().
 */
int ntfs_ucsncmp(const ntfschar *s1, const ntfschar *s2, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		u16 a = le16_to_cpu(s1[i]);
		u16 b = le16_to_cpu(s2[i]);

		if (a != b)
			return a < b ? -1 : 1;
		if (!a)
			break;	/* both strings terminated */
	}
	return 0;
}
/**
* ntfs_ucsncasecmp - compare two little endian Unicode strings, ignoring case
* @s1: first string
* @s2: second string
* @n: maximum unicode characters to compare
* @upcase: upcase table
* @upcase_size: upcase table size in Unicode characters
*
* Compare the first @n characters of the Unicode strings @s1 and @s2,
* ignoring case. The strings in little endian format and appropriate
* le16_to_cpu() conversion is performed on non-little endian machines.
*
* Each character is uppercased using the @upcase table before the comparison.
*
* The function returns an integer less than, equal to, or greater than zero
* if @s1 (or the first @n Unicode characters thereof) is found, respectively,
* to be less than, to match, or be greater than @s2.
*/
/*
 * Case-insensitive variant of ntfs_ucsncmp(): each character is folded
 * through @upcase (when within @upcase_size) before comparing.
 */
int ntfs_ucsncasecmp(const ntfschar *s1, const ntfschar *s2, size_t n,
		const ntfschar *upcase, const u32 upcase_size)
{
	size_t i;

	for (i = 0; i < n; i++) {
		u16 a = le16_to_cpu(s1[i]);
		u16 b = le16_to_cpu(s2[i]);

		if (a < upcase_size)
			a = le16_to_cpu(upcase[a]);
		if (b < upcase_size)
			b = le16_to_cpu(upcase[b]);
		if (a != b)
			return a < b ? -1 : 1;
		if (!a)
			break;	/* both strings terminated */
	}
	return 0;
}
/*
 * Uppercase @name in place using @upcase; characters beyond
 * @upcase_len are left untouched.
 */
void ntfs_upcase_name(ntfschar *name, u32 name_len, const ntfschar *upcase,
		const u32 upcase_len)
{
	u32 i;

	for (i = 0; i < name_len; i++) {
		u16 c = le16_to_cpu(name[i]);

		if (c < upcase_len)
			name[i] = upcase[c];
	}
}
/* Uppercase the name stored in @file_name_attr in place via @upcase. */
void ntfs_file_upcase_value(FILE_NAME_ATTR *file_name_attr,
		const ntfschar *upcase, const u32 upcase_len)
{
	ntfs_upcase_name((ntfschar*)&file_name_attr->file_name,
			file_name_attr->file_name_length, upcase, upcase_len);
}
/*
 * Collate the names of two FILE_NAME attributes with
 * ntfs_collate_names() semantics (-1/0/1, or @err_val on an illegal
 * character in the first name).
 */
int ntfs_file_compare_values(FILE_NAME_ATTR *file_name_attr1,
		FILE_NAME_ATTR *file_name_attr2,
		const int err_val, const IGNORE_CASE_BOOL ic,
		const ntfschar *upcase, const u32 upcase_len)
{
	return ntfs_collate_names((ntfschar*)&file_name_attr1->file_name,
			file_name_attr1->file_name_length,
			(ntfschar*)&file_name_attr2->file_name,
			file_name_attr2->file_name_length,
			err_val, ic, upcase, upcase_len);
}
/**
* ntfs_nlstoucs - convert NLS string to little endian Unicode string
* @vol: ntfs volume which we are working with
* @ins: input NLS string buffer
* @ins_len: length of input string in bytes
* @outs: on return contains the allocated output Unicode string buffer
*
* Convert the input string @ins, which is in whatever format the loaded NLS
* map dictates, into a little endian, 2-byte Unicode string.
*
* This function allocates the string and the caller is responsible for
* calling kmem_cache_free(ntfs_name_cache, *@outs); when finished with it.
*
* On success the function returns the number of Unicode characters written to
* the output string *@outs (>= 0), not counting the terminating Unicode NULL
* character. *@outs is set to the allocated output string buffer.
*
* On error, a negative number corresponding to the error code is returned. In
* that case the output string is not allocated. Both *@outs and *@outs_len
* are then undefined.
*
* This might look a bit odd due to fast path optimization...
*/
int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
		const int ins_len, ntfschar **outs)
{
	struct nls_table *nls = vol->nls_map;
	ntfschar *ucs;
	wchar_t wc;
	int i, o, wc_len;

	/* We do not trust outside sources. */
	if (likely(ins)) {
		ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
		if (likely(ucs)) {
			/* i walks input bytes, o counts output chars. */
			for (i = o = 0; i < ins_len; i += wc_len) {
				wc_len = nls->char2uni(ins + i, ins_len - i,
						&wc);
				if (likely(wc_len >= 0 &&
						o < NTFS_MAX_NAME_LEN)) {
					if (likely(wc)) {
						ucs[o++] = cpu_to_le16(wc);
						continue;
					} /* else if (!wc) */
					break;
				} /* else if (wc_len < 0 ||
						o >= NTFS_MAX_NAME_LEN) */
				goto name_err;
			}
			/* NUL-terminate and hand the buffer to the caller. */
			ucs[o] = 0;
			*outs = ucs;
			return o;
		} /* else if (!ucs) */
		ntfs_error(vol->sb, "Failed to allocate buffer for converted "
				"name from ntfs_name_cache.");
		return -ENOMEM;
	} /* else if (!ins) */
	ntfs_error(vol->sb, "Received NULL pointer.");
	return -EINVAL;
name_err:
	kmem_cache_free(ntfs_name_cache, ucs);
	if (wc_len < 0) {
		ntfs_error(vol->sb, "Name using character set %s contains "
				"characters that cannot be converted to "
				"Unicode.", nls->charset);
		i = -EILSEQ;
	} else /* if (o >= NTFS_MAX_NAME_LEN) */ {
		ntfs_error(vol->sb, "Name is too long (maximum length for a "
				"name on NTFS is %d Unicode characters.",
				NTFS_MAX_NAME_LEN);
		i = -ENAMETOOLONG;
	}
	return i;
}
/**
* ntfs_ucstonls - convert little endian Unicode string to NLS string
* @vol: ntfs volume which we are working with
* @ins: input Unicode string buffer
* @ins_len: length of input string in Unicode characters
* @outs: on return contains the (allocated) output NLS string buffer
* @outs_len: length of output string buffer in bytes
*
* Convert the input little endian, 2-byte Unicode string @ins, of length
* @ins_len into the string format dictated by the loaded NLS.
*
* If *@outs is NULL, this function allocates the string and the caller is
* responsible for calling kfree(*@outs); when finished with it. In this case
* @outs_len is ignored and can be 0.
*
* On success the function returns the number of bytes written to the output
* string *@outs (>= 0), not counting the terminating NULL byte. If the output
* string buffer was allocated, *@outs is set to it.
*
* On error, a negative number corresponding to the error code is returned. In
* that case the output string is not allocated. The contents of *@outs are
* then undefined.
*
* This might look a bit odd due to fast path optimization...
*/
int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins,
		const int ins_len, unsigned char **outs, int outs_len)
{
	struct nls_table *nls = vol->nls_map;
	unsigned char *ns;
	int i, o, ns_len, wc;

	/* We don't trust outside sources. */
	if (ins) {
		ns = *outs;
		ns_len = outs_len;
		if (ns && !ns_len) {
			wc = -ENAMETOOLONG;
			goto conversion_err;
		}
		if (!ns) {
			/* No caller buffer: allocate a worst-case one. */
			ns_len = ins_len * NLS_MAX_CHARSET_SIZE;
			ns = kmalloc(ns_len + 1, GFP_NOFS);
			if (!ns)
				goto mem_err_out;
		}
		/* i walks input characters, o counts output bytes. */
		for (i = o = 0; i < ins_len; i++) {
retry:			wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o,
					ns_len - o);
			if (wc > 0) {
				o += wc;
				continue;
			} else if (!wc)
				break;
			else if (wc == -ENAMETOOLONG && ns != *outs) {
				unsigned char *tc;
				/* Grow in multiples of 64 bytes. */
				tc = kmalloc((ns_len + 64) &
						~63, GFP_NOFS);
				if (tc) {
					memcpy(tc, ns, ns_len);
					ns_len = ((ns_len + 64) & ~63) - 1;
					kfree(ns);
					ns = tc;
					goto retry;
				} /* No memory so goto conversion_error; */
			} /* wc < 0, real error. */
			goto conversion_err;
		}
		ns[o] = 0;
		*outs = ns;
		return o;
	} /* else (!ins) */
	ntfs_error(vol->sb, "Received NULL pointer.");
	return -EINVAL;
conversion_err:
	ntfs_error(vol->sb, "Unicode name contains characters that cannot be "
			"converted to character set %s.  You might want to "
			"try to use the mount option nls=utf8.", nls->charset);
	if (ns != *outs)
		kfree(ns);
	if (wc != -ENAMETOOLONG)
		wc = -EILSEQ;
	return wc;
mem_err_out:
	ntfs_error(vol->sb, "Failed to allocate name!");
	return -ENOMEM;
}
| gpl-2.0 |
lexi6725/linux-3.17.1 | lib/percpu_counter.c | 360 | 5425 | /*
* Fast batching percpu counters.
*/
#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
static struct debug_obj_descr percpu_counter_debug_descr;
/*
 * debug_objects fixup: when a still-active percpu_counter is about to
 * be freed, destroy it first and report the object as repaired (1).
 */
static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	if (state != ODEBUG_STATE_ACTIVE)
		return 0;

	percpu_counter_destroy(fbc);
	debug_object_free(fbc, &percpu_counter_debug_descr);
	return 1;
}
/* debug_objects descriptor: recovers (auto-destroys) active counters
 * whose memory is being freed. */
static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

/* Register the counter as an active debug object. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

/* Deactivate and release the counter's debug object. */
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}
#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
/* Without CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER the hooks compile away. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
/*
 * Set the counter to @amount: zero every possible cpu's delta and
 * install @amount as the global count, all under the counter's lock.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(fbc->counters, cpu) = 0;
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
/*
 * Add @amount to this cpu's delta; once the delta's magnitude reaches
 * @batch, fold it into the global count under the lock.  Preemption is
 * disabled so the per-cpu slot cannot move under us.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		/* count - amount is the slot's old value: this zeroes it. */
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
/*
 * Initialize a counter to @amount: set up the lock (with @key for
 * lockdep), allocate the per-cpu deltas and, with CPU hotplug, add the
 * counter to the global list so dead cpus can be folded in.  Returns
 * 0 or -ENOMEM.
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
	debug_percpu_counter_activate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
/*
 * Tear down a counter: unlink it from the hotplug list and free the
 * per-cpu storage.  Safe on a counter that was never (successfully)
 * initialized, and NULLing ->counters makes a second call a no-op.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;
	debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	spin_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
/*
 * Scale the batch with the number of online cpus (floor 32) so larger
 * machines fold per-cpu deltas into the global count less often.
 */
static void compute_batch_value(void)
{
	int nr = num_online_cpus();
	percpu_counter_batch = max(32, nr*2);
}
/*
 * CPU hotplug notifier: recompute the batch for the new cpu count and,
 * when a cpu dies, fold its leftover delta from every registered
 * counter into the respective global count.
 */
static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;
	compute_batch_value();
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;
	cpu = (unsigned long)hcpu;
	spin_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		/* Move the dead cpu's delta into the global count. */
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}
/*
* Compare counter against given value.
* Return 1 if greater, 0 if equal and -1 if less
*/
/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64 count = percpu_counter_read(fbc);

	/* Is the rough count far enough from rhs to decide already? */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus()))
		return count > rhs ? 1 : -1;

	/* Too close to call: fall back to the precise sum. */
	count = percpu_counter_sum(fbc);
	if (count == rhs)
		return 0;
	return count > rhs ? 1 : -1;
}
EXPORT_SYMBOL(percpu_counter_compare);
/* Compute the initial batch size and hook cpu hotplug notifications. */
static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
| gpl-2.0 |
dh-electronics/linux-am33x | lib/lockref.c | 616 | 3970 | #include <linux/export.h>
#include <linux/lockref.h>
#if USE_CMPXCHG_LOCKREF
/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * CMPXCHG_LOOP(CODE, SUCCESS) is the lockless fast path shared by the
 * lockref operations below.  It snapshots lock_count (the embedded
 * spinlock and the count packed into one 64-bit word), runs CODE on a
 * "new" copy while the spinlock is observed unlocked, and tries to
 * install "new" with a single 64-bit cmpxchg.  On success it executes
 * SUCCESS; on cmpxchg contention it retries; if the spinlock is ever
 * seen held, control falls through to the caller's locked slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax_lowlatency();						\
	}									\
} while (0)

#else
/* no 64-bit cmpxchg: fast path compiles to nothing, slow path always runs */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* lockless fast path (no-op stub when cmpxchg is unavailable) */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* slow path: bump the count under the embedded spinlock */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/* fast path: refuse zero/negative (dead) counts without locking */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* slow path: same check, but under the spinlock */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 *
 * NOTE: on the 0 return the spinlock is deliberately left HELD; the
 * caller now owns it and must unlock (or mark the ref dead) itself.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* fast path: break out to the locked path on zero/dead counts */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* lock intentionally retained for the caller */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 *
 * Fast-path only: there is no locked fallback here, so -1 is also
 * returned whenever the cmpxchg path is unavailable or contended by
 * a lock holder.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 *
 * NOTE: on the 0 return the spinlock is deliberately left HELD so the
 * caller can inspect/tear down the object and release it itself.
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* fast path: the last reference must be dropped under the lock */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* lock intentionally retained for the caller */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold lockref->lock.  The large negative sentinel keeps
 * every get/put fast path failing (they all test count <= 0 / < 0).
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 *
 * Unlike lockref_get_not_zero(), a count of zero is acceptable here;
 * only a negative (dead) count is rejected.
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* fast path: reject only dead (negative) counts */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* slow path: same check under the spinlock */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
| gpl-2.0 |
rex-xxx/mt6572_x201 | external/openssl/ssl/ssl_err2.c | 872 | 3385 | /* ssl/ssl_err2.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#include <stdio.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
/*
 * SSL_load_error_strings - register human-readable error strings for
 * both libcrypto and libssl with the ERR machinery.  Compiles to an
 * empty function when the library is built with OPENSSL_NO_ERR.
 */
void SSL_load_error_strings(void)
	{
#ifndef OPENSSL_NO_ERR
	ERR_load_crypto_strings();
	ERR_load_SSL_strings();
#endif
	}
| gpl-2.0 |
cvpcs/android_kernel_omap | sound/sound_firmware.c | 872 | 1747 | #include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include "oss/sound_firmware.h"
/*
 * do_mod_firmware_load - read a firmware file into a fresh vmalloc buffer
 * @fn: path of the firmware file
 * @fp: out-parameter receiving the buffer (caller must vfree it)
 *
 * Returns the number of bytes read, or 0 on any failure.  Files larger
 * than 128K (131072 bytes) or of non-positive size are rejected.
 *
 * Reworked to use a single goto-based cleanup chain instead of
 * duplicating filp_close()/vfree() on every error path; behavior is
 * unchanged.
 */
static int do_mod_firmware_load(const char *fn, char **fp)
{
	struct file *filp;
	long l;
	char *dp;
	loff_t pos;

	filp = filp_open(fn, 0, 0);	/* 0 == O_RDONLY */
	if (IS_ERR(filp)) {
		printk(KERN_INFO "Unable to load '%s'.\n", fn);
		return 0;
	}
	l = filp->f_path.dentry->d_inode->i_size;
	if (l <= 0 || l > 131072) {
		printk(KERN_INFO "Invalid firmware '%s'\n", fn);
		goto out_close;
	}
	dp = vmalloc(l);
	if (dp == NULL) {
		printk(KERN_INFO "Out of memory loading '%s'.\n", fn);
		goto out_close;
	}
	pos = 0;
	/* a short read is treated as failure: all-or-nothing semantics */
	if (vfs_read(filp, dp, l, &pos) != l) {
		printk(KERN_INFO "Failed to read '%s'.\n", fn);
		goto out_free;
	}
	filp_close(filp, current->files);
	*fp = dp;
	return (int) l;

out_free:
	vfree(dp);
out_close:
	filp_close(filp, current->files);
	return 0;
}
/**
 * mod_firmware_load - load sound driver firmware
 * @fn: filename
 * @fp: return for the buffer.
 *
 * Load the firmware for a sound module (up to 128K) into a buffer.
 * The buffer is returned in *fp. It is allocated with vmalloc so is
 * virtually linear and not DMAable. The caller should free it with
 * vfree when finished.
 *
 * The length of the buffer is returned on a successful load, the
 * value zero on a failure.
 *
 * Caution: This API is not recommended. Firmware should be loaded via
 * request_firmware.
 */
int mod_firmware_load(const char *fn, char **fp)
{
	int r;
	mm_segment_t fs = get_fs();

	/* widen the address limit so vfs_read() accepts our kernel buffer */
	set_fs(get_ds());
	r = do_mod_firmware_load(fn, fp);
	set_fs(fs);	/* always restore the caller's segment limit */
	return r;
}
EXPORT_SYMBOL(mod_firmware_load);

MODULE_LICENSE("GPL");
| gpl-2.0 |
SM-G920P/Hacker_Kernel_SM-G92X | drivers/dma/ioat/dma.c | 1640 | 34656 | /*
* Intel I/OAT DMA Linux driver
* Copyright(c) 2004 - 2009 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
/*
* This driver supports an Intel I/OAT DMA engine, which does asynchronous
* copy operations.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
#include "../dmaengine.h"
/* number of queued descriptors that triggers an automatic hw append */
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data (the ioatdma_device)
 *
 * Returns IRQ_NONE when the device did not raise the interrupt,
 * IRQ_HANDLED after scheduling cleanup for every signalling channel.
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	/* master enable off: cannot be our interrupt */
	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	/* no channel status pending: ack/re-arm and disclaim */
	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	/* schedule the cleanup tasklet of each channel that signalled */
	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data (the signalling channel)
 *
 * One vector per channel, so no attention-status scan is needed; just
 * schedule that channel's cleanup tasklet if it is still running.
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}
/*
 * common channel initialization: wire up register base, locks, timer,
 * cleanup tasklet, and register the channel with the dmaengine core.
 * @idx is the zero-based channel number within @device.
 */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	/* per-channel register block: 0x80 bytes, channel 0 at offset 0x80 */
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}
/**
 * ioat1_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 *
 * Reads the channel count and transfer capability from hardware, then
 * allocates and initializes one ioat_dma_chan per channel.  Returns the
 * number of channels actually set up (allocation failures stop early).
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	/* clamp to the size of the device->idx[] bookkeeping array */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	/* scale 0 encodes "unlimited", otherwise capability is 2^scale bytes */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	/* the i7300 idle driver reserves the last channel for itself */
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;	/* record how many were actually created */
	return i;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 * descriptors to hw
 * @chan: DMA channel handle
 *
 * Lock-free inner helper: caller must hold ioat->desc_lock.  Resets the
 * pending count and pokes the hardware APPEND command so it picks up
 * newly chained descriptors.
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}
/* dmaengine issue_pending entry point: flush queued work to hw, if any */
static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}
/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 *
 * Logs and clears any latched channel error, issues a hardware reset,
 * and arms a timer to restart descriptor processing once the reset has
 * settled.
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}
/*
 * ioat1_tx_submit - dmaengine tx_submit hook: assign a cookie and splice
 * the descriptor chain onto the channel's in-flight (used) list.
 * Automatically kicks the hardware once the pending count crosses
 * ioat_pending_level.  Returns the new cookie.
 */
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	/* arm the completion-watchdog timer on the first outstanding tx */
	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 *
 * Returns NULL if either the DMA-coherent hw descriptor or the software
 * tracking structure cannot be allocated (no partial allocation leaks).
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		/* undo the pool allocation so nothing leaks */
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	/* pci_pool_alloc() does not zero; clear the hw descriptor */
	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);	/* id assigned when added to free list */

	return desc_sw;
}
/* how many descriptors to pre-allocate per channel at open time */
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");
/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 *
 * Pre-allocates the descriptor free list, sets up the completion
 * writeback area, and starts the channel with a NULL descriptor.
 * Idempotent: returns the existing count if already set up.
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors (partial success is tolerated) */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	/*
	 * NOTE(review): pci_pool_alloc() result is not checked here; a
	 * failed allocation would NULL-deref in the memset below — TODO
	 * confirm whether an error path (freeing tmp_list, returning
	 * -ENOMEM) should be added.
	 */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
/*
 * ioat_stop - quiesce a channel: stop interrupts, timers and tasklets
 * in an order that prevents any of them from re-arming the others,
 * then run one final cleanup pass.
 */
void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}
/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 *
 * Quiesces and resets the channel, frees every descriptor on the used
 * and free lists plus the completion writeback area, and resets the
 * channel's bookkeeping.  No-op if resources were never allocated.
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;		/* count leaks reported below */
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}
/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *desc;

	if (list_empty(&ioat->free_desc)) {
		/* free list exhausted: grow it (atomic - desc_lock is held) */
		desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!desc) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	} else {
		desc = to_ioat_desc(ioat->free_desc.next);
		list_del(&desc->node);
	}

	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(desc));
	prefetch(desc->hw);	/* warm the hw descriptor for the caller */

	return desc;
}
/*
 * ioat1_dma_prep_memcpy - build a (possibly multi-descriptor) memcpy
 * transaction.  Transfers larger than ioat->xfercap are split across a
 * chain of hw descriptors linked through hw->next; only the final
 * descriptor carries the caller's flags and a completion write.
 * Returns the tail descriptor's txd, or NULL if descriptors ran out
 * (any partially built chain is returned to the free list).
 */
static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		/* each hw descriptor moves at most xfercap bytes */
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			/* intermediate descs are implicitly acked */
			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		/* return the partial chain to the free list */
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	/* only the tail descriptor carries caller state and completion */
	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
/* tasklet body: run cleanup, then re-enable channel interrupts if still running */
static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;	/* channel stopped while we ran: don't re-arm irqs */
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
/*
 * ioat_dma_unmap - unmap a completed transaction's buffers unless the
 * submitter asked to skip them via DMA_COMPL_SKIP_*_UNMAP.  @hw is the
 * tail descriptor of the chain; its dst/src addresses point at the end
 * of the region, so the start is recovered via len - hw->size.
 */
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}
/*
 * ioat_get_current_completion - read the hardware's completion writeback
 * and return the physical address of the last completed descriptor.
 * Also reports (but does not yet recover from) a halted channel.
 */
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}
/*
 * ioat_cleanup_preamble - check whether cleanup has work to do.
 * Returns false if the completion address has not advanced since the
 * last pass; otherwise records it in *phys_complete, clears the ACK
 * state and re-arms the completion watchdog.
 */
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
/*
 * __cleanup - retire descriptors completed up to @phys_complete.
 * Caller must hold both cleanup_lock and desc_lock.  Walks the used
 * list in order, completing cookies, unmapping buffers and invoking
 * callbacks; acked descriptors are recycled to the free list.  The
 * descriptor matching @phys_complete is left on the list so new work
 * can be appended after it.
 */
static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		 __func__, (unsigned long long) phys_complete);

	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_cookie_complete(tx);
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;	/* fire only once */
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}
/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	/* trylock only: the watchdog timer guarantees eventual progress */
	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
/*
 * ioat1_timer_event - channel watchdog.  Three cases:
 *  - a reset finished: re-seed the chain address and restart the hw;
 *  - completions pending: try a forced cleanup; if no progress and one
 *    warning period (ACK) has already elapsed, reset the channel;
 *  - otherwise note the stall (set ACK) and re-arm the timer.
 */
static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;	/* undo the INT_MIN set by reset */
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}
/*
 * ioat_dma_tx_status - dmaengine tx_status hook.  If the cookie is not
 * yet complete, force a cleanup pass and re-check, so callers polling
 * status still make progress without waiting for the interrupt path.
 */
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}
/*
 * ioat1_dma_start_null_desc - seed the channel with a no-op descriptor
 * so the hardware chain has a valid head to append real work to, then
 * start the engine.
 */
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);	/* pre-acked: nobody waits on this */
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
/*
* Perform a IOAT transaction to verify the HW works.
*/
/* size of the self-test copy buffers, in bytes */
#define IOAT_TEST_SIZE 2000

/* completion callback for the self-test: just wake the waiter */
static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
/**
 * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
 * @device: device to be tested
 *
 * Copies a 2000-byte pattern through the first DMA channel and checks
 * the result.  Returns 0 on success, -ENOMEM if the buffers cannot be
 * allocated, -ENODEV on any hardware/self-test failure.
 *
 * Fix: a compare failure previously jumped straight to freeing the
 * channel resources, skipping the dma_unmap_single() calls and leaking
 * both streaming DMA mappings; it now unwinds through unmap_dma.
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	/*
	 * NOTE(review): dest is read here while still mapped
	 * DMA_FROM_DEVICE; this matches the original code but may need a
	 * sync/unmap first on non-coherent architectures — TODO confirm.
	 */
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		/* was "goto free_resources": leaked both DMA mappings */
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
"set ioat interrupt style: msix (default), "
"msix-single-vector, msi, intx)");
/**
* ioat_dma_setup_interrupts - setup interrupt handler
* @device: ioat device
*/
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
struct ioat_chan_common *chan;
struct pci_dev *pdev = device->pdev;
struct device *dev = &pdev->dev;
struct msix_entry *msix;
int i, j, msixcnt;
int err = -EINVAL;
u8 intrctrl = 0;
if (!strcmp(ioat_interrupt_style, "msix"))
goto msix;
if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
goto msix_single_vector;
if (!strcmp(ioat_interrupt_style, "msi"))
goto msi;
if (!strcmp(ioat_interrupt_style, "intx"))
goto intx;
dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
goto err_no_irq;
msix:
/* The number of MSI-X vectors should equal the number of channels */
msixcnt = device->common.chancnt;
for (i = 0; i < msixcnt; i++)
device->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
if (err < 0)
goto msi;
if (err > 0)
goto msix_single_vector;
for (i = 0; i < msixcnt; i++) {
msix = &device->msix_entries[i];
chan = ioat_chan_by_index(device, i);
err = devm_request_irq(dev, msix->vector,
ioat_dma_do_interrupt_msix, 0,
"ioat-msix", chan);
if (err) {
for (j = 0; j < i; j++) {
msix = &device->msix_entries[j];
chan = ioat_chan_by_index(device, j);
devm_free_irq(dev, msix->vector, chan);
}
goto msix_single_vector;
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
device->irq_mode = IOAT_MSIX;
goto done;
msix_single_vector:
msix = &device->msix_entries[0];
msix->entry = 0;
err = pci_enable_msix(pdev, device->msix_entries, 1);
if (err)
goto msi;
err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
"ioat-msix", device);
if (err) {
pci_disable_msix(pdev);
goto msi;
}
device->irq_mode = IOAT_MSIX_SINGLE;
goto done;
msi:
err = pci_enable_msi(pdev);
if (err)
goto intx;
err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
"ioat-msi", device);
if (err) {
pci_disable_msi(pdev);
goto intx;
}
device->irq_mode = IOAT_MSIX;
goto done;
intx:
err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
IRQF_SHARED, "ioat-intx", device);
if (err)
goto err_no_irq;
device->irq_mode = IOAT_INTX;
done:
if (device->intr_quirk)
device->intr_quirk(device);
intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
return 0;
err_no_irq:
/* Disable all interrupt generation */
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
device->irq_mode = IOAT_NOIRQ;
dev_err(dev, "no usable interrupts\n");
return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);
/* Mask every interrupt source at the device level (clears INTRCTRL,
 * including the master interrupt enable bit). */
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}
/*
 * ioat_probe - allocate device-wide resources and bring the device up
 * @device: device to probe
 *
 * Creates the descriptor and completion DMA pools, enumerates channels,
 * sets up interrupts and runs the hardware self test.  On any failure
 * everything acquired so far is released in reverse order via the goto
 * ladder at the bottom.  Returns 0 on success or a negative errno.
 */
int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	/* pool of u64 completion writeback areas, SMP_CACHE_BYTES aligned */
	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}
/*
 * Register the device with the dmaengine core.  On failure, undo what
 * ioat_probe() set up (interrupts and both descriptor pools).
 */
int ioat_register(struct ioatdma_device *device)
{
	int err;

	err = dma_async_device_register(&device->common);
	if (!err)
		return 0;

	ioat_disable_interrupts(device);
	pci_pool_destroy(device->completion_pool);
	pci_pool_destroy(device->dma_pool);
	return err;
}
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 ctrl;

	/* read-modify-write the MSI enable bit in PCI config space */
	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &ctrl);
	if (pdev->msi_enabled)
		ctrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		ctrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, ctrl);
}
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioat_dma_chan *ioat = to_ioat_chan(c);
return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
struct ioat_dma_chan *ioat = to_ioat_chan(c);
return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
/* sysfs: list the DMA capabilities of this device; "copy" is always
 * advertised, the rest depend on bits set in cap_mask. */
static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
/* sysfs: hardware version, high nibble = major, low nibble = minor */
static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct ioatdma_device *device = to_ioatdma_device(c->device);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
static struct attribute *ioat1_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
&ioat_version_attr.attr,
NULL,
};
/* Generic sysfs show dispatcher: forward to the entry's show hook. */
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry =
		container_of(attr, struct ioat_sysfs_entry, attr);
	struct ioat_chan_common *chan =
		container_of(kobj, struct ioat_chan_common, kobj);

	return entry->show ? entry->show(&chan->common, page) : -EIO;
}
const struct sysfs_ops ioat_sysfs_ops = {
.show = ioat_attr_show,
};
static struct kobj_type ioat1_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
.default_attrs = ioat1_attrs,
};
/*
 * ioat_kobject_add - create a "quickdata" sysfs node per channel
 * @device: device whose channels get sysfs nodes
 * @type: kobj_type providing the attribute set
 *
 * A failure on one channel is logged and recorded via the
 * IOAT_KOBJ_INIT_FAIL bit (so ioat_kobject_del() can skip it), but the
 * remaining channels are still processed.
 */
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			/* drop the init reference taken above */
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}
/* Remove the per-channel sysfs nodes created by ioat_kobject_add(),
 * skipping channels whose initialization failed. */
void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_chan *c;

	list_for_each_entry(c, &device->common.channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state))
			continue;

		kobject_del(&chan->kobj);
		kobject_put(&chan->kobj);
	}
}
/*
 * ioat1_dma_probe - set up and register a first-generation IOAT device
 * @device: device to probe
 * @dca: when non-zero, also initialize Direct Cache Access support
 *
 * Installs the v1-specific callbacks and dmaengine operations, then runs
 * the common probe/register path and adds the per-channel sysfs nodes.
 * Returns 0 on success or a negative errno from probe/registration.
 */
int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	/* hooks used by the code shared with later IOAT generations */
	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}
/* Tear down a registered IOAT device: quiesce interrupts, remove the
 * sysfs nodes, unregister from dmaengine and release both DMA pools. */
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	/* leave the channel list empty for any later re-probe */
	INIT_LIST_HEAD(&dma->channels);
}
| gpl-2.0 |
Mazout360/kernel-maz | arch/mips/powertv/asic/prealloc-calliope.c | 2152 | 9503 | /*
* Memory pre-allocations for Calliope boxes.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author: Ken Eppinett
* David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/mach-powertv/asic.h>
#include "prealloc.h"
/*
* NON_DVR_CAPABLE CALLIOPE RESOURCES
*/
/* Pre-allocated memory map for non-DVR Calliope boxes.  Entries with a
 * start address of 0 appear to be placed dynamically at init time
 * (NOTE(review): semantics of IORESOURCE_PTV_RES_LOEXT come from
 * prealloc.h — confirm there).  The all-zero-flags entry terminates
 * the table. */
struct resource non_dvr_calliope_resources[] __initdata =
{
	/*
	 * VIDEO / LX1
	 */
	/* Delta-Mu 1 image (2MiB) */
	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 monitor (8KiB) */
	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */
	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
		IORESOURCE_MEM)

	/*
	 * Sysaudio Driver
	 */
	/* DSP code and data images (1MiB) */
	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC CPU PCM buffer (40KiB) */
	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC AUX buffer (128KiB) */
	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC Main buffer (128KiB) */
	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * STAVEM driver/STAPI
	 */
	/* 6MiB */
	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * DOCSIS Subsystem
	 */
	/* 7MiB */
	PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)

	/*
	 * GHW HAL Driver
	 */
	/* PowerTV Graphics Heap (14MiB) */
	PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
		IORESOURCE_MEM)

	/*
	 * multi com buffer area
	 */
	/* 128KiB */
	PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
		IORESOURCE_MEM)

	/*
	 * DMA Ring buffer (don't need recording buffers)
	 */
	/* 680KiB */
	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit0
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * AVFS: player HAL memory
	 */
	/* 945K * 3 for playback */
	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * PMEM
	 */
	/* Persistent memory for diagnostics (64KiB) */
	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Smartcard
	 */
	/* Read and write buffers for Internal/External cards (10KiB) */
	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * NAND Flash
	 */
	/* 10KiB */
	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
		IORESOURCE_MEM)

	/*
	 * Synopsys GMAC Memory Region
	 */
	/* 64KiB */
	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * TFTPBuffer
	 *
	 * This buffer is used in some minimal configurations (e.g. two-way
	 * loader) for storing software images
	 */
	PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Add other resources here
	 */

	/*
	 * End of Resource marker
	 */
	{
		.flags = 0,
	},
};
/* Pre-allocated memory map for the non-DVR VZE Calliope variant; same
 * structure as non_dvr_calliope_resources but with smaller, fixed
 * regions.  The all-zero-flags entry terminates the table. */
struct resource non_dvr_vze_calliope_resources[] __initdata =
{
	/*
	 * VIDEO / LX1
	 */
	/* Delta-Mu 1 image (2MiB) */
	PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 monitor (8KiB) */
	PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 RAM (10.12MiB) */
	PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
		IORESOURCE_MEM)

	/*
	 * Sysaudio Driver
	 */
	/* DSP code and data images (1MiB) */
	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC CPU PCM buffer (40KiB) */
	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC AUX buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC Main buffer (16KiB) */
	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * STAVEM driver/STAPI
	 */
	/* 3.125MiB */
	PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
		IORESOURCE_MEM)

	/*
	 * GHW HAL Driver
	 */
	/* PowerTV Graphics Heap (2.59MiB) */
	PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
		IORESOURCE_MEM)

	/*
	 * multi com buffer area
	 */
	/* 128KiB */
	PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
		IORESOURCE_MEM)

	/*
	 * DMA Ring buffer (don't need recording buffers)
	 */
	/* 680KiB */
	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit0
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * PMEM
	 */
	/* Persistent memory for diagnostics (64KiB) */
	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Smartcard
	 */
	/* Read and write buffers for Internal/External cards (10KiB) */
	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * NAND Flash
	 */
	/* 10KiB */
	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
		IORESOURCE_MEM)

	/*
	 * Synopsys GMAC Memory Region
	 */
	/* 64KiB */
	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Add other resources here
	 */

	/*
	 * End of Resource marker
	 */
	{
		.flags = 0,
	},
};
/* Pre-allocated memory map for the non-DVR VZF Calliope variant; adds a
 * second display-bins region (unit1) relative to the VZE map.  The
 * all-zero-flags entry terminates the table. */
struct resource non_dvr_vzf_calliope_resources[] __initdata =
{
	/*
	 * VIDEO / LX1
	 */
	/* Delta-Mu 1 image (2MiB) */
	PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 monitor (8KiB) */
	PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
		IORESOURCE_MEM)
	/* Delta-Mu 1 RAM (~19.4 (21.5MiB - (2MiB + 8KiB))) */
	PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
		IORESOURCE_MEM)

	/*
	 * Sysaudio Driver
	 */
	/* DSP code and data images (1MiB) */
	PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC CPU PCM buffer (40KiB) */
	PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC AUX buffer (128KiB) */
	PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
	/* ADSC Main buffer (128KiB) */
	PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * STAVEM driver/STAPI
	 */
	/* 4.5MiB */
	PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * GHW HAL Driver
	 */
	/* PowerTV Graphics Heap (14MiB) */
	PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
		IORESOURCE_MEM)

	/*
	 * multi com buffer area
	 */
	/* 128KiB */
	PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
		IORESOURCE_MEM)

	/*
	 * DMA Ring buffer (don't need recording buffers)
	 */
	/* 680KiB */
	PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit0
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Display bins buffer for unit1
	 */
	/* 4KiB */
	PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * AVFS: player HAL memory
	 */
	/* 945K * 3 for playback */
	PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * PMEM
	 */
	/* Persistent memory for diagnostics (64KiB) */
	PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Smartcard
	 */
	/* Read and write buffers for Internal/External cards (10KiB) */
	PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * NAND Flash
	 */
	/* 10KiB */
	PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
		IORESOURCE_MEM)

	/*
	 * Synopsys GMAC Memory Region
	 */
	/* 64KiB */
	PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
		(IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))

	/*
	 * Add other resources here
	 */

	/*
	 * End of Resource marker
	 */
	{
		.flags = 0,
	},
};
| gpl-2.0 |
lgeek/linux-tronsmart-orion-r28 | net/ipv6/protocol.c | 2408 | 2152 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PF_INET6 protocol dispatch tables.
*
* Authors: Pedro Roque <roque@di.fc.ul.pt>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
*
* Vince Laviano (vince@cs.stanford.edu) 16 May 2001
* - Removed unused variable 'inet6_protocol_base'
* - Modified inet6_del_protocol() to correctly maintain copy bit.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
EXPORT_SYMBOL(inet6_protos);
/* Register an inet6 protocol handler; fails with -1 if the slot for
 * @protocol is already taken.  Lock-free via cmpxchg. */
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
	const struct inet6_protocol **slot;

	slot = (const struct inet6_protocol **)&inet6_protos[protocol];
	if (cmpxchg(slot, NULL, prot) == NULL)
		return 0;

	return -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
/*
* Remove a protocol from the hash tables.
*/
/* Unregister an inet6 protocol handler; fails with -1 when @prot is not
 * the one currently installed.  synchronize_net() makes sure no reader
 * still holds a reference before the caller may free @prot. */
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
	const struct inet6_protocol **slot;
	int ret = -1;

	slot = (const struct inet6_protocol **)&inet6_protos[protocol];
	if (cmpxchg(slot, prot, NULL) == prot)
		ret = 0;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(inet6_del_protocol);
#endif
const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly;
/* Register an inet6 GSO/GRO offload handler; fails with -1 if the slot
 * for @protocol is already taken.  Lock-free via cmpxchg. */
int inet6_add_offload(const struct net_offload *prot, unsigned char protocol)
{
	const struct net_offload **slot;

	slot = (const struct net_offload **)&inet6_offloads[protocol];
	if (cmpxchg(slot, NULL, prot) == NULL)
		return 0;

	return -1;
}
EXPORT_SYMBOL(inet6_add_offload);
/* Unregister an inet6 offload handler; fails with -1 when @prot is not
 * the one currently installed.  synchronize_net() waits out readers. */
int inet6_del_offload(const struct net_offload *prot, unsigned char protocol)
{
	const struct net_offload **slot;
	int ret = -1;

	slot = (const struct net_offload **)&inet6_offloads[protocol];
	if (cmpxchg(slot, prot, NULL) == prot)
		ret = 0;

	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(inet6_del_offload);
| gpl-2.0 |
AndroidDeveloperAlliance/ZenKernel_TUNA | drivers/gpu/drm/nouveau/nv17_tv.c | 2664 | 24213 | /*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
/*
 * nv42_tv_sample_load - TV DAC load detection for NV42/NV43
 * @encoder: TV encoder to probe
 *
 * Saves all affected GPIO/RAMDAC state, drives a test level onto the
 * DAC outputs and samples the load-detect bits, then restores the saved
 * state in reverse order.  Returns the raw TEST_CONTROL sample; the
 * caller extracts the connected-pin mask from bits 28-31.
 * The register write ordering below mirrors the save ordering and is
 * hardware-mandated — do not reorder.
 */
static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
	uint32_t sample = 0;
	int head;

#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
	/* default test level, overridden by the VBIOS value when present */
	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
	if (dev_priv->vbios.tvdactestval)
		testval = dev_priv->vbios.tvdactestval;

	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
	head = (dacclk & 0x100) >> 8;

	/* Save the previous state. */
	gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
	gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
	fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
	test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
	ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
	ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);

	/* Prepare the DAC for load detection. */
	gpio->set(dev, DCB_GPIO_TVDAC1, true);
	gpio->set(dev, DCB_GPIO_TVDAC0, true);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
		      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
		      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
		      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
		      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
		      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);

	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
		      (dacclk & ~0xff) | 0x22);
	msleep(1);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
		      (dacclk & ~0xff) | 0x21);

	NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
	NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);

	/* Sample pin 0x4 (usually S-video luma). */
	NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
	msleep(20);
	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
		& 0x4 << 28;

	/* Sample the remaining pins. */
	NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
	msleep(20);
	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
		& 0xa << 28;

	/* Restore the previous state. */
	NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
	NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
	NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
	gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
	gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);

	return sample;
}
/*
 * Boards whose TV load detection is known to be unreliable: report an
 * S-video pin mask directly and return false to tell the caller not to
 * trust hardware probing.
 */
static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
	/* Zotac FX5200 */
	if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
	    nv_match_device(dev, 0x0322, 0x19da, 0x2035) ||
	    /* MSI nForce2 IGP */
	    nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
		*pin_mask = 0xc;
		return false;
	}

	return true;
}
/*
 * nv17_tv_detect - determine whether (and what kind of) TV is attached
 *
 * Runs DAC load detection (chipset-specific variant for 0x42/0x43),
 * maps the detected pin mask to a subconnector type, publishes it via
 * the tv_subconnector property and reports the connection status.
 * Quirked boards (see get_tv_detect_quirks) report "unknown".
 */
static enum drm_connector_status
nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_mode_config *conf = &dev->mode_config;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct dcb_entry *dcb = tv_enc->base.dcb;
	bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);

	if (nv04_dac_in_use(encoder))
		return connector_status_disconnected;

	if (reliable) {
		/* NV42/NV43 need the special sampling routine above */
		if (dev_priv->chipset == 0x42 ||
		    dev_priv->chipset == 0x43)
			tv_enc->pin_mask =
				nv42_tv_sample_load(encoder) >> 28 & 0xe;
		else
			tv_enc->pin_mask =
				nv17_dac_sample_load(encoder) >> 28 & 0xe;
	}

	/* map loaded pins -> connector type */
	switch (tv_enc->pin_mask) {
	case 0x2:
	case 0x4:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
		break;
	case 0xc:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
		break;
	case 0xe:
		if (dcb->tvconf.has_component_output)
			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
		else
			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
		break;
	default:
		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
		break;
	}

	drm_connector_property_set_value(connector,
					 conf->tv_subconnector_property,
					 tv_enc->subconnector);

	if (!reliable) {
		return connector_status_unknown;
	} else if (tv_enc->subconnector) {
		NV_INFO(dev, "Load detected on output %c\n",
			'@' + ffs(dcb->or));
		return connector_status_connected;
	} else {
		return connector_status_disconnected;
	}
}
/*
 * nv17_tv_get_ld_modes - populate modes for SD ("load detect") TV norms
 *
 * Duplicates each entry of the fixed nv17_tv_modes table, derives its
 * pixel clock from the norm's vertical refresh rate, and marks the mode
 * matching the norm's native resolution as preferred.  Returns the
 * number of modes added.
 *
 * Fix: drm_mode_duplicate() can return NULL on allocation failure; the
 * old code dereferenced the result unconditionally.
 */
static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
				struct drm_connector *connector)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	const struct drm_display_mode *tv_mode;
	int n = 0;

	for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(encoder->dev, tv_mode);
		if (!mode)
			continue;

		mode->clock = tv_norm->tv_enc_mode.vrefresh *
			mode->htotal / 1000 *
			mode->vtotal / 1000;

		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
			mode->clock *= 2;

		if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
		    mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
		n++;
	}

	return n;
}
/*
 * nv17_tv_get_hd_modes - populate modes for HD (component) TV norms
 *
 * Offers a set of common resolutions no larger than the norm's output
 * mode: the exact output mode (preferred) plus CVT-generated modes,
 * with sync timings patched where CVT is known to be unsuitable.
 * Returns the number of modes added.
 *
 * Fix: both drm_mode_duplicate() and drm_cvt_mode() can return NULL on
 * allocation failure; the old code dereferenced the result directly.
 */
static int nv17_tv_get_hd_modes(struct drm_encoder *encoder,
				struct drm_connector *connector)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode;
	struct drm_display_mode *mode;
	const struct {
		int hdisplay;
		int vdisplay;
	} modes[] = {
		{ 640, 400 },
		{ 640, 480 },
		{ 720, 480 },
		{ 720, 576 },
		{ 800, 600 },
		{ 1024, 768 },
		{ 1280, 720 },
		{ 1280, 1024 },
		{ 1920, 1080 }
	};
	int i, n = 0;

	for (i = 0; i < ARRAY_SIZE(modes); i++) {
		if (modes[i].hdisplay > output_mode->hdisplay ||
		    modes[i].vdisplay > output_mode->vdisplay)
			continue;

		if (modes[i].hdisplay == output_mode->hdisplay &&
		    modes[i].vdisplay == output_mode->vdisplay) {
			mode = drm_mode_duplicate(encoder->dev, output_mode);
			if (!mode)
				continue;
			mode->type |= DRM_MODE_TYPE_PREFERRED;
		} else {
			mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
					    modes[i].vdisplay, 60, false,
					    (output_mode->flags &
					     DRM_MODE_FLAG_INTERLACE), false);
			if (!mode)
				continue;
		}

		/* CVT modes are sometimes unsuitable... */
		if (output_mode->hdisplay <= 720
		    || output_mode->hdisplay >= 1920) {
			mode->htotal = output_mode->htotal;
			mode->hsync_start = (mode->hdisplay + (mode->htotal
					     - mode->hdisplay) * 9 / 10) & ~7;
			mode->hsync_end = mode->hsync_start + 8;
		}

		if (output_mode->vdisplay >= 1024) {
			mode->vtotal = output_mode->vtotal;
			mode->vsync_start = output_mode->vsync_start;
			mode->vsync_end = output_mode->vsync_end;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;
		drm_mode_probed_add(connector, mode);
		n++;
	}

	return n;
}
static int nv17_tv_get_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
if (tv_norm->kind == CTV_ENC_MODE)
return nv17_tv_get_hd_modes(encoder, connector);
else
return nv17_tv_get_ld_modes(encoder, connector);
}
/*
 * nv17_tv_mode_valid - validate a user mode against the active TV norm
 *
 * HD norms: reject modes larger than the norm's output mode, with a
 * mismatched interlace flag, doublescan, or a pixel clock above 400MHz.
 * SD norms: reject clocks above 70MHz, refresh rates more than 0.6Hz
 * away from the norm, and interlaced modes (the encoder itself handles
 * interlacing).
 */
static int nv17_tv_mode_valid(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	if (tv_norm->kind == CTV_ENC_MODE) {
		struct drm_display_mode *output_mode =
			&tv_norm->ctv_enc_mode.mode;

		if (mode->clock > 400000)
			return MODE_CLOCK_HIGH;

		if (mode->hdisplay > output_mode->hdisplay ||
		    mode->vdisplay > output_mode->vdisplay)
			return MODE_BAD;

		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
		    (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
			return MODE_NO_INTERLACE;

		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
			return MODE_NO_DBLESCAN;
	} else {
		/* vrefresh is stored in millihertz, tolerance is 0.6 Hz */
		const int vsync_tolerance = 600;

		if (mode->clock > 70000)
			return MODE_CLOCK_HIGH;

		if (abs(drm_mode_vrefresh(mode) * 1000 -
			tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
			return MODE_VSYNC;

		/* The encoder takes care of the actual interlacing */
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			return MODE_NO_INTERLACE;
	}

	return MODE_OK;
}
/* Force the adjusted mode's pixel clock to what the TV encoder needs:
 * the norm's output clock for HD norms, a fixed 90 MHz otherwise. */
static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	if (nv04_dac_in_use(encoder))
		return false;

	adjusted_mode->clock = (tv_norm->kind == CTV_ENC_MODE) ?
		tv_norm->ctv_enc_mode.mode.clock : 90000;

	return true;
}
/* DPMS hook: power the TV encoder, the DAC GPIOs and the DAC clock up
 * or down.  The PTV state is loaded before the GPIOs/DAC are switched. */
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);

	/* Skip redundant reprogramming of the same DPMS state. */
	if (nouveau_encoder(encoder)->last_dpms == mode)
		return;
	nouveau_encoder(encoder)->last_dpms = mode;

	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
		mode, nouveau_encoder(encoder)->dcb->index);

	regs->ptv_200 &= ~1;	/* bit 0: encoder enable, set below for SD */

	if (tv_norm->kind == CTV_ENC_MODE) {
		/* HD ("CTV") modes go through the flat-panel path. */
		nv04_dfp_update_fp_control(encoder, mode);
	} else {
		/* SD modes: FP path stays off; gate the PTV enable bit
		 * on the requested power state instead. */
		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
		if (mode == DRM_MODE_DPMS_ON)
			regs->ptv_200 |= 1;
	}

	nv_load_ptv(dev, regs, 200);

	/* Both TV DAC GPIOs track the power state. */
	gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
	gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);

	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
/* Encoder prepare hook: power the output down and set up head/DAC clock
 * routing before the new mode is programmed in mode_set/commit. */
static void nv17_tv_prepare(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	/* Shadow copy of this head's CRE_LCD CRTC register. */
	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
			NV_CIO_CRE_LCD__INDEX];
	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
			nv04_dac_output_offset(encoder);
	uint32_t dacclk;

	helper->dpms(encoder, DRM_MODE_DPMS_OFF);

	nv04_dfp_disable(dev, head);

	/* Unbind any FP encoders from this head if we need the FP
	 * stuff enabled. */
	if (tv_norm->kind == CTV_ENC_MODE) {
		struct drm_encoder *enc;

		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;

			/* Only idle (crtc-less) digital outputs bound to
			 * our head are rebound to the other head. */
			if ((dcb->type == OUTPUT_TMDS ||
			     dcb->type == OUTPUT_LVDS) &&
			     !enc->crtc &&
			     nv04_dfp_get_bound_head(dev, dcb) == head) {
				nv04_dfp_bind_head(dev, dcb, head ^ 1,
						   dev_priv->vbios.fp.dual_link);
			}
		}
	}

	if (tv_norm->kind == CTV_ENC_MODE)
		*cr_lcd |= 0x1 | (head ? 0x0 : 0x8);

	/* Set the DACCLK register */
	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;

	if (dev_priv->card_type == NV_40)
		dacclk |= 0x1a << 16;

	/* DACCLK bits [5:4] select the encoder path (see the comment in
	 * nv17_tv_mode_set); bit 8 picks the head feeding the DAC. */
	if (tv_norm->kind == CTV_ENC_MODE) {
		dacclk |= 0x20;

		if (head)
			dacclk |= 0x100;
		else
			dacclk &= ~0x100;

	} else {
		dacclk |= 0x10;
	}

	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
}
/* Encoder mode_set hook: fill the software register shadows for the
 * selected norm.  Nothing is written to hardware here; the shadows are
 * loaded in nv17_tv_commit().  The fixed values below come from
 * reverse engineering and are intentionally left as magic numbers. */
static void nv17_tv_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *drm_mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	int i;

	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
	regs->tv_setup = 1;
	regs->ramdac_8c0 = 0x0;

	if (tv_norm->kind == TV_ENC_MODE) {
		/* Standard-definition path: program the PTV block. */
		tv_regs->ptv_200 = 0x13111100;
		if (head)
			tv_regs->ptv_200 |= 0x10;	/* set when on head 1 */

		tv_regs->ptv_20c = 0x808010;
		tv_regs->ptv_304 = 0x2d00000;
		tv_regs->ptv_600 = 0x0;
		tv_regs->ptv_60c = 0x0;
		tv_regs->ptv_610 = 0x1e00000;

		/* 576-line and 480-line norms need different setup. */
		if (tv_norm->tv_enc_mode.vdisplay == 576) {
			tv_regs->ptv_508 = 0x1200000;
			tv_regs->ptv_614 = 0x33;

		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
			tv_regs->ptv_508 = 0xf00000;
			tv_regs->ptv_614 = 0x13;
		}

		/* NV30+ uses one fixed set; older cards vary per norm. */
		if (dev_priv->card_type >= NV_30) {
			tv_regs->ptv_500 = 0xe8e0;
			tv_regs->ptv_504 = 0x1710;
			tv_regs->ptv_604 = 0x0;
			tv_regs->ptv_608 = 0x0;
		} else {
			if (tv_norm->tv_enc_mode.vdisplay == 576) {
				tv_regs->ptv_604 = 0x20;
				tv_regs->ptv_608 = 0x10;
				tv_regs->ptv_500 = 0x19710;
				tv_regs->ptv_504 = 0x68f0;

			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
				tv_regs->ptv_604 = 0x10;
				tv_regs->ptv_608 = 0x20;
				tv_regs->ptv_500 = 0x4b90;
				tv_regs->ptv_504 = 0x1b480;
			}
		}

		/* Copy the per-norm TV encoder register table. */
		for (i = 0; i < 0x40; i++)
			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];

	} else {
		struct drm_display_mode *output_mode =
			&tv_norm->ctv_enc_mode.mode;

		/* The registers in PRAMDAC+0xc00 control some timings and CSC
		 * parameters for the CTV encoder (It's only used for "HD" TV
		 * modes, I don't think I have enough working to guess what
		 * they exactly mean...), it's probably connected at the
		 * output of the FP encoder, but it also needs the analog
		 * encoder in its OR enabled and routed to the head it's
		 * using. It's enabled with the DACCLK register, bits [5:4].
		 */
		for (i = 0; i < 38; i++)
			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];

		/* FP timing registers are 0-based, hence the -1. */
		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
		regs->fp_horiz_regs[FP_SYNC_START] =
			output_mode->hsync_start - 1;
		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
			max((output_mode->hdisplay-600)/40 - 1, 1);

		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
		regs->fp_vert_regs[FP_SYNC_START] =
			output_mode->vsync_start - 1;
		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;

		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;

		/* Propagate the output mode's sync polarities. */
		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;

		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;

		regs->fp_debug_2 = 0;

		regs->fp_margin_color = 0x801080;
	}
}
/* Encoder commit hook: refresh rescaler/property state, load the
 * register shadows built in mode_set, and power the output up. */
static void nv17_tv_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_encoder_helper_funcs *helper = encoder->helper_private;

	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
		nv17_tv_update_rescaler(encoder);
		nv17_tv_update_properties(encoder);
	} else {
		nv17_ctv_update_rescaler(encoder);
	}

	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);

	/* This could use refinement for flatpanels, but it should work */
	/* TEST_CONTROL value differs for pre-0x44 chipsets. */
	if (dev_priv->chipset < 0x44)
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
			      nv04_dac_output_offset(encoder),
			      0xf0000000);
	else
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
			      nv04_dac_output_offset(encoder),
			      0x00100000);

	helper->dpms(encoder, DRM_MODE_DPMS_ON);

	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
		drm_get_connector_name(
			&nouveau_encoder_connector_get(nv_encoder)->base),
		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv17_tv_save(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
nouveau_encoder(encoder)->restore.output =
NVReadRAMDAC(dev, 0,
NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder));
nv17_tv_state_save(dev, &tv_enc->saved_state);
tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
}
static void nv17_tv_restore(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
nv04_dac_output_offset(encoder),
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
/* Attach the DRM TV properties to the connector and apply the optional
 * "tv_norm" module-parameter override.  Always returns 0. */
static int nv17_tv_create_resources(struct drm_encoder *encoder,
				    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct drm_mode_config *conf = &dev->mode_config;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
	/* Boards without component output only expose the LD norms. */
	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
							NUM_LD_TV_NORMS;
	int i;

	if (nouveau_tv_norm) {
		/* Look up the requested norm by name; on failure keep the
		 * existing default and just warn. */
		for (i = 0; i < num_tv_norms; i++) {
			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
				tv_enc->tv_norm = i;
				break;
			}
		}

		if (i == num_tv_norms)
			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
				nouveau_tv_norm);
	}

	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);

	/* Attach each property with its current value as the initial one. */
	drm_connector_attach_property(connector,
				      conf->tv_select_subconnector_property,
				      tv_enc->select_subconnector);
	drm_connector_attach_property(connector,
				      conf->tv_subconnector_property,
				      tv_enc->subconnector);
	drm_connector_attach_property(connector,
				      conf->tv_mode_property,
				      tv_enc->tv_norm);
	drm_connector_attach_property(connector,
				      conf->tv_flicker_reduction_property,
				      tv_enc->flicker);
	drm_connector_attach_property(connector,
				      conf->tv_saturation_property,
				      tv_enc->saturation);
	drm_connector_attach_property(connector,
				      conf->tv_hue_property,
				      tv_enc->hue);
	drm_connector_attach_property(connector,
				      conf->tv_overscan_property,
				      tv_enc->overscan);

	return 0;
}
/* DRM property setter.  Most properties only apply to the SD
 * (TV_ENC_MODE) encoder and return -EINVAL for component norms.
 * Changing the norm forces a mode reprobe and CRTC disable. */
static int nv17_tv_set_property(struct drm_encoder *encoder,
				struct drm_connector *connector,
				struct drm_property *property,
				uint64_t val)
{
	struct drm_mode_config *conf = &encoder->dev->mode_config;
	struct drm_crtc *crtc = encoder->crtc;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	bool modes_changed = false;

	if (property == conf->tv_overscan_property) {
		tv_enc->overscan = val;
		/* Overscan feeds the rescaler; update it live if the
		 * encoder is currently driving a CRTC. */
		if (encoder->crtc) {
			if (tv_norm->kind == CTV_ENC_MODE)
				nv17_ctv_update_rescaler(encoder);
			else
				nv17_tv_update_rescaler(encoder);
		}

	} else if (property == conf->tv_saturation_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->saturation = val;
		nv17_tv_update_properties(encoder);

	} else if (property == conf->tv_hue_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->hue = val;
		nv17_tv_update_properties(encoder);

	} else if (property == conf->tv_flicker_reduction_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->flicker = val;
		if (encoder->crtc)
			nv17_tv_update_rescaler(encoder);

	} else if (property == conf->tv_mode_property) {
		/* Norm changes need a full modeset; refuse while the
		 * connector is powered on. */
		if (connector->dpms != DRM_MODE_DPMS_OFF)
			return -EINVAL;

		tv_enc->tv_norm = val;

		modes_changed = true;

	} else if (property == conf->tv_select_subconnector_property) {
		if (tv_norm->kind != TV_ENC_MODE)
			return -EINVAL;

		tv_enc->select_subconnector = val;
		nv17_tv_update_properties(encoder);

	} else {
		return -EINVAL;
	}

	if (modes_changed) {
		drm_helper_probe_single_connector_modes(connector, 0, 0);

		/* Disable the crtc to ensure a full modeset is
		 * performed whenever it's turned on again. */
		if (crtc) {
			struct drm_mode_set modeset = {
				.crtc = crtc,
			};

			crtc->funcs->set_config(&modeset);
		}
	}

	return 0;
}
/* Encoder destroy hook: tear down the DRM encoder object and release
 * the wrapping nv17_tv_encoder allocated in nv17_tv_create(). */
static void nv17_tv_destroy(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *enc = to_tv_enc(encoder);

	NV_DEBUG_KMS(encoder->dev, "\n");

	drm_encoder_cleanup(encoder);
	kfree(enc);
}
/* DRM encoder helper callbacks (modeset/DPMS plumbing). */
static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
	.dpms = nv17_tv_dpms,
	.save = nv17_tv_save,
	.restore = nv17_tv_restore,
	.mode_fixup = nv17_tv_mode_fixup,
	.prepare = nv17_tv_prepare,
	.commit = nv17_tv_commit,
	.mode_set = nv17_tv_mode_set,
	.detect = nv17_tv_detect,
};
/* Slave-encoder callbacks: mode listing/validation and TV properties. */
static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
	.get_modes = nv17_tv_get_modes,
	.mode_valid = nv17_tv_mode_valid,
	.create_resources = nv17_tv_create_resources,
	.set_property = nv17_tv_set_property,
};
/* Core encoder ops; teardown handled by nv17_tv_destroy(). */
static struct drm_encoder_funcs nv17_tv_funcs = {
	.destroy = nv17_tv_destroy,
};
/* Allocate and register a TV-out encoder for the given DCB entry.
 * Returns 0 on success or -ENOMEM if allocation fails. */
int
nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
{
	struct drm_device *dev = connector->dev;
	struct nv17_tv_encoder *tv_enc;
	struct drm_encoder *encoder;

	tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
	if (!tv_enc)
		return -ENOMEM;

	/* Default TV property values. */
	tv_enc->overscan = 50;
	tv_enc->flicker = 50;
	tv_enc->saturation = 50;
	tv_enc->hue = 0;
	tv_enc->tv_norm = TV_NORM_PAL;
	tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
	tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
	tv_enc->pin_mask = 0;

	tv_enc->base.dcb = entry;
	tv_enc->base.or = ffs(entry->or) - 1;
	encoder = to_drm_encoder(&tv_enc->base);

	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;

	encoder->possible_crtcs = entry->heads;
	encoder->possible_clones = 0;

	nv17_tv_create_resources(encoder, connector);
	drm_mode_connector_attach_encoder(connector, encoder);

	return 0;
}
| gpl-2.0 |
fdroid/gp-peak-kernel | drivers/net/pci-skeleton.c | 2920 | 51033 | /*
drivers/net/pci-skeleton.c
Maintained by Jeff Garzik <jgarzik@pobox.com>
Original code came from 8139too.c, which in turn was based
originally on Donald Becker's rtl8139.c driver, versions 1.11
and older. This driver was originally based on rtl8139.c
version 1.07. Header of rtl8139.c version 1.11:
-----<snip>-----
Written 1997-2000 by Donald Becker.
This software may be used and distributed according to the
terms of the GNU General Public License (GPL), incorporated
herein by reference. Drivers based on or derived from this
code fall under the GPL and must retain the authorship,
copyright and license notice. This file is not a complete
program and may only be used when the entire operating
system is licensed under the GPL.
This driver is for boards based on the RTL8129 and RTL8139
PCI ethernet chips.
The author may be reached as becker@scyld.com, or C/O Scyld
Computing Corporation 410 Severn Ave., Suite 210 Annapolis
MD 21403
Support and updates available at
http://www.scyld.com/network/rtl8139.html
Twister-tuning table provided by Kinston
<shangh@realtek.com.tw>.
-----<snip>-----
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
-----------------------------------------------------------------------------
Theory of Operation
I. Board Compatibility
This device driver is designed for the RealTek RTL8139 series, the RealTek
Fast Ethernet controllers for PCI and CardBus. This chip is used on many
low-end boards, sometimes with its markings changed.
II. Board-specific settings
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
III. Driver operation
IIIa. Rx Ring buffers
The receive unit uses a single linear ring buffer rather than the more
common (and more efficient) descriptor-based architecture. Incoming frames
are sequentially stored into the Rx region, and the host copies them into
skbuffs.
Comment: While it is theoretically possible to process many frames in place,
any delay in Rx processing would cause us to drop frames. More importantly,
the Linux protocol stack is not designed to operate in this manner.
IIIb. Tx operation
The RTL8139 uses a fixed set of four Tx descriptors in register space.
In a stunningly bad design choice, Tx frames must be 32 bit aligned. Linux
aligns the IP header on word boundaries, and 14 byte ethernet header means
that almost all frames will need to be copied to an alignment buffer.
IVb. References
http://www.realtek.com.tw/
http://www.scyld.com/expert/NWay.html
IVc. Errata
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/io.h>
#define NETDRV_VERSION "1.0.1"
#define MODNAME "netdrv"
#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
static char version[] __devinitdata =
KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
" Support available from http://foo.com/bar/baz.html\n";
/* define to 1 to enable PIO instead of MMIO */
#undef USE_IO_OPS
/* define to 1 to enable copious debugging info */
#undef NETDRV_DEBUG
/* define to 1 to disable lightweight runtime debugging checks */
#undef NETDRV_NDEBUG
#ifdef NETDRV_DEBUG
/* note: prints function name for you */
#define DPRINTK(fmt, args...) \
printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
#define DPRINTK(fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG fmt, ##args); \
} while (0)
#endif
#ifdef NETDRV_NDEBUG
#define assert(expr) do {} while (0)
#else
#define assert(expr) \
if (!(expr)) { \
printk("Assertion failed! %s,%s,%s,line=%d\n", \
#expr, __FILE__, __func__, __LINE__); \
}
#endif
/* A few user-configurable values. */
/* media options */
static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
#define RX_BUF_PAD 16
#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
#define MAX_ETH_FRAME_SIZE 1536
/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
/* PCI Tuning Parameters
Threshold is bytes transferred to chip before transmission starts. */
#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
/* The following settings are log_2(bytes)-4:
0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
*/
#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6 * HZ)
enum {
HAS_CHIP_XCVR = 0x020000,
HAS_LNK_CHNG = 0x040000,
};
#define NETDRV_MIN_IO_SIZE 0x80
#define RTL8139B_IO_SIZE 256
#define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
typedef enum {
RTL8139 = 0,
NETDRV_CB,
SMC1211TX,
/*MPX5030,*/
DELTA8139,
ADDTRON8139,
} board_t;
/* indexed by board_t, above */
static struct {
const char *name;
} board_info[] __devinitdata = {
{ "RealTek RTL8139 Fast Ethernet" },
{ "RealTek RTL8139B PCI/CardBus" },
{ "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
{ "Delta Electronics 8139 10/100BaseTX" },
{ "Addtron Technology 8139 10/100BaseTX" },
};
static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
/* {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
{0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
{0,}
};
MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
/* The rest of these values should never change. */
/* Symbolic offsets to registers. */
enum NETDRV_registers {
MAC0 = 0, /* Ethernet hardware address. */
MAR0 = 8, /* Multicast filter. */
TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
RxBuf = 0x30,
RxEarlyCnt = 0x34,
RxEarlyStatus = 0x36,
ChipCmd = 0x37,
RxBufPtr = 0x38,
RxBufAddr = 0x3A,
IntrMask = 0x3C,
IntrStatus = 0x3E,
TxConfig = 0x40,
ChipVersion = 0x43,
RxConfig = 0x44,
Timer = 0x48, /* A general-purpose counter. */
RxMissed = 0x4C, /* 24 bits valid, write clears. */
Cfg9346 = 0x50,
Config0 = 0x51,
Config1 = 0x52,
FlashReg = 0x54,
MediaStatus = 0x58,
Config3 = 0x59,
Config4 = 0x5A, /* absent on RTL-8139A */
HltClk = 0x5B,
MultiIntr = 0x5C,
TxSummary = 0x60,
BasicModeCtrl = 0x62,
BasicModeStatus = 0x64,
NWayAdvert = 0x66,
NWayLPAR = 0x68,
NWayExpansion = 0x6A,
/* Undocumented registers, but required for proper operation. */
FIFOTMS = 0x70, /* FIFO Control and test. */
CSCR = 0x74, /* Chip Status and Configuration Register. */
PARA78 = 0x78,
PARA7c = 0x7c, /* Magic transceiver parameter register. */
Config5 = 0xD8, /* absent on RTL-8139A */
};
enum ClearBitMasks {
MultiIntrClear = 0xF000,
ChipCmdClear = 0xE2,
Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
};
enum ChipCmdBits {
CmdReset = 0x10,
CmdRxEnb = 0x08,
CmdTxEnb = 0x04,
RxBufEmpty = 0x01,
};
/* Interrupt register bits, using my own meaningful names. */
enum IntrStatusBits {
PCIErr = 0x8000,
PCSTimeout = 0x4000,
RxFIFOOver = 0x40,
RxUnderrun = 0x20,
RxOverflow = 0x10,
TxErr = 0x08,
TxOK = 0x04,
RxErr = 0x02,
RxOK = 0x01,
};
enum TxStatusBits {
TxHostOwns = 0x2000,
TxUnderrun = 0x4000,
TxStatOK = 0x8000,
TxOutOfWindow = 0x20000000,
TxAborted = 0x40000000,
TxCarrierLost = 0x80000000,
};
enum RxStatusBits {
RxMulticast = 0x8000,
RxPhysical = 0x4000,
RxBroadcast = 0x2000,
RxBadSymbol = 0x0020,
RxRunt = 0x0010,
RxTooLong = 0x0008,
RxCRCErr = 0x0004,
RxBadAlign = 0x0002,
RxStatusOK = 0x0001,
};
/* Bits in RxConfig. */
enum rx_mode_bits {
AcceptErr = 0x20,
AcceptRunt = 0x10,
AcceptBroadcast = 0x08,
AcceptMulticast = 0x04,
AcceptMyPhys = 0x02,
AcceptAllPhys = 0x01,
};
/* Bits in TxConfig. */
enum tx_config_bits {
TxIFG1 = (1 << 25), /* Interframe Gap Time */
TxIFG0 = (1 << 24), /* Enabling these bits violates IEEE 802.3 */
TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
TxClearAbt = (1 << 0), /* Clear abort (WO) */
TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
};
/* Bits in Config1 */
enum Config1Bits {
Cfg1_PM_Enable = 0x01,
Cfg1_VPD_Enable = 0x02,
Cfg1_PIO = 0x04,
Cfg1_MMIO = 0x08,
Cfg1_LWAKE = 0x10,
Cfg1_Driver_Load = 0x20,
Cfg1_LED0 = 0x40,
Cfg1_LED1 = 0x80,
};
enum RxConfigBits {
/* Early Rx threshold, none or X/16 */
RxCfgEarlyRxNone = 0,
RxCfgEarlyRxShift = 24,
/* rx fifo threshold */
RxCfgFIFOShift = 13,
RxCfgFIFONone = (7 << RxCfgFIFOShift),
/* Max DMA burst */
RxCfgDMAShift = 8,
RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
/* rx ring buffer length */
RxCfgRcv8K = 0,
RxCfgRcv16K = (1 << 11),
RxCfgRcv32K = (1 << 12),
RxCfgRcv64K = (1 << 11) | (1 << 12),
/* Disable packet wrap at end of Rx buffer */
RxNoWrap = (1 << 7),
};
/* Twister tuning parameters from RealTek.
Completely undocumented, but required to tune bad links. */
enum CSCRBits {
CSCR_LinkOKBit = 0x0400,
CSCR_LinkChangeBit = 0x0800,
CSCR_LinkStatusBits = 0x0f000,
CSCR_LinkDownOffCmd = 0x003c0,
CSCR_LinkDownCmd = 0x0f3c0,
};
enum Cfg9346Bits {
Cfg9346_Lock = 0x00,
Cfg9346_Unlock = 0xC0,
};
#define PARA78_default 0x78fa8388
#define PARA7c_default 0xcb38de43 /* param[0][3] */
#define PARA7c_xxx 0xcb38de43
static const unsigned long param[4][4] = {
{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
};
struct ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
typedef enum {
CH_8139 = 0,
CH_8139_K,
CH_8139A,
CH_8139B,
CH_8130,
CH_8139C,
} chip_t;
/* directly indexed by chip_t, above */
static const struct {
const char *name;
u8 version; /* from RTL8139C docs */
u32 RxConfigMask; /* should clear the bits supported by this chip */
} rtl_chip_info[] = {
{ "RTL-8139",
0x40,
0xf0fe0040, /* XXX copied from RTL8139A, verify */
},
{ "RTL-8139 rev K",
0x60,
0xf0fe0040,
},
{ "RTL-8139A",
0x70,
0xf0fe0040,
},
{ "RTL-8139B",
0x78,
0xf0fc0040
},
{ "RTL-8130",
0x7C,
0xf0fe0040, /* XXX copied from RTL8139A, verify */
},
{ "RTL-8139C",
0x74,
0xf0fc0040, /* XXX copied from RTL8139B, verify */
},
};
/* Per-device driver state, stored in netdev_priv(dev). */
struct netdrv_private {
	board_t board;			/* board table index matched at probe */
	void *mmio_addr;		/* register base (MMIO map or PIO addr) */
	int drv_flags;			/* HAS_* capability flags */
	struct pci_dev *pci_dev;	/* backing PCI device */
	struct timer_list timer;	/* Media selection timer. */
	unsigned char *rx_ring;		/* single linear Rx ring buffer */
	unsigned int cur_rx;	/* Index into the Rx buffer of next Rx pkt. */
	unsigned int tx_flag;	/* NOTE(review): Tx config bits; usage is in
				 * the xmit path, not visible here -- verify */
	/* NOTE(review): presumably producer/consumer Tx descriptor
	 * indices -- confirm against the xmit/interrupt handlers. */
	atomic_t cur_tx;
	atomic_t dirty_tx;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct ring_info tx_info[NUM_TX_DESC];
	unsigned char *tx_buf[NUM_TX_DESC];	/* Tx bounce buffers */
	unsigned char *tx_bufs;	/* Tx bounce buffer region. */
	dma_addr_t rx_ring_dma;	/* DMA handle for rx_ring */
	dma_addr_t tx_bufs_dma;	/* DMA handle for tx_bufs */
	char phys[4];		/* MII device addresses. */
	char twistie, twist_row, twist_col;	/* Twister tune state. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int media2:4;	/* Secondary monitored media port. */
	unsigned int medialock:1;	/* Don't sense media type. */
	unsigned int mediasense:1;	/* Media sensing in progress. */
	spinlock_t lock;	/* NOTE(review): guards device access -- see users */
	chip_t chipset;		/* rtl_chip_info index set in netdrv_init_board */
};
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
module_param(multicast_filter_limit, int, 0);
module_param(max_interrupt_work, int, 0);
module_param_array(media, int, NULL, 0);
MODULE_PARM_DESC(multicast_filter_limit,
MODNAME " maximum number of filtered multicast addresses");
MODULE_PARM_DESC(max_interrupt_work,
MODNAME " maximum events handled per interrupt");
MODULE_PARM_DESC(media,
MODNAME " Bits 0-3: media type, bit 17: full duplex");
static int read_eeprom(void *ioaddr, int location, int addr_len);
static int netdrv_open(struct net_device *dev);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location,
int val);
static void netdrv_timer(unsigned long data);
static void netdrv_tx_timeout(struct net_device *dev);
static void netdrv_init_ring(struct net_device *dev);
static int netdrv_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
static int netdrv_close(struct net_device *dev);
static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void netdrv_set_rx_mode(struct net_device *dev);
static void netdrv_hw_start(struct net_device *dev);
#ifdef USE_IO_OPS
#define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
#define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
#define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
#define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
#define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
#define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
#define NETDRV_W8_F NETDRV_W8
#define NETDRV_W16_F NETDRV_W16
#define NETDRV_W32_F NETDRV_W32
#undef readb
#undef readw
#undef readl
#undef writeb
#undef writew
#undef writel
#define readb(addr) inb((unsigned long)(addr))
#define readw(addr) inw((unsigned long)(addr))
#define readl(addr) inl((unsigned long)(addr))
#define writeb(val, addr) outb((val), (unsigned long)(addr))
#define writew(val, addr) outw((val), (unsigned long)(addr))
#define writel(val, addr) outl((val), (unsigned long)(addr))
#else
/* write MMIO register, with flush */
/* Flush avoids rtl8139 bug w/ posted MMIO writes */
#define NETDRV_W8_F(reg, val8) \
do { \
writeb((val8), ioaddr + (reg)); \
readb(ioaddr + (reg)); \
} while (0)
#define NETDRV_W16_F(reg, val16) \
do { \
writew((val16), ioaddr + (reg)); \
readw(ioaddr + (reg)); \
} while (0)
#define NETDRV_W32_F(reg, val32) \
do { \
writel((val32), ioaddr + (reg)); \
readl(ioaddr + (reg)); \
} while (0)
#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
#define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
#define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
#define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
#else
/* write MMIO register, then flush */
#define NETDRV_W8 NETDRV_W8_F
#define NETDRV_W16 NETDRV_W16_F
#define NETDRV_W32 NETDRV_W32_F
#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
/* read MMIO register */
#define NETDRV_R8(reg) readb(ioaddr + (reg))
#define NETDRV_R16(reg) readw(ioaddr + (reg))
#define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
#endif /* USE_IO_OPS */
static const u16 netdrv_intr_mask =
PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
TxErr | TxOK | RxErr | RxOK;
static const unsigned int netdrv_rx_config =
RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
(RX_FIFO_THRESH << RxCfgFIFOShift) |
(RX_DMA_BURST << RxCfgDMAShift);
/* Shared probe helper: allocate the net_device, enable and map the PCI
 * device, soft-reset the chip and identify its revision.  On success,
 * fills *dev_out and *ioaddr_out and returns 0; on failure returns a
 * negative errno with everything acquired here released again. */
static int __devinit netdrv_init_board(struct pci_dev *pdev,
				       struct net_device **dev_out,
				       void **ioaddr_out)
{
	void *ioaddr = NULL;
	struct net_device *dev;
	struct netdrv_private *tp;
	int rc, i;
	u32 pio_start, pio_end, pio_flags, pio_len;
	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
	u32 tmp;

	DPRINTK("ENTER\n");

	assert(pdev != NULL);
	assert(ioaddr_out != NULL);

	*ioaddr_out = NULL;
	*dev_out = NULL;

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*tp));
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc new ethernet\n");
		DPRINTK("EXIT, returning -ENOMEM\n");
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);

	/* enable device(incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* BAR 0 is the PIO window, BAR 1 the MMIO window. */
	pio_start = pci_resource_start(pdev, 0);
	pio_end = pci_resource_end(pdev, 0);
	pio_flags = pci_resource_flags(pdev, 0);
	pio_len = pci_resource_len(pdev, 0);

	mmio_start = pci_resource_start(pdev, 1);
	mmio_end = pci_resource_end(pdev, 1);
	mmio_flags = pci_resource_flags(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);

	/* set this immediately, we need to know before
	 * we talk to the chip directly */
	DPRINTK("PIO region size == %#02X\n", pio_len);
	DPRINTK("MMIO region size == %#02lX\n", mmio_len);

	/* make sure PCI base addr 0 is PIO */
	if (!(pio_flags & IORESOURCE_IO)) {
		dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
		rc = -ENODEV;
		goto err_out;
	}

	/* make sure PCI base addr 1 is MMIO */
	if (!(mmio_flags & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
		rc = -ENODEV;
		goto err_out;
	}

	/* check for weird/broken PCI region reporting */
	if ((pio_len < NETDRV_MIN_IO_SIZE) ||
	    (mmio_len < NETDRV_MIN_IO_SIZE)) {
		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out;
	}

	rc = pci_request_regions(pdev, MODNAME);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

#ifdef USE_IO_OPS
	ioaddr = (void *)pio_start;
#else
	/* ioremap MMIO region */
	ioaddr = ioremap(mmio_start, mmio_len);
	if (ioaddr == NULL) {
		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res;
	}
#endif /* USE_IO_OPS */

	/* Soft reset the chip. */
	NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);

	/* Check that the chip has finished the reset (poll up to ~10ms). */
	for (i = 1000; i > 0; i--)
		if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
			break;
		else
			udelay(10);

	/* Bring the chip out of low-power mode. */
	/* <insert device-specific code here> */

#ifndef USE_IO_OPS
	/* sanity checks -- ensure PIO and MMIO registers agree */
	assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
	assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
	assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
	assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
#endif /* !USE_IO_OPS */

	/* identify chip attached to board */
	tmp = NETDRV_R8(ChipVersion);
	for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
		if (tmp == rtl_chip_info[i].version) {
			tp->chipset = i;
			goto match;
		}

	/* if unknown chip, assume array element #0, original RTL-8139 in this case */
	dev_printk(KERN_DEBUG, &pdev->dev,
		   "unknown chip version, assuming RTL-8139\n");
	dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
		   NETDRV_R32(TxConfig));
	tp->chipset = 0;

match:
	DPRINTK("chipset id(%d) == index %d, '%s'\n",
		tmp, tp->chipset, rtl_chip_info[tp->chipset].name);

	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	DPRINTK("EXIT, returning 0\n");
	*ioaddr_out = ioaddr;
	*dev_out = dev;
	return 0;

err_out_unmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out:
	free_netdev(dev);
	DPRINTK("EXIT, returning %d\n", rc);
	return rc;
}
/* net_device callbacks wiring the stack to this driver.  MTU validation
 * and MAC-address handling use the generic ethernet helpers (eth_*);
 * everything else is implemented below. */
static const struct net_device_ops netdrv_netdev_ops = {
	.ndo_open = netdrv_open,
	.ndo_stop = netdrv_close,
	.ndo_start_xmit = netdrv_start_xmit,
	.ndo_set_multicast_list = netdrv_set_rx_mode,
	.ndo_do_ioctl = netdrv_ioctl,
	.ndo_tx_timeout = netdrv_tx_timeout,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
/*
 * PCI probe: initialise the board, read the MAC address from EEPROM,
 * fill in net_device/private state and apply any forced media options.
 * Returns 0 on success or a negative errno from netdrv_init_board().
 */
static int __devinit netdrv_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct netdrv_private *tp;
	int i, addr_len, option;
	void *ioaddr = NULL;
	static int board_idx = -1;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	DPRINTK("ENTER\n");
	assert(pdev != NULL);
	assert(ent != NULL);

	board_idx++;

	i = netdrv_init_board(pdev, &dev, &ioaddr);
	if (i < 0) {
		DPRINTK("EXIT, returning %d\n", i);
		return i;
	}

	tp = netdev_priv(dev);
	assert(ioaddr != NULL);
	assert(dev != NULL);
	assert(tp != NULL);

	/* Word 0 == 0x8129 means an 8-bit-address EEPROM (93C56),
	 * otherwise a 6-bit one (93C46); MAC lives at words 7..9. */
	addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((u16 *)(dev->dev_addr))[i] =
			le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));

	dev->netdev_ops = &netdrv_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) ioaddr;

	/* Fix: removed a duplicate "tp = netdev_priv(dev)" dead store --
	 * tp was already fetched right after netdrv_init_board(). */
	/* note: tp->chipset set in netdrv_init_board */
	tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | NETDRV_CAPS;
	tp->pci_dev = pdev;
	tp->board = ent->driver_data;
	tp->mmio_addr = ioaddr;
	spin_lock_init(&tp->lock);
	pci_set_drvdata(pdev, dev);
	tp->phys[0] = 32;

	netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
		    board_info[ent->driver_data].name,
		    dev->base_addr, dev->dev_addr, dev->irq);
	netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
		      rtl_chip_info[tp->chipset].name);

	/* Put the chip into low-power mode.
	 * NOTE(review): only Cfg9346_Unlock is written here; the actual
	 * device-specific power-down sequence appears to be missing --
	 * confirm against the chip documentation. */
	NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);

	/* The lower four bits are the media type. */
	option = (board_idx > 7) ? 0 : media[board_idx];
	if (option > 0) {
		tp->full_duplex = (option & 0x200) ? 1 : 0;
		tp->default_port = option & 15;
		if (tp->default_port)
			tp->medialock = 1;
	}
	if (tp->full_duplex) {
		netdev_info(dev, "Media type forced to Full Duplex\n");
		mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		tp->duplex_lock = 1;
	}

	DPRINTK("EXIT - returning 0\n");
	return 0;
}
/*
 * PCI remove: undo everything netdrv_init_one()/netdrv_init_board() set
 * up -- unregister the netdev, drop the MMIO mapping, release the PCI
 * regions and free the device.
 */
static void __devexit netdrv_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdrv_private *tp;

	DPRINTK("ENTER\n");
	assert(dev != NULL);

	tp = netdev_priv(dev);
	assert(tp != NULL);

	unregister_netdev(dev);
#ifndef USE_IO_OPS
	iounmap(tp->mmio_addr);
#endif /* !USE_IO_OPS */
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	DPRINTK("EXIT\n");
}
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits (bit-banged through the Cfg9346 register,
 * see read_eeprom() below). */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
#define EE_WRITE_0 0x00
#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
 * No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
 * A dummy register read serves as the delay. */
#define eeprom_delay() readl(ee_addr)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD (5)
#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
/*
 * Read one 16-bit word from the 93C46/93C56 serial EEPROM.
 * @ioaddr:   chip register base
 * @location: word index to read
 * @addr_len: number of address bits (6 for 93C46, 8 for 93C56)
 * Returns the word clocked in from the device, MSB first.
 */
static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
{
	int i;
	unsigned retval = 0;
	void *ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	DPRINTK("ENTER\n");
	writeb(EE_ENB & ~EE_CS, ee_addr);
	writeb(EE_ENB, ee_addr);
	eeprom_delay();

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;

		writeb(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	writeb(EE_ENB, ee_addr);
	eeprom_delay();

	/* Clock in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) |
			((readb(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writeb(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	writeb(~EE_CS, ee_addr);
	eeprom_delay();

	/* Fix: retval is unsigned, so print with %u (was %d, which is a
	 * printf format/argument type mismatch flagged by -Wformat). */
	DPRINTK("EXIT - returning %u\n", retval);
	return retval;
}
/* MII serial management: mostly bogus for now. */
/* Read and write the MII management registers using software-generated
   serial MDIO protocol.
   The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues. */
/* Bit positions in the bit-banged MDIO port (mapped at Config4,
 * see mdio_read()/mdio_write()). */
#define MDIO_DIR 0x80
#define MDIO_DATA_OUT 0x04
#define MDIO_DATA_IN 0x02
#define MDIO_CLK 0x01
#define MDIO_WRITE0 (MDIO_DIR)
#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
/* A dummy readback provides the inter-transition settle time. */
#define mdio_delay() readb(mdio_addr)
/* Map of generic MII register numbers 0-7 to 8139 internal register
 * offsets; 0 marks registers with no 8139 equivalent. */
static char mii_2_8139_map[8] = {
	BasicModeCtrl,
	BasicModeStatus,
	0,
	0,
	NWayAdvert,
	NWayLPAR,
	NWayExpansion,
	0
};
/* Synchronise the MII management interface by clocking out a preamble
 * of "1" bits.  NOTE(review): the loop runs 33 times (32..0 inclusive),
 * matching the original code; the extra pulse is harmless. */
static void mdio_sync(void *mdio_addr)
{
	int cycle;

	DPRINTK("ENTER\n");
	for (cycle = 32; cycle >= 0; cycle--) {
		writeb(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	DPRINTK("EXIT\n");
}
/*
 * Read an MII PHY register via bit-banged MDIO.  A phy_id > 31 denotes
 * the 8139's built-in PHY, which is accessed through the memory-mapped
 * registers listed in mii_2_8139_map[] instead.
 * Returns the 16-bit register value (0 for unmapped internal registers).
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *mdio_addr = tp->mmio_addr + Config4;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int retval = 0;
	int i;

	DPRINTK("ENTER\n");
	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		DPRINTK("EXIT after directly using 8139 internal regs\n");
		return location < 8 && mii_2_8139_map[location] ?
			readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
	}
	mdio_sync(mdio_addr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;

		writeb(MDIO_DIR | dataval, mdio_addr);
		mdio_delay();
		writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		writeb(0, mdio_addr);
		mdio_delay();
		/* Fix: the old code applied "? 1 : 0" to the whole
		 * "(retval << 1) | data" expression (ternary binds looser
		 * than |), collapsing retval to 0 or 1 on every clock.
		 * Shift in exactly one bit per clock, as read_eeprom()
		 * does. */
		retval = (retval << 1) |
			((readb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
		writeb(MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
	return (retval >> 1) & 0xffff;
}
/*
 * Write an MII PHY register via bit-banged MDIO.  A phy_id > 31 denotes
 * the 8139's built-in PHY, programmed directly through the mapped
 * registers in mii_2_8139_map[] (unmapped locations are ignored).
 */
static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *mdio_addr = tp->mmio_addr + Config4;
	int mii_cmd =
		(0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int bit;

	DPRINTK("ENTER\n");
	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		if (location < 8 && mii_2_8139_map[location]) {
			writew(value,
			       tp->mmio_addr + mii_2_8139_map[location]);
			/* read back to flush the posted write */
			readw(tp->mmio_addr + mii_2_8139_map[location]);
		}
		DPRINTK("EXIT after directly using 8139 internal regs\n");
		return;
	}

	mdio_sync(mdio_addr);

	/* Bit-bang the 32-bit write frame out, MSB first. */
	for (bit = 31; bit >= 0; bit--) {
		int out = (mii_cmd & (1 << bit)) ? MDIO_WRITE1 : MDIO_WRITE0;

		writeb(out, mdio_addr);
		mdio_delay();
		writeb(out | MDIO_CLK, mdio_addr);
		mdio_delay();
	}

	/* Clock out two extra bits to return the wire to idle. */
	for (bit = 2; bit > 0; bit--) {
		writeb(0, mdio_addr);
		mdio_delay();
		writeb(MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	DPRINTK("EXIT\n");
}
/*
 * ndo_open: grab the (shared) IRQ, allocate the DMA-coherent Tx bounce
 * buffers and Rx ring, program the hardware and start the media timer.
 * Returns 0 on success, -ENOMEM or the request_irq() error on failure.
 */
static int netdrv_open(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	int rc;

	DPRINTK("ENTER\n");
	rc = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED,
			 dev->name, dev);
	if (rc) {
		DPRINTK("EXIT, returning %d\n", rc);
		return rc;
	}

	tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
					   &tp->tx_bufs_dma);
	tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
					   &tp->rx_ring_dma);
	if (!tp->tx_bufs || !tp->rx_ring) {
		/* Either allocation failed: release whichever succeeded. */
		free_irq(dev->irq, dev);
		if (tp->tx_bufs)
			pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
					    tp->tx_bufs, tp->tx_bufs_dma);
		if (tp->rx_ring)
			pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
					    tp->rx_ring, tp->rx_ring_dma);
		DPRINTK("EXIT, returning -ENOMEM\n");
		return -ENOMEM;
	}

	tp->full_duplex = tp->duplex_lock;
	tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;

	netdrv_init_ring(dev);
	netdrv_hw_start(dev);

	netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
		   (unsigned long long)pci_resource_start(tp->pci_dev, 1),
		   dev->irq, NETDRV_R8(MediaStatus),
		   tp->full_duplex ? "full" : "half");

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&tp->timer);
	tp->timer.expires = jiffies + 3 * HZ;
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = netdrv_timer;
	add_timer(&tp->timer);

	DPRINTK("EXIT, returning 0\n");
	return 0;
}
/* Start the hardware at open or resume.
 * Soft-resets the chip, restores the MAC address, re-enables Tx/Rx,
 * reprograms the Rx/Tx configuration and DMA addresses, then unmasks
 * all interrupts and starts the Tx queue.  Called from netdrv_open(),
 * netdrv_tx_timeout() and netdrv_resume(). */
static void netdrv_hw_start(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	u32 i;

	DPRINTK("ENTER\n");
	/* Soft reset the chip. */
	NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
	udelay(100);
	/* Check that the chip has finished the reset (bounded busy-wait). */
	for (i = 1000; i > 0; i--)
		if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
			break;
	/* Restore our idea of the MAC address. */
	NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
	NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
	/* Must enable Tx/Rx before setting transfer thresholds! */
	NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
		    CmdRxEnb | CmdTxEnb);
	/* Merge the driver's Rx config with the revision-specific mask. */
	i = netdrv_rx_config |
		(NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	NETDRV_W32_F(RxConfig, i);
	/* Check this value: the documentation for IFG contradicts itself. */
	NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
	/* unlock Config[01234] and BMCR register writes */
	NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
	udelay(10);
	tp->cur_rx = 0;
	/* Lock Config[01234] and BMCR register writes */
	NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
	udelay(10);
	/* init Rx ring buffer DMA address */
	NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
	/* init Tx buffer DMA addresses (one slot per descriptor within the
	 * single coherent tx_bufs allocation) */
	for (i = 0; i < NUM_TX_DESC; i++)
		NETDRV_W32_F(TxAddr0 + (i * 4),
			     tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
	NETDRV_W32_F(RxMissed, 0);
	netdrv_set_rx_mode(dev);
	/* no early-rx interrupts */
	NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
	/* make sure RxTx has started */
	NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
		    CmdRxEnb | CmdTxEnb);
	/* Enable all known interrupts by setting the interrupt mask. */
	NETDRV_W16_F(IntrMask, netdrv_intr_mask);
	netif_start_queue(dev);
	DPRINTK("EXIT\n");
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
 * Resets both ring cursors and points each Tx slot at its region of the
 * shared tx_bufs bounce buffer. */
static void netdrv_init_ring(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	int slot;

	DPRINTK("ENTER\n");
	tp->cur_rx = 0;
	atomic_set(&tp->cur_tx, 0);
	atomic_set(&tp->dirty_tx, 0);

	for (slot = 0; slot < NUM_TX_DESC; slot++) {
		tp->tx_info[slot].skb = NULL;
		tp->tx_info[slot].mapping = 0;
		tp->tx_buf[slot] = &tp->tx_bufs[slot * TX_BUF_SIZE];
	}
	DPRINTK("EXIT\n");
}
/* Self-rearming media timer (every 60 s): re-reads the MII link-partner
 * ability word and switches the chip between half/full duplex when the
 * negotiated result changes (unless duplex is locked). */
static void netdrv_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	int next_tick = 60 * HZ;
	int mii_lpa;

	mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
	/* 0xffff means no usable PHY response; skip duplex evaluation. */
	if (!tp->duplex_lock && mii_lpa != 0xffff) {
		int duplex = ((mii_lpa & LPA_100FULL) ||
			      (mii_lpa & 0x01C0) == 0x0040);
		if (tp->full_duplex != duplex) {
			tp->full_duplex = duplex;
			netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
				    tp->full_duplex ? "full" : "half",
				    tp->phys[0], mii_lpa);
			/* Config1 is write-protected: unlock, set the
			 * duplex bits, re-lock. */
			NETDRV_W8(Cfg9346, Cfg9346_Unlock);
			NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
			NETDRV_W8(Cfg9346, Cfg9346_Lock);
		}
	}
	netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
		   NETDRV_R16(NWayLPAR));
	netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
		   NETDRV_R16(IntrMask),
		   NETDRV_R16(IntrStatus),
		   NETDRV_R32(RxEarlyStatus));
	netdev_dbg(dev, "Chip config %02x %02x\n",
		   NETDRV_R8(Config0), NETDRV_R8(Config1));
	/* Re-arm ourselves for the next tick. */
	tp->timer.expires = jiffies + next_tick;
	add_timer(&tp->timer);
}
/*
 * Drop every queued-but-unsent Tx packet and reset the Tx ring cursors.
 * Each dropped frame is counted in tx_dropped and its DMA mapping (if
 * any) is released first.
 */
static void netdrv_tx_clear(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	int slot;

	atomic_set(&tp->cur_tx, 0);
	atomic_set(&tp->dirty_tx, 0);

	/* Dump the unsent Tx packets. */
	for (slot = 0; slot < NUM_TX_DESC; slot++) {
		struct ring_info *info = &tp->tx_info[slot];

		if (info->mapping != 0) {
			pci_unmap_single(tp->pci_dev, info->mapping,
					 info->skb->len, PCI_DMA_TODEVICE);
			info->mapping = 0;
		}
		if (info->skb) {
			dev_kfree_skb(info->skb);
			info->skb = NULL;
			dev->stats.tx_dropped++;
		}
	}
}
/* ndo_tx_timeout: the Tx path stalled.  Disables Tx and all interrupts,
 * dumps per-descriptor status for diagnosis, drops the pending Tx
 * packets (under tp->lock, so a shared IRQ can't scavenge concurrently)
 * and then restarts the hardware from scratch. */
static void netdrv_tx_timeout(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	int i;
	u8 tmp8;
	unsigned long flags;

	netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
		   NETDRV_R8(ChipCmd),
		   NETDRV_R16(IntrStatus),
		   NETDRV_R8(MediaStatus));
	/* disable Tx ASAP, if not already */
	tmp8 = NETDRV_R8(ChipCmd);
	if (tmp8 & CmdTxEnb)
		NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
	/* Disable interrupts by clearing the interrupt mask. */
	NETDRV_W16(IntrMask, 0x0000);
	/* Emit info to figure out what went wrong. */
	netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
		   atomic_read(&tp->cur_tx),
		   atomic_read(&tp->dirty_tx));
	for (i = 0; i < NUM_TX_DESC; i++)
		netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
			   i, NETDRV_R32(TxStatus0 + (i * 4)),
			   i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
			   "(queue head)" : "");
	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irqsave(&tp->lock, flags);
	netdrv_tx_clear(dev);
	spin_unlock_irqrestore(&tp->lock, flags);
	/* ...and finally, reset everything */
	netdrv_hw_start(dev);
	netif_wake_queue(dev);
}
/* ndo_start_xmit: queue one packet for transmission.
 * The payload is copied into the slot's bounce buffer (no scatter/
 * gather), then the slot is handed to hardware by writing its TxStatus
 * register.  The skb is kept in tx_info[] until netdrv_tx_interrupt()
 * reports completion and frees it. */
static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	int entry;

	/* Calculate the next Tx descriptor entry. */
	entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
	assert(tp->tx_info[entry].skb == NULL);
	assert(tp->tx_info[entry].mapping == 0);
	tp->tx_info[entry].skb = skb;
	/* tp->tx_info[entry].mapping = 0; */
	skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
	/* Note: the chip doesn't have auto-pad!
	 * Short frames are stretched to ETH_ZLEN via the length field. */
	NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
		   tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
	atomic_inc(&tp->cur_tx);
	/* Stop the queue once every descriptor slot is in flight. */
	if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
		netif_stop_queue(dev);
	netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
		   skb->data, skb->len, entry);
	return NETDEV_TX_OK;
}
/* Scavenge completed Tx descriptors.
 * Called from netdrv_interrupt() with tp->lock held.  Walks the ring
 * from dirty_tx to cur_tx, folds hardware status into the statistics,
 * frees completed skbs and wakes the queue when slots free up. */
static void netdrv_tx_interrupt(struct net_device *dev,
				struct netdrv_private *tp,
				void *ioaddr)
{
	int cur_tx, dirty_tx, tx_left;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	dirty_tx = atomic_read(&tp->dirty_tx);
	cur_tx = atomic_read(&tp->cur_tx);
	tx_left = cur_tx - dirty_tx;
	while (tx_left > 0) {
		int entry = dirty_tx % NUM_TX_DESC;
		int txstatus;

		txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
			break; /* It still hasn't been Txed */
		/* Note: TxCarrierLost is always asserted at 100mbps. */
		if (txstatus & (TxOutOfWindow | TxAborted)) {
			/* There was an major error, log it. */
			netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
				   txstatus);
			dev->stats.tx_errors++;
			if (txstatus & TxAborted) {
				dev->stats.tx_aborted_errors++;
				/* Clear the abort so Tx can resume. */
				NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
			}
			if (txstatus & TxCarrierLost)
				dev->stats.tx_carrier_errors++;
			if (txstatus & TxOutOfWindow)
				dev->stats.tx_window_errors++;
		} else {
			if (txstatus & TxUnderrun) {
				/* Add 64 to the Tx FIFO threshold. */
				if (tp->tx_flag < 0x00300000)
					tp->tx_flag += 0x00020000;
				dev->stats.tx_fifo_errors++;
			}
			/* Bits 27:24 carry this frame's collision count. */
			dev->stats.collisions += (txstatus >> 24) & 15;
			dev->stats.tx_bytes += txstatus & 0x7ff;
			dev->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (tp->tx_info[entry].mapping != 0) {
			pci_unmap_single(tp->pci_dev,
					 tp->tx_info[entry].mapping,
					 tp->tx_info[entry].skb->len,
					 PCI_DMA_TODEVICE);
			tp->tx_info[entry].mapping = 0;
		}
		dev_kfree_skb_irq(tp->tx_info[entry].skb);
		tp->tx_info[entry].skb = NULL;
		dirty_tx++;
		if (dirty_tx < 0) { /* handle signed int overflow */
			atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
			dirty_tx = cur_tx - tx_left + 1;
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		/* Re-read cur_tx: new packets may have been queued since. */
		cur_tx = atomic_read(&tp->cur_tx);
		tx_left = cur_tx - dirty_tx;
	}
#ifndef NETDRV_NDEBUG
	/* Debug-build sanity check for a runaway dirty pointer. */
	if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
		netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
			   dirty_tx, atomic_read(&tp->cur_tx));
		dirty_tx += NUM_TX_DESC;
	}
#endif /* NETDRV_NDEBUG */
	atomic_set(&tp->dirty_tx, dirty_tx);
}
/* TODO: clean this up! Rx reset need not be this intensive */
/* Handle a receive error: classify it into the error counters, then
 * reset and re-enable the receiver (per RealTek recommendation) and
 * restore the Rx filter configuration. */
static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
			  struct netdrv_private *tp, void *ioaddr)
{
	u8 tmp8;
	int tmp_work = 1000;	/* bounded wait for Rx/Tx re-enable */

	netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
	if (rx_status & RxTooLong)
		netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
			   rx_status);
	/* A.C.: The chip hangs here. */
	dev->stats.rx_errors++;
	if (rx_status & (RxBadSymbol | RxBadAlign))
		dev->stats.rx_frame_errors++;
	if (rx_status & (RxRunt | RxTooLong))
		dev->stats.rx_length_errors++;
	if (rx_status & RxCRCErr)
		dev->stats.rx_crc_errors++;
	/* Reset the receiver, based on RealTek recommendation.(Bug?) */
	tp->cur_rx = 0;
	/* disable receive */
	tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
	NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
	/* A.C.: Reset the multicast list. */
	netdrv_set_rx_mode(dev);
	/* XXX potentially temporary hack to
	 * restart hung receiver */
	while (--tmp_work > 0) {
		tmp8 = NETDRV_R8(ChipCmd);
		if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
			break;
		NETDRV_W8_F(ChipCmd,
			    (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
	}
	/* G.S.: Re-enable receiver */
	/* XXX temporary hack to work around receiver hang */
	netdrv_set_rx_mode(dev);
	if (tmp_work <= 0)
		netdev_warn(dev, "tx/rx enable wait too long\n");
}
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
   field alignments and semantics. */
/* Drain the contiguous Rx ring: for each frame the chip prefixes a
 * 32-bit status/size header; copy the payload into a fresh skb and pass
 * it up the stack.  Any Rx error resets the receiver (netdrv_rx_err)
 * and aborts further processing.  Called with tp->lock held. */
static void netdrv_rx_interrupt(struct net_device *dev,
				struct netdrv_private *tp, void *ioaddr)
{
	unsigned char *rx_ring;
	u16 cur_rx;

	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	rx_ring = tp->rx_ring;
	cur_rx = tp->cur_rx;

	netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   cur_rx, NETDRV_R16(RxBufAddr),
		   NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));

	while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
		int ring_offset = cur_rx % RX_BUF_LEN;
		u32 rx_status;
		unsigned int rx_size;
		unsigned int pkt_size;
		struct sk_buff *skb;

		/* read size+status of next frame from DMA ring buffer */
		rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
		rx_size = rx_status >> 16;	/* upper half is length */
		pkt_size = rx_size - 4;		/* minus trailing CRC */

		netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
			   rx_status, rx_size, cur_rx);
#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
		print_hex_dump_bytes("Frame contents: ", HEX_DUMP_OFFSET,
				     &rx_ring[ring_offset], 70);
#endif
		/* If Rx err or invalid rx_size/rx_status received
		 *(which happens if we get lost in the ring),
		 * Rx process gets reset, so we abort any further
		 * Rx processing.
		 */
		if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
		    (!(rx_status & RxStatusOK))) {
			netdrv_rx_err(rx_status, dev, tp, ioaddr);
			return;
		}
		/* Malloc up new buffer, compatible with net-2e. */
		/* Omit the four octet CRC from the length. */
		/* TODO: consider allocating skb's outside of
		 * interrupt context, both to speed interrupt processing,
		 * and also to reduce the chances of having to
		 * drop packets here under memory pressure.
		 */
		skb = dev_alloc_skb(pkt_size + 2);
		if (skb) {
			skb_reserve(skb, 2); /* 16 byte align the IP fields. */
			skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_bytes += pkt_size;
			dev->stats.rx_packets++;
		} else {
			netdev_warn(dev, "Memory squeeze, dropping packet\n");
			dev->stats.rx_dropped++;
		}
		/* Advance past header + frame, rounded up to a dword. */
		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
		NETDRV_W16_F(RxBufPtr, cur_rx - 16);
	}
	netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
		   cur_rx, NETDRV_R16(RxBufAddr),
		   NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
	tp->cur_rx = cur_rx;
}
/* Handle the uncommon interrupt causes: link change (reported as
 * RxUnderrun on chips with HAS_LNK_CHNG), Rx FIFO/ring overflow and
 * PCI bus errors; updates the corresponding error statistics. */
static void netdrv_weird_interrupt(struct net_device *dev,
				   struct netdrv_private *tp,
				   void *ioaddr,
				   int status, int link_changed)
{
	netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
		      status);
	assert(dev != NULL);
	assert(tp != NULL);
	assert(ioaddr != NULL);

	/* Update the error count. */
	dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
	NETDRV_W32(RxMissed, 0);

	if ((status & RxUnderrun) && link_changed &&
	    (tp->drv_flags & HAS_LNK_CHNG)) {
		/* Really link-change on new chips. */
		int lpar = NETDRV_R16(NWayLPAR);
		int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
			      tp->duplex_lock);
		if (tp->full_duplex != duplex) {
			tp->full_duplex = duplex;
			/* Config1 writes need the unlock/lock dance. */
			NETDRV_W8(Cfg9346, Cfg9346_Unlock);
			NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
			NETDRV_W8(Cfg9346, Cfg9346_Lock);
		}
		/* Consumed as a link change, not a real underrun. */
		status &= ~RxUnderrun;
	}
	/* XXX along with netdrv_rx_err, are we double-counting errors? */
	if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
		dev->stats.rx_errors++;
	if (status & (PCSTimeout))
		dev->stats.rx_length_errors++;
	if (status & (RxUnderrun | RxFIFOOver))
		dev->stats.rx_fifo_errors++;
	if (status & RxOverflow) {
		dev->stats.rx_over_errors++;
		/* Resynchronise our read pointer with the chip's. */
		tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
		NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
	}
	if (status & PCIErr) {
		u16 pci_cmd_status;
		pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
		netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
	}
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Shared IRQ entry point: under tp->lock, acknowledge the interrupt
 * status and dispatch up to max_interrupt_work rounds of error/Rx/Tx
 * processing.  Returns IRQ_NONE only when the very first status read is
 * 0xFFFF (device gone / hot-unplugged). */
static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdrv_private *tp = netdev_priv(dev);
	int boguscnt = max_interrupt_work;
	void *ioaddr = tp->mmio_addr;
	int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
	int handled = 0;

	spin_lock(&tp->lock);
	do {
		status = NETDRV_R16(IntrStatus);
		/* h/w no longer present(hotplug?) or major error, bail */
		if (status == 0xFFFF)
			break;
		handled = 1;
		/* Acknowledge all of the current interrupt sources ASAP */
		NETDRV_W16_F(IntrStatus, status);
		netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
			   status, NETDRV_R16(IntrStatus));
		if ((status &
		     (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
		      RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
			break;
		/* Check uncommon events with one test. */
		if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
			      RxFIFOOver | TxErr | RxErr))
			netdrv_weird_interrupt(dev, tp, ioaddr,
					       status, link_changed);
		if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
			netdrv_rx_interrupt(dev, tp, ioaddr);
		if (status & (TxOK | TxErr))
			netdrv_tx_interrupt(dev, tp, ioaddr);
		boguscnt--;
	} while (boguscnt > 0);
	if (boguscnt <= 0) {
		netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
			    status);
		/* Clear all interrupt sources. */
		NETDRV_W16(IntrStatus, 0xffff);
	}
	spin_unlock(&tp->lock);
	netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
		   NETDRV_R16(IntrStatus));
	return IRQ_RETVAL(handled);
}
/*
 * ndo_stop: quiesce the chip (stop Tx/Rx DMA, mask interrupts), harvest
 * the miss counter, release the IRQ and DMA buffers, and drop the chip
 * into low-power mode.  Always returns 0.
 */
static int netdrv_close(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	unsigned long flags;

	DPRINTK("ENTER\n");
	netif_stop_queue(dev);
	netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
		   NETDRV_R16(IntrStatus));

	del_timer_sync(&tp->timer);

	spin_lock_irqsave(&tp->lock, flags);
	/* Stop the chip's Tx and Rx DMA processes, then mask every
	 * interrupt source. */
	NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
	NETDRV_W16(IntrMask, 0x0000);
	/* Fold the hardware miss counter into the stats before reset. */
	dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
	NETDRV_W32(RxMissed, 0);
	spin_unlock_irqrestore(&tp->lock, flags);

	free_irq(dev->irq, dev);
	netdrv_tx_clear(dev);

	pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
			    tp->rx_ring, tp->rx_ring_dma);
	pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
			    tp->tx_bufs, tp->tx_bufs_dma);
	tp->rx_ring = NULL;
	tp->tx_bufs = NULL;

	/* Green! Put the chip in low-power mode. */
	NETDRV_W8(Cfg9346, Cfg9346_Unlock);
	NETDRV_W8(Config1, 0x03);
	NETDRV_W8(Cfg9346, Cfg9346_Lock);

	DPRINTK("EXIT\n");
	return 0;
}
/*
 * ndo_do_ioctl: MII register access (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG).  MDIO accesses are serialised with the driver's
 * interrupt handler via tp->lock.  Returns 0 or -EOPNOTSUPP.
 */
static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdrv_private *tp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	unsigned long flags;
	int ret = 0;

	DPRINTK("ENTER\n");
	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
		data->phy_id = tp->phys[0] & 0x3f;
		/* Fall Through */
	case SIOCGMIIREG:	/* Read MII PHY register. */
		spin_lock_irqsave(&tp->lock, flags);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f,
					  data->reg_num & 0x1f);
		spin_unlock_irqrestore(&tp->lock, flags);
		break;
	case SIOCSMIIREG:	/* Write MII PHY register. */
		spin_lock_irqsave(&tp->lock, flags);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f,
			   data->val_in);
		spin_unlock_irqrestore(&tp->lock, flags);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	DPRINTK("EXIT, returning %d\n", ret);
	return ret;
}
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */
/* Chooses between promiscuous / all-multicast / CRC-hash filtering
 * based on dev->flags and the multicast list, then programs RxConfig
 * and the MAR0/MAR1 hash registers.  Takes tp->lock itself unless
 * running in interrupt context, where the caller already holds it. */
static void netdrv_set_rx_mode(struct net_device *dev)
{
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	u32 mc_filter[2]; /* Multicast hash filter */
	int rx_mode;
	u32 tmp;

	DPRINTK("ENTER\n");
	netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
		   __func__, dev->flags, NETDRV_R32(RxConfig));

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, hash wide open. */
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		/* Build the 64-bit CRC-based multicast hash filter. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
	}
	/* if called from irq handler, lock already acquired */
	if (!in_irq())
		spin_lock_irq(&tp->lock);
	/* We can safely update without stopping the chip. */
	tmp = netdrv_rx_config | rx_mode |
		(NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
	NETDRV_W32_F(RxConfig, tmp);
	NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
	NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
	if (!in_irq())
		spin_unlock_irq(&tp->lock);
	DPRINTK("EXIT\n");
}
#ifdef CONFIG_PM
/* Legacy PM suspend hook: detach the interface, quiesce the chip (mask
 * interrupts, stop Tx/Rx DMA), harvest the miss counter, save PCI
 * config space and drop the device to D3hot. */
static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdrv_private *tp = netdev_priv(dev);
	void *ioaddr = tp->mmio_addr;
	unsigned long flags;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;
	netif_device_detach(dev);

	spin_lock_irqsave(&tp->lock, flags);
	/* Disable interrupts, stop Tx and Rx. */
	NETDRV_W16(IntrMask, 0x0000);
	NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
	/* Update the error counts. */
	dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
	NETDRV_W32(RxMissed, 0);
	spin_unlock_irqrestore(&tp->lock, flags);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
/* Legacy PM resume hook: power the device back to D0, restore its PCI
 * config space, reattach the interface and reprogram the hardware. */
static int netdrv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	netif_device_attach(dev);
	netdrv_hw_start(dev);
	return 0;
}
#endif /* CONFIG_PM */
/* PCI glue: probe/remove plus the optional legacy suspend/resume
 * hooks compiled in only under CONFIG_PM. */
static struct pci_driver netdrv_pci_driver = {
	.name = MODNAME,
	.id_table = netdrv_pci_tbl,
	.probe = netdrv_init_one,
	.remove = __devexit_p(netdrv_remove_one),
#ifdef CONFIG_PM
	.suspend = netdrv_suspend,
	.resume = netdrv_resume,
#endif /* CONFIG_PM */
};
/* Module entry point: register the PCI driver. */
static int __init netdrv_init_module(void)
{
/* As a module the banner is printed unconditionally here; when built
 * into the kernel it is printed from the probe routine instead. */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&netdrv_pci_driver);
}
/* Module exit point: per-device teardown happens in netdrv_remove_one(). */
static void __exit netdrv_cleanup_module(void)
{
	pci_unregister_driver(&netdrv_pci_driver);
}

module_init(netdrv_init_module);
module_exit(netdrv_cleanup_module);
| gpl-2.0 |
kgp700/exyroid-sgs2-nics | sound/isa/sb/emu8000.c | 2920 | 36522 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* and (c) 1999 Steve Ratcliffe <steve@parabola.demon.co.uk>
* Copyright (C) 1999-2000 Takashi Iwai <tiwai@suse.de>
*
* Routines for control of EMU8000 chip
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/emu8000.h>
#include <sound/emu8000_reg.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <sound/control.h>
#include <sound/initval.h>
/*
* emu8000 register controls
*/
/*
* The following routines read and write registers on the emu8000. They
* should always be called via the EMU8000*READ/WRITE macros and never
* directly. The macros handle the port number and command word.
*/
/* Write one 16-bit word to an EMU8000 register, selecting the register
 * through the pointer port only when it differs from the cached one. */
void snd_emu8000_poke(struct snd_emu8000 *emu, unsigned int port, unsigned int reg, unsigned int val)
{
	unsigned long flags;

	spin_lock_irqsave(&emu->reg_lock, flags);
	if (emu->last_reg != reg) {
		outw((unsigned short)reg, EMU8000_PTR(emu)); /* select register */
		emu->last_reg = reg;
	}
	outw((unsigned short)val, port); /* data word */
	spin_unlock_irqrestore(&emu->reg_lock, flags);
}
/* Read one 16-bit word from an EMU8000 register, selecting the register
 * through the pointer port only when it differs from the cached one. */
unsigned short snd_emu8000_peek(struct snd_emu8000 *emu, unsigned int port, unsigned int reg)
{
	unsigned long flags;
	unsigned short res;

	spin_lock_irqsave(&emu->reg_lock, flags);
	if (emu->last_reg != reg) {
		outw((unsigned short)reg, EMU8000_PTR(emu)); /* select register */
		emu->last_reg = reg;
	}
	res = inw(port); /* data word */
	spin_unlock_irqrestore(&emu->reg_lock, flags);
	return res;
}
/* Write a 32-bit value to an EMU8000 register as two 16-bit halves,
 * low word first, under the register lock. */
void snd_emu8000_poke_dw(struct snd_emu8000 *emu, unsigned int port, unsigned int reg, unsigned int val)
{
	unsigned long flags;

	spin_lock_irqsave(&emu->reg_lock, flags);
	if (emu->last_reg != reg) {
		outw((unsigned short)reg, EMU8000_PTR(emu)); /* select register */
		emu->last_reg = reg;
	}
	outw((unsigned short)val, port);		/* low 16 bits */
	outw((unsigned short)(val >> 16), port + 2);	/* high 16 bits */
	spin_unlock_irqrestore(&emu->reg_lock, flags);
}
/* Read a double word */
/*
 * Read a 32-bit value from an EMU8000 register: low word from 'port',
 * high word from 'port'+2, combined into one 32-bit result.
 */
unsigned int snd_emu8000_peek_dw(struct snd_emu8000 *emu, unsigned int port, unsigned int reg)
{
    unsigned short low;
    unsigned int res;
    unsigned long flags;
    spin_lock_irqsave(&emu->reg_lock, flags);
    if (reg != emu->last_reg) {
        outw((unsigned short)reg, EMU8000_PTR(emu)); /* Set register */
        emu->last_reg = reg; /* cache current selection */
    }
    low = inw(port); /* Read low word of data */
    res = low + (inw(port+2) << 16); /* combine with high word */
    spin_unlock_irqrestore(&emu->reg_lock, flags);
    return res;
}
/*
* Set up / close a channel to be used for DMA.
*/
/*
 * Set up (or close) a voice channel for sound-memory DMA.
 * mode is EMU8000_RAM_WRITE / EMU8000_RAM_READ / EMU8000_RAM_CLOSE,
 * optionally OR'ed with EMU8000_RAM_RIGHT to select the right channel.
 * The exact register write sequence follows the EMU8000 programming
 * guide and must not be reordered.
 */
/*exported*/ void
snd_emu8000_dma_chan(struct snd_emu8000 *emu, int ch, int mode)
{
    /* bit 24 of CCCA selects the right sample channel */
    unsigned right_bit = (mode & EMU8000_RAM_RIGHT) ? 0x01000000 : 0;
    mode &= EMU8000_RAM_MODE_MASK;
    if (mode == EMU8000_RAM_CLOSE) {
        /* return the voice to normal (non-DMA) operation */
        EMU8000_CCCA_WRITE(emu, ch, 0);
        EMU8000_DCYSUSV_WRITE(emu, ch, 0x807F);
        return;
    }
    /* quiesce the voice and prepare it for DMA access */
    EMU8000_DCYSUSV_WRITE(emu, ch, 0x80);
    EMU8000_VTFT_WRITE(emu, ch, 0);
    EMU8000_CVCF_WRITE(emu, ch, 0);
    EMU8000_PTRX_WRITE(emu, ch, 0x40000000);
    EMU8000_CPF_WRITE(emu, ch, 0x40000000);
    EMU8000_PSST_WRITE(emu, ch, 0);
    EMU8000_CSL_WRITE(emu, ch, 0);
    if (mode == EMU8000_RAM_WRITE) /* DMA write */
        EMU8000_CCCA_WRITE(emu, ch, 0x06000000 | right_bit);
    else /* DMA read */
        EMU8000_CCCA_WRITE(emu, ch, 0x04000000 | right_bit);
}
/*
*/
/*
 * Wait until the sound-memory address counter (SMALR) is ready for the
 * next read, sleeping one tick per poll; bails out on a pending signal.
 */
static void __devinit
snd_emu8000_read_wait(struct snd_emu8000 *emu)
{
    while ((EMU8000_SMALR_READ(emu) & 0x80000000) != 0) { /* busy bit */
        schedule_timeout_interruptible(1);
        if (signal_pending(current))
            break; /* give up early if the user interrupted us */
    }
}
/*
*/
/*
 * Wait until the sound-memory address counter (SMALW) is ready for the
 * next write; same polling scheme as snd_emu8000_read_wait().
 */
static void __devinit
snd_emu8000_write_wait(struct snd_emu8000 *emu)
{
    while ((EMU8000_SMALW_READ(emu) & 0x80000000) != 0) { /* busy bit */
        schedule_timeout_interruptible(1);
        if (signal_pending(current))
            break; /* give up early if the user interrupted us */
    }
}
/*
* detect a card at the given port
*/
/*
 * Probe for an EMU8000 at the configured ports.
 * Writes the standard HWCF init values and checks that the readback of
 * HWCF1/HWCF2 matches the expected chip signature.
 * Returns 0 if found, -ENODEV otherwise.
 */
static int __devinit
snd_emu8000_detect(struct snd_emu8000 *emu)
{
    /* Initialise */
    EMU8000_HWCF1_WRITE(emu, 0x0059);
    EMU8000_HWCF2_WRITE(emu, 0x0020);
    EMU8000_HWCF3_WRITE(emu, 0x0000);
    /* Check for a recognisable emu8000 */
    /*
    if ((EMU8000_U1_READ(emu) & 0x000f) != 0x000c)
        return -ENODEV;
    */
    if ((EMU8000_HWCF1_READ(emu) & 0x007e) != 0x0058)
        return -ENODEV;
    if ((EMU8000_HWCF2_READ(emu) & 0x0003) != 0x0003)
        return -ENODEV;
    snd_printdd("EMU8000 [0x%lx]: Synth chip found\n",
        emu->port1);
    return 0;
}
/*
 * initialize audio channels
 */
/*
 * Initialize the audio voices: stop all envelope engines, then clear
 * every per-voice parameter register.  The CPF/CVCF registers are
 * cleared in a separate final pass (presumably required by the chip's
 * init sequence -- matches the original driver order).
 */
static void __devinit
init_audio(struct snd_emu8000 *emu)
{
    int ch;
    /* turn off envelope engines */
    for (ch = 0; ch < EMU8000_CHANNELS; ch++)
        EMU8000_DCYSUSV_WRITE(emu, ch, 0x80);
    /* reset all other parameters to zero */
    for (ch = 0; ch < EMU8000_CHANNELS; ch++) {
        EMU8000_ENVVOL_WRITE(emu, ch, 0);
        EMU8000_ENVVAL_WRITE(emu, ch, 0);
        EMU8000_DCYSUS_WRITE(emu, ch, 0);
        EMU8000_ATKHLDV_WRITE(emu, ch, 0);
        EMU8000_LFO1VAL_WRITE(emu, ch, 0);
        EMU8000_ATKHLD_WRITE(emu, ch, 0);
        EMU8000_LFO2VAL_WRITE(emu, ch, 0);
        EMU8000_IP_WRITE(emu, ch, 0);
        EMU8000_IFATN_WRITE(emu, ch, 0);
        EMU8000_PEFE_WRITE(emu, ch, 0);
        EMU8000_FMMOD_WRITE(emu, ch, 0);
        EMU8000_TREMFRQ_WRITE(emu, ch, 0);
        EMU8000_FM2FRQ2_WRITE(emu, ch, 0);
        EMU8000_PTRX_WRITE(emu, ch, 0);
        EMU8000_VTFT_WRITE(emu, ch, 0);
        EMU8000_PSST_WRITE(emu, ch, 0);
        EMU8000_CSL_WRITE(emu, ch, 0);
        EMU8000_CCCA_WRITE(emu, ch, 0);
    }
    /* clear the current pitch/filter state last */
    for (ch = 0; ch < EMU8000_CHANNELS; ch++) {
        EMU8000_CPF_WRITE(emu, ch, 0);
        EMU8000_CVCF_WRITE(emu, ch, 0);
    }
}
/*
* initialize DMA address
*/
/*
 * Reset the four sound-memory DMA address registers
 * (left/right x read/write) to zero.
 */
static void __devinit
init_dma(struct snd_emu8000 *emu)
{
    EMU8000_SMALR_WRITE(emu, 0);
    EMU8000_SMARR_WRITE(emu, 0);
    EMU8000_SMALW_WRITE(emu, 0);
    EMU8000_SMARW_WRITE(emu, 0);
}
/*
* initialization arrays; from ADIP
*/
/*
 * Chip initialization tables from the ADIP (AWE32 Developer's
 * Information Pack).  Each table holds four banks of 32 words that
 * send_array() streams into the INIT1..INIT4 register sets.
 * Do not modify -- these are opaque vendor-specified values.
 */
static unsigned short init1[128] /*__devinitdata*/ = {
    0x03ff, 0x0030, 0x07ff, 0x0130, 0x0bff, 0x0230, 0x0fff, 0x0330,
    0x13ff, 0x0430, 0x17ff, 0x0530, 0x1bff, 0x0630, 0x1fff, 0x0730,
    0x23ff, 0x0830, 0x27ff, 0x0930, 0x2bff, 0x0a30, 0x2fff, 0x0b30,
    0x33ff, 0x0c30, 0x37ff, 0x0d30, 0x3bff, 0x0e30, 0x3fff, 0x0f30,
    0x43ff, 0x0030, 0x47ff, 0x0130, 0x4bff, 0x0230, 0x4fff, 0x0330,
    0x53ff, 0x0430, 0x57ff, 0x0530, 0x5bff, 0x0630, 0x5fff, 0x0730,
    0x63ff, 0x0830, 0x67ff, 0x0930, 0x6bff, 0x0a30, 0x6fff, 0x0b30,
    0x73ff, 0x0c30, 0x77ff, 0x0d30, 0x7bff, 0x0e30, 0x7fff, 0x0f30,
    0x83ff, 0x0030, 0x87ff, 0x0130, 0x8bff, 0x0230, 0x8fff, 0x0330,
    0x93ff, 0x0430, 0x97ff, 0x0530, 0x9bff, 0x0630, 0x9fff, 0x0730,
    0xa3ff, 0x0830, 0xa7ff, 0x0930, 0xabff, 0x0a30, 0xafff, 0x0b30,
    0xb3ff, 0x0c30, 0xb7ff, 0x0d30, 0xbbff, 0x0e30, 0xbfff, 0x0f30,
    0xc3ff, 0x0030, 0xc7ff, 0x0130, 0xcbff, 0x0230, 0xcfff, 0x0330,
    0xd3ff, 0x0430, 0xd7ff, 0x0530, 0xdbff, 0x0630, 0xdfff, 0x0730,
    0xe3ff, 0x0830, 0xe7ff, 0x0930, 0xebff, 0x0a30, 0xefff, 0x0b30,
    0xf3ff, 0x0c30, 0xf7ff, 0x0d30, 0xfbff, 0x0e30, 0xffff, 0x0f30,
};
/* second init pass: same layout as init1 with bit 15 set in odd words */
static unsigned short init2[128] /*__devinitdata*/ = {
    0x03ff, 0x8030, 0x07ff, 0x8130, 0x0bff, 0x8230, 0x0fff, 0x8330,
    0x13ff, 0x8430, 0x17ff, 0x8530, 0x1bff, 0x8630, 0x1fff, 0x8730,
    0x23ff, 0x8830, 0x27ff, 0x8930, 0x2bff, 0x8a30, 0x2fff, 0x8b30,
    0x33ff, 0x8c30, 0x37ff, 0x8d30, 0x3bff, 0x8e30, 0x3fff, 0x8f30,
    0x43ff, 0x8030, 0x47ff, 0x8130, 0x4bff, 0x8230, 0x4fff, 0x8330,
    0x53ff, 0x8430, 0x57ff, 0x8530, 0x5bff, 0x8630, 0x5fff, 0x8730,
    0x63ff, 0x8830, 0x67ff, 0x8930, 0x6bff, 0x8a30, 0x6fff, 0x8b30,
    0x73ff, 0x8c30, 0x77ff, 0x8d30, 0x7bff, 0x8e30, 0x7fff, 0x8f30,
    0x83ff, 0x8030, 0x87ff, 0x8130, 0x8bff, 0x8230, 0x8fff, 0x8330,
    0x93ff, 0x8430, 0x97ff, 0x8530, 0x9bff, 0x8630, 0x9fff, 0x8730,
    0xa3ff, 0x8830, 0xa7ff, 0x8930, 0xabff, 0x8a30, 0xafff, 0x8b30,
    0xb3ff, 0x8c30, 0xb7ff, 0x8d30, 0xbbff, 0x8e30, 0xbfff, 0x8f30,
    0xc3ff, 0x8030, 0xc7ff, 0x8130, 0xcbff, 0x8230, 0xcfff, 0x8330,
    0xd3ff, 0x8430, 0xd7ff, 0x8530, 0xdbff, 0x8630, 0xdfff, 0x8730,
    0xe3ff, 0x8830, 0xe7ff, 0x8930, 0xebff, 0x8a30, 0xefff, 0x8b30,
    0xf3ff, 0x8c30, 0xf7ff, 0x8d30, 0xfbff, 0x8e30, 0xffff, 0x8f30,
};
/* third init pass */
static unsigned short init3[128] /*__devinitdata*/ = {
    0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
    0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x8F7C, 0x167E, 0xF254,
    0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x8BAA, 0x1B6D, 0xF234,
    0x229F, 0x8429, 0x2746, 0x8529, 0x1F1C, 0x86E7, 0x229E, 0xF224,
    0x0DA4, 0x8429, 0x2C29, 0x8529, 0x2745, 0x87F6, 0x2C28, 0xF254,
    0x383B, 0x8428, 0x320F, 0x8528, 0x320E, 0x8F02, 0x1341, 0xF264,
    0x3EB6, 0x8428, 0x3EB9, 0x8528, 0x383A, 0x8FA9, 0x3EB5, 0xF294,
    0x3EB7, 0x8474, 0x3EBA, 0x8575, 0x3EB8, 0xC4C3, 0x3EBB, 0xC5C3,
    0x0000, 0xA404, 0x0001, 0xA504, 0x141F, 0x8671, 0x14FD, 0x8287,
    0x3EBC, 0xE610, 0x3EC8, 0x8C7B, 0x031A, 0x87E6, 0x3EC8, 0x86F7,
    0x3EC0, 0x821E, 0x3EBE, 0xD208, 0x3EBD, 0x821F, 0x3ECA, 0x8386,
    0x3EC1, 0x8C03, 0x3EC9, 0x831E, 0x3ECA, 0x8C4C, 0x3EBF, 0x8C55,
    0x3EC9, 0xC208, 0x3EC4, 0xBC84, 0x3EC8, 0x8EAD, 0x3EC8, 0xD308,
    0x3EC2, 0x8F7E, 0x3ECB, 0x8219, 0x3ECB, 0xD26E, 0x3EC5, 0x831F,
    0x3EC6, 0xC308, 0x3EC3, 0xB2FF, 0x3EC9, 0x8265, 0x3EC9, 0x8319,
    0x1342, 0xD36E, 0x3EC7, 0xB3FF, 0x0000, 0x8365, 0x1420, 0x9570,
};
/* fourth (final) init pass */
static unsigned short init4[128] /*__devinitdata*/ = {
    0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
    0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x0F7C, 0x167E, 0x7254,
    0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x0BAA, 0x1B6D, 0x7234,
    0x229F, 0x8429, 0x2746, 0x8529, 0x1F1C, 0x06E7, 0x229E, 0x7224,
    0x0DA4, 0x8429, 0x2C29, 0x8529, 0x2745, 0x07F6, 0x2C28, 0x7254,
    0x383B, 0x8428, 0x320F, 0x8528, 0x320E, 0x0F02, 0x1341, 0x7264,
    0x3EB6, 0x8428, 0x3EB9, 0x8528, 0x383A, 0x0FA9, 0x3EB5, 0x7294,
    0x3EB7, 0x8474, 0x3EBA, 0x8575, 0x3EB8, 0x44C3, 0x3EBB, 0x45C3,
    0x0000, 0xA404, 0x0001, 0xA504, 0x141F, 0x0671, 0x14FD, 0x0287,
    0x3EBC, 0xE610, 0x3EC8, 0x0C7B, 0x031A, 0x07E6, 0x3EC8, 0x86F7,
    0x3EC0, 0x821E, 0x3EBE, 0xD208, 0x3EBD, 0x021F, 0x3ECA, 0x0386,
    0x3EC1, 0x0C03, 0x3EC9, 0x031E, 0x3ECA, 0x8C4C, 0x3EBF, 0x0C55,
    0x3EC9, 0xC208, 0x3EC4, 0xBC84, 0x3EC8, 0x0EAD, 0x3EC8, 0xD308,
    0x3EC2, 0x8F7E, 0x3ECB, 0x0219, 0x3ECB, 0xD26E, 0x3EC5, 0x031F,
    0x3EC6, 0xC308, 0x3EC3, 0x32FF, 0x3EC9, 0x0265, 0x3EC9, 0x8319,
    0x1342, 0xD36E, 0x3EC7, 0x33FF, 0x0000, 0x8365, 0x1420, 0x9570,
};
/* send an initialization array
* Taken from the oss driver, not obvious from the doc how this
* is meant to work
*/
/*
 * Stream one initialization table into the chip.
 * The table consists of four consecutive banks of 'size' words each;
 * bank k is written word-by-word into the INIT<k+1> register set.
 * (Taken from the OSS driver; not obvious from the documentation.)
 */
static void __devinit
send_array(struct snd_emu8000 *emu, unsigned short *data, int size)
{
    int i;
    for (i = 0; i < size; i++)
        EMU8000_INIT1_WRITE(emu, i, data[i]);
    for (i = 0; i < size; i++)
        EMU8000_INIT2_WRITE(emu, i, data[size + i]);
    for (i = 0; i < size; i++)
        EMU8000_INIT3_WRITE(emu, i, data[2 * size + i]);
    for (i = 0; i < size; i++)
        EMU8000_INIT4_WRITE(emu, i, data[3 * size + i]);
}
/*
* Send initialization arrays to start up, this just follows the
* initialisation sequence in the adip.
*/
/*
 * Send the four initialization tables in the sequence prescribed by
 * the ADIP.  Each ARRAY_SIZE/4 gives the per-bank word count (32)
 * expected by send_array().
 */
static void __devinit
init_arrays(struct snd_emu8000 *emu)
{
    send_array(emu, init1, ARRAY_SIZE(init1)/4);
    msleep((1024 * 1000) / 44100); /* wait for 1024 clocks */
    send_array(emu, init2, ARRAY_SIZE(init2)/4);
    send_array(emu, init3, ARRAY_SIZE(init3)/4);
    /* program the effects-engine delay registers between passes 3 and 4 */
    EMU8000_HWCF4_WRITE(emu, 0);
    EMU8000_HWCF5_WRITE(emu, 0x83);
    EMU8000_HWCF6_WRITE(emu, 0x8000);
    send_array(emu, init4, ARRAY_SIZE(init4)/4);
}
#define UNIQUE_ID1 0xa5b9
#define UNIQUE_ID2 0x9d53
/*
* Size the onboard memory.
* This is written so as not to need arbitrary delays after the write. It
* seems that the only way to do this is to use the one channel and keep
* reallocating between read and write.
*/
/*
 * Probe the amount of on-board sample DRAM by writing magic words at
 * increasing 512KB offsets and reading them back, stopping when the
 * readback fails or the address space wraps to the start.
 * This is written so as not to need arbitrary delays after the write;
 * the only reliable way is to use one channel pair and keep it
 * allocated across the read/write probes.  Result is stored in
 * emu->mem_size; runs at most once (guarded by emu->dram_checked).
 */
static void __devinit
size_dram(struct snd_emu8000 *emu)
{
    int i, size, detected_size;
    if (emu->dram_checked)
        return;
    size = 0;
    detected_size = 0;
    /* write out a magic number at the DRAM base */
    snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_WRITE);
    snd_emu8000_dma_chan(emu, 1, EMU8000_RAM_READ);
    EMU8000_SMALW_WRITE(emu, EMU8000_DRAM_OFFSET);
    EMU8000_SMLD_WRITE(emu, UNIQUE_ID1);
    snd_emu8000_init_fm(emu); /* This must really be here and not 2 lines back even */
    while (size < EMU8000_MAX_DRAM) {
        size += 512 * 1024; /* increment 512kbytes */
        /* Write a unique data on the test address.
         * if the address is out of range, the data is written on
         * 0x200000(=EMU8000_DRAM_OFFSET). Then the id word is
         * changed by this data.
         */
        /*snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_WRITE);*/
        EMU8000_SMALW_WRITE(emu, EMU8000_DRAM_OFFSET + (size>>1));
        EMU8000_SMLD_WRITE(emu, UNIQUE_ID2);
        snd_emu8000_write_wait(emu);
        /*
         * read the data on the just written DRAM address
         * if not the same then we have reached the end of ram.
         */
        /*snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_READ);*/
        EMU8000_SMALR_WRITE(emu, EMU8000_DRAM_OFFSET + (size>>1));
        /*snd_emu8000_read_wait(emu);*/
        EMU8000_SMLD_READ(emu); /* discard stale data */
        if (EMU8000_SMLD_READ(emu) != UNIQUE_ID2)
            break; /* no memory at this address */
        detected_size = size;
        snd_emu8000_read_wait(emu);
        /*
         * If it is the same it could be that the address just
         * wraps back to the beginning; so check to see if the
         * initial value has been overwritten.
         */
        EMU8000_SMALR_WRITE(emu, EMU8000_DRAM_OFFSET);
        EMU8000_SMLD_READ(emu); /* discard stale data */
        if (EMU8000_SMLD_READ(emu) != UNIQUE_ID1)
            break; /* we must have wrapped around */
        snd_emu8000_read_wait(emu);
    }
    /* wait until FULL bit in SMAxW register is false (bounded poll) */
    for (i = 0; i < 10000; i++) {
        if ((EMU8000_SMALW_READ(emu) & 0x80000000) == 0)
            break;
        schedule_timeout_interruptible(1);
        if (signal_pending(current))
            break;
    }
    /* release the probe channels */
    snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_CLOSE);
    snd_emu8000_dma_chan(emu, 1, EMU8000_RAM_CLOSE);
    snd_printdd("EMU8000 [0x%lx]: %d Kb on-board memory detected\n",
        emu->port1, detected_size/1024);
    emu->mem_size = detected_size;
    emu->dram_checked = 1;
}
/*
 * Initialise the FM section. You have to do this to use sample RAM
 * and therefore lose 2 voices.
 */
/*
 * Initialise the FM section.  This dedicates the last two voices
 * (30/31) to DRAM refresh and to routing the OPL-3 FM output through
 * the chorus/reverb engines, so using sample RAM costs two voices.
 * The register sequence (including the busy-wait on the PTR port and
 * the odd outb pair at the end) is hardware voodoo inherited from the
 * reference driver -- do not reorder.
 */
/*exported*/ void
snd_emu8000_init_fm(struct snd_emu8000 *emu)
{
    unsigned long flags;
    /* Initialize the last two channels for DRAM refresh and producing
       the reverb and chorus effects for Yamaha OPL-3 synthesizer */
    /* 31: FM left channel, 0xffffe0-0xffffe8 */
    EMU8000_DCYSUSV_WRITE(emu, 30, 0x80);
    EMU8000_PSST_WRITE(emu, 30, 0xFFFFFFE0); /* full left */
    EMU8000_CSL_WRITE(emu, 30, 0x00FFFFE8 | (emu->fm_chorus_depth << 24));
    EMU8000_PTRX_WRITE(emu, 30, (emu->fm_reverb_depth << 8));
    EMU8000_CPF_WRITE(emu, 30, 0);
    EMU8000_CCCA_WRITE(emu, 30, 0x00FFFFE3);
    /* 32: FM right channel, 0xfffff0-0xfffff8 */
    EMU8000_DCYSUSV_WRITE(emu, 31, 0x80);
    EMU8000_PSST_WRITE(emu, 31, 0x00FFFFF0); /* full right */
    EMU8000_CSL_WRITE(emu, 31, 0x00FFFFF8 | (emu->fm_chorus_depth << 24));
    EMU8000_PTRX_WRITE(emu, 31, (emu->fm_reverb_depth << 8));
    EMU8000_CPF_WRITE(emu, 31, 0x8000);
    EMU8000_CCCA_WRITE(emu, 31, 0x00FFFFF3);
    snd_emu8000_poke((emu), EMU8000_DATA0(emu), EMU8000_CMD(1, (30)), 0);
    /* busy-wait for one rising+falling edge of PTR bit 12; note this
     * spins with interrupts off and has no timeout -- relies on the
     * hardware always toggling the bit */
    spin_lock_irqsave(&emu->reg_lock, flags);
    while (!(inw(EMU8000_PTR(emu)) & 0x1000))
        ;
    while ((inw(EMU8000_PTR(emu)) & 0x1000))
        ;
    spin_unlock_irqrestore(&emu->reg_lock, flags);
    snd_emu8000_poke((emu), EMU8000_DATA0(emu), EMU8000_CMD(1, (30)), 0x4828);
    /* this is really odd part.. */
    outb(0x3C, EMU8000_PTR(emu));
    outb(0, EMU8000_DATA1(emu));
    /* skew volume & cutoff */
    EMU8000_VTFT_WRITE(emu, 30, 0x8000FFFF);
    EMU8000_VTFT_WRITE(emu, 31, 0x8000FFFF);
}
/*
* The main initialization routine.
*/
/*
 * The main initialization routine: bring the chip to a known state,
 * size the DRAM, and apply the current equalizer/chorus/reverb modes.
 *
 * Fix: the "terminate all voices" loop previously wrote channel 0 on
 * every iteration (the loop index was unused), leaving voices
 * 1..EMU8000_DRAM_VOICES-1 untouched; it now terminates each voice.
 */
static void __devinit
snd_emu8000_init_hw(struct snd_emu8000 *emu)
{
    int i;
    emu->last_reg = 0xffff; /* reset the last register index */
    /* initialize hardware configuration */
    EMU8000_HWCF1_WRITE(emu, 0x0059);
    EMU8000_HWCF2_WRITE(emu, 0x0020);
    /* disable audio; this seems to reduce a clicking noise a bit.. */
    EMU8000_HWCF3_WRITE(emu, 0);
    /* initialize audio channels */
    init_audio(emu);
    /* initialize DMA */
    init_dma(emu);
    /* initialize init arrays */
    init_arrays(emu);
    /*
     * Initialize the FM section of the AWE32, this is needed
     * for DRAM refresh as well
     */
    snd_emu8000_init_fm(emu);
    /* terminate all voices (use the loop index, not a fixed channel) */
    for (i = 0; i < EMU8000_DRAM_VOICES; i++)
        EMU8000_DCYSUSV_WRITE(emu, i, 0x807F);
    /* check DRAM memory size */
    size_dram(emu);
    /* enable audio */
    EMU8000_HWCF3_WRITE(emu, 0x4);
    /* set equalizer, chorus and reverb modes */
    snd_emu8000_update_equalizer(emu);
    snd_emu8000_update_chorus_mode(emu);
    snd_emu8000_update_reverb_mode(emu);
}
/*----------------------------------------------------------------
* Bass/Treble Equalizer
*----------------------------------------------------------------*/
/* Bass EQ coefficient table: one row per level 0..11 (-12dB..+12dB);
 * columns feed INIT4 registers 0x01, 0x11 plus a correction term. */
static unsigned short bass_parm[12][3] = {
    {0xD26A, 0xD36A, 0x0000}, /* -12 dB */
    {0xD25B, 0xD35B, 0x0000}, /* -8 */
    {0xD24C, 0xD34C, 0x0000}, /* -6 */
    {0xD23D, 0xD33D, 0x0000}, /* -4 */
    {0xD21F, 0xD31F, 0x0000}, /* -2 */
    {0xC208, 0xC308, 0x0001}, /* 0 (HW default) */
    {0xC219, 0xC319, 0x0001}, /* +2 */
    {0xC22A, 0xC32A, 0x0001}, /* +4 */
    {0xC24C, 0xC34C, 0x0001}, /* +6 */
    {0xC26E, 0xC36E, 0x0001}, /* +8 */
    {0xC248, 0xC384, 0x0002}, /* +10 */
    {0xC26A, 0xC36A, 0x0002}, /* +12 dB */
};
/* Treble EQ coefficient table: one row per level 0..11 (-12dB..+12dB);
 * eight register values plus a correction term (last column). */
static unsigned short treble_parm[12][9] = {
    {0x821E, 0xC26A, 0x031E, 0xC36A, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001}, /* -12 dB */
    {0x821E, 0xC25B, 0x031E, 0xC35B, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
    {0x821E, 0xC24C, 0x031E, 0xC34C, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
    {0x821E, 0xC23D, 0x031E, 0xC33D, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
    {0x821E, 0xC21F, 0x031E, 0xC31F, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
    {0x821E, 0xD208, 0x031E, 0xD308, 0x021E, 0xD208, 0x831E, 0xD308, 0x0002},
    {0x821E, 0xD208, 0x031E, 0xD308, 0x021D, 0xD219, 0x831D, 0xD319, 0x0002},
    {0x821E, 0xD208, 0x031E, 0xD308, 0x021C, 0xD22A, 0x831C, 0xD32A, 0x0002},
    {0x821E, 0xD208, 0x031E, 0xD308, 0x021A, 0xD24C, 0x831A, 0xD34C, 0x0002},
    {0x821E, 0xD208, 0x031E, 0xD308, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002}, /* +8 (HW default) */
    {0x821D, 0xD219, 0x031D, 0xD319, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002},
    {0x821C, 0xD22A, 0x031C, 0xD32A, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002} /* +12 dB */
};
/*
* set Emu8000 digital equalizer; from 0 to 11 [-12dB - 12dB]
*/
/*
 * Program the digital equalizer from emu->bass_level/treble_level
 * (each 0..11, i.e. -12dB..+12dB).  Out-of-range levels are silently
 * ignored.  Register indices and the final correction words come from
 * the reference driver.
 */
/*exported*/ void
snd_emu8000_update_equalizer(struct snd_emu8000 *emu)
{
    unsigned short w;
    int bass = emu->bass_level;
    int treble = emu->treble_level;
    if (bass < 0 || bass > 11 || treble < 0 || treble > 11)
        return; /* invalid level; leave hardware untouched */
    EMU8000_INIT4_WRITE(emu, 0x01, bass_parm[bass][0]);
    EMU8000_INIT4_WRITE(emu, 0x11, bass_parm[bass][1]);
    EMU8000_INIT3_WRITE(emu, 0x11, treble_parm[treble][0]);
    EMU8000_INIT3_WRITE(emu, 0x13, treble_parm[treble][1]);
    EMU8000_INIT3_WRITE(emu, 0x1b, treble_parm[treble][2]);
    EMU8000_INIT4_WRITE(emu, 0x07, treble_parm[treble][3]);
    EMU8000_INIT4_WRITE(emu, 0x0b, treble_parm[treble][4]);
    EMU8000_INIT4_WRITE(emu, 0x0d, treble_parm[treble][5]);
    EMU8000_INIT4_WRITE(emu, 0x17, treble_parm[treble][6]);
    EMU8000_INIT4_WRITE(emu, 0x19, treble_parm[treble][7]);
    /* combined bass+treble correction terms */
    w = bass_parm[bass][2] + treble_parm[treble][8];
    EMU8000_INIT4_WRITE(emu, 0x15, (unsigned short)(w + 0x0262));
    EMU8000_INIT4_WRITE(emu, 0x1d, (unsigned short)(w + 0x8362));
}
/*----------------------------------------------------------------
* Chorus mode control
*----------------------------------------------------------------*/
/*
* chorus mode parameters
*/
/* predefined chorus mode indices (0..7); user-defined modes occupy
 * indices SNDRV_EMU8000_CHORUS_PREDEFINED..SNDRV_EMU8000_CHORUS_NUMBERS-1 */
#define SNDRV_EMU8000_CHORUS_1 0
#define SNDRV_EMU8000_CHORUS_2 1
#define SNDRV_EMU8000_CHORUS_3 2
#define SNDRV_EMU8000_CHORUS_4 3
#define SNDRV_EMU8000_CHORUS_FEEDBACK 4
#define SNDRV_EMU8000_CHORUS_FLANGER 5
#define SNDRV_EMU8000_CHORUS_SHORTDELAY 6
#define SNDRV_EMU8000_CHORUS_SHORTDELAY2 7
#define SNDRV_EMU8000_CHORUS_PREDEFINED 8
/* user can define chorus modes up to 32 */
#define SNDRV_EMU8000_CHORUS_NUMBERS 32
/* one chorus-engine parameter set; see update_chorus_mode for the
 * registers each field is written to */
struct soundfont_chorus_fx {
    unsigned short feedback; /* feedback level (0xE600-0xE6FF) */
    unsigned short delay_offset; /* delay (0-0x0DA3) [1/44100 sec] */
    unsigned short lfo_depth; /* LFO depth (0xBC00-0xBCFF) */
    unsigned int delay; /* right delay (0-0xFFFFFFFF) [1/256/44100 sec] */
    unsigned int lfo_freq; /* LFO freq LFO freq (0-0xFFFFFFFF) */
};
/* 5 parameters for each chorus mode; 3 x 16bit, 2 x 32bit */
/* chorus_defined[i] is nonzero once a user uploaded parameters for
 * slot i; predefined slots 0..7 are initialized statically below */
static char chorus_defined[SNDRV_EMU8000_CHORUS_NUMBERS];
static struct soundfont_chorus_fx chorus_parm[SNDRV_EMU8000_CHORUS_NUMBERS] = {
    {0xE600, 0x03F6, 0xBC2C ,0x00000000, 0x0000006D}, /* chorus 1 */
    {0xE608, 0x031A, 0xBC6E, 0x00000000, 0x0000017C}, /* chorus 2 */
    {0xE610, 0x031A, 0xBC84, 0x00000000, 0x00000083}, /* chorus 3 */
    {0xE620, 0x0269, 0xBC6E, 0x00000000, 0x0000017C}, /* chorus 4 */
    {0xE680, 0x04D3, 0xBCA6, 0x00000000, 0x0000005B}, /* feedback */
    {0xE6E0, 0x044E, 0xBC37, 0x00000000, 0x00000026}, /* flanger */
    {0xE600, 0x0B06, 0xBC00, 0x0006E000, 0x00000083}, /* short delay */
    {0xE6C0, 0x0B06, 0xBC00, 0x0006E000, 0x00000083}, /* short delay + feedback */
};
/*
 * Upload a user-defined chorus parameter set from user space into one
 * of the user-definable slots (PREDEFINED..NUMBERS-1).
 * Returns 0 on success, -EINVAL for a reserved/out-of-range slot,
 * -EFAULT for a short buffer or a failed copy.
 */
/*exported*/ int
snd_emu8000_load_chorus_fx(struct snd_emu8000 *emu, int mode, const void __user *buf, long len)
{
    struct soundfont_chorus_fx fx;
    if (mode < SNDRV_EMU8000_CHORUS_PREDEFINED ||
        mode >= SNDRV_EMU8000_CHORUS_NUMBERS) {
        snd_printk(KERN_WARNING "invalid chorus mode %d for uploading\n", mode);
        return -EINVAL;
    }
    if (len < (long)sizeof(fx))
        return -EFAULT;
    if (copy_from_user(&fx, buf, sizeof(fx)))
        return -EFAULT;
    chorus_parm[mode] = fx;
    chorus_defined[mode] = 1;
    return 0;
}
/*
 * Program the chorus engine from emu->chorus_mode.  Invalid modes and
 * user slots that were never uploaded are silently ignored.
 */
/*exported*/ void
snd_emu8000_update_chorus_mode(struct snd_emu8000 *emu)
{
    int effect = emu->chorus_mode;
    if (effect < 0 || effect >= SNDRV_EMU8000_CHORUS_NUMBERS ||
        (effect >= SNDRV_EMU8000_CHORUS_PREDEFINED && !chorus_defined[effect]))
        return;
    EMU8000_INIT3_WRITE(emu, 0x09, chorus_parm[effect].feedback);
    EMU8000_INIT3_WRITE(emu, 0x0c, chorus_parm[effect].delay_offset);
    EMU8000_INIT4_WRITE(emu, 0x03, chorus_parm[effect].lfo_depth);
    EMU8000_HWCF4_WRITE(emu, chorus_parm[effect].delay);
    EMU8000_HWCF5_WRITE(emu, chorus_parm[effect].lfo_freq);
    /* fixed engine-enable values (from the reference driver) */
    EMU8000_HWCF6_WRITE(emu, 0x8000);
    EMU8000_HWCF7_WRITE(emu, 0x0000);
}
/*----------------------------------------------------------------
* Reverb mode control
*----------------------------------------------------------------*/
/*
* reverb mode parameters
*/
/* predefined reverb mode indices (0..7); user-defined modes occupy
 * indices SNDRV_EMU8000_REVERB_PREDEFINED..SNDRV_EMU8000_REVERB_NUMBERS-1 */
#define SNDRV_EMU8000_REVERB_ROOM1 0
#define SNDRV_EMU8000_REVERB_ROOM2 1
#define SNDRV_EMU8000_REVERB_ROOM3 2
#define SNDRV_EMU8000_REVERB_HALL1 3
#define SNDRV_EMU8000_REVERB_HALL2 4
#define SNDRV_EMU8000_REVERB_PLATE 5
#define SNDRV_EMU8000_REVERB_DELAY 6
#define SNDRV_EMU8000_REVERB_PANNINGDELAY 7
#define SNDRV_EMU8000_REVERB_PREDEFINED 8
/* user can define reverb modes up to 32 */
#define SNDRV_EMU8000_REVERB_NUMBERS 32
/* a reverb parameter set: 28 16-bit words, written to the ports listed
 * in reverb_cmds[] in order */
struct soundfont_reverb_fx {
    unsigned short parms[28];
};
/* reverb mode settings; write the following 28 data of 16 bit length
 * on the corresponding ports in the reverb_cmds array
 */
static char reverb_defined[SNDRV_EMU8000_CHORUS_NUMBERS];
/* predefined reverb parameter sets (vendor values; do not modify) */
static struct soundfont_reverb_fx reverb_parm[SNDRV_EMU8000_REVERB_NUMBERS] = {
    {{ /* room 1 */
    0xB488, 0xA450, 0x9550, 0x84B5, 0x383A, 0x3EB5, 0x72F4,
    0x72A4, 0x7254, 0x7204, 0x7204, 0x7204, 0x4416, 0x4516,
    0xA490, 0xA590, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
    0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
    }},
    {{ /* room 2 */
    0xB488, 0xA458, 0x9558, 0x84B5, 0x383A, 0x3EB5, 0x7284,
    0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4448, 0x4548,
    0xA440, 0xA540, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
    0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
    }},
    {{ /* room 3 */
    0xB488, 0xA460, 0x9560, 0x84B5, 0x383A, 0x3EB5, 0x7284,
    0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4416, 0x4516,
    0xA490, 0xA590, 0x842C, 0x852C, 0x842C, 0x852C, 0x842B,
    0x852B, 0x842B, 0x852B, 0x842A, 0x852A, 0x842A, 0x852A,
    }},
    {{ /* hall 1 */
    0xB488, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7284,
    0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4448, 0x4548,
    0xA440, 0xA540, 0x842B, 0x852B, 0x842B, 0x852B, 0x842A,
    0x852A, 0x842A, 0x852A, 0x8429, 0x8529, 0x8429, 0x8529,
    }},
    {{ /* hall 2 */
    0xB488, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7254,
    0x7234, 0x7224, 0x7254, 0x7264, 0x7294, 0x44C3, 0x45C3,
    0xA404, 0xA504, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
    0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
    }},
    {{ /* plate */
    0xB4FF, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7234,
    0x7234, 0x7234, 0x7234, 0x7234, 0x7234, 0x4448, 0x4548,
    0xA440, 0xA540, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
    0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
    }},
    {{ /* delay */
    0xB4FF, 0xA470, 0x9500, 0x84B5, 0x333A, 0x39B5, 0x7204,
    0x7204, 0x7204, 0x7204, 0x7204, 0x72F4, 0x4400, 0x4500,
    0xA4FF, 0xA5FF, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420,
    0x8520, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420, 0x8520,
    }},
    {{ /* panning delay */
    0xB4FF, 0xA490, 0x9590, 0x8474, 0x333A, 0x39B5, 0x7204,
    0x7204, 0x7204, 0x7204, 0x7204, 0x72F4, 0x4400, 0x4500,
    0xA4FF, 0xA5FF, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420,
    0x8520, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420, 0x8520,
    }},
};
/* which data port (DATA1 or DATA2) each reverb word goes to */
enum { DATA1, DATA2 };
/* build a (command, port-tag) pair for one reverb register */
#define AWE_INIT1(c) EMU8000_CMD(2,c), DATA1
#define AWE_INIT2(c) EMU8000_CMD(2,c), DATA2
#define AWE_INIT3(c) EMU8000_CMD(3,c), DATA1
#define AWE_INIT4(c) EMU8000_CMD(3,c), DATA2
/* destination (command word, port tag) for each of the 28 reverb
 * parameter words, in the order they appear in soundfont_reverb_fx */
static struct reverb_cmd_pair {
    unsigned short cmd, port;
} reverb_cmds[28] = {
    {AWE_INIT1(0x03)}, {AWE_INIT1(0x05)}, {AWE_INIT4(0x1F)}, {AWE_INIT1(0x07)},
    {AWE_INIT2(0x14)}, {AWE_INIT2(0x16)}, {AWE_INIT1(0x0F)}, {AWE_INIT1(0x17)},
    {AWE_INIT1(0x1F)}, {AWE_INIT2(0x07)}, {AWE_INIT2(0x0F)}, {AWE_INIT2(0x17)},
    {AWE_INIT2(0x1D)}, {AWE_INIT2(0x1F)}, {AWE_INIT3(0x01)}, {AWE_INIT3(0x03)},
    {AWE_INIT1(0x09)}, {AWE_INIT1(0x0B)}, {AWE_INIT1(0x11)}, {AWE_INIT1(0x13)},
    {AWE_INIT1(0x19)}, {AWE_INIT1(0x1B)}, {AWE_INIT2(0x01)}, {AWE_INIT2(0x03)},
    {AWE_INIT2(0x09)}, {AWE_INIT2(0x0B)}, {AWE_INIT2(0x11)}, {AWE_INIT2(0x13)},
};
/*
 * Upload a user-defined reverb parameter set from user space into one
 * of the user-definable slots (PREDEFINED..NUMBERS-1).
 * Returns 0 on success, -EINVAL for a reserved/out-of-range slot,
 * -EFAULT for a short buffer or a failed copy.
 */
/*exported*/ int
snd_emu8000_load_reverb_fx(struct snd_emu8000 *emu, int mode, const void __user *buf, long len)
{
    struct soundfont_reverb_fx fx;
    if (mode < SNDRV_EMU8000_REVERB_PREDEFINED ||
        mode >= SNDRV_EMU8000_REVERB_NUMBERS) {
        snd_printk(KERN_WARNING "invalid reverb mode %d for uploading\n", mode);
        return -EINVAL;
    }
    if (len < (long)sizeof(fx))
        return -EFAULT;
    if (copy_from_user(&fx, buf, sizeof(fx)))
        return -EFAULT;
    reverb_parm[mode] = fx;
    reverb_defined[mode] = 1;
    return 0;
}
/*
 * Program the reverb engine from emu->reverb_mode: write the 28
 * parameter words of the selected mode to the ports listed in
 * reverb_cmds[].  Invalid modes and user slots that were never
 * uploaded are silently ignored.
 */
/*exported*/ void
snd_emu8000_update_reverb_mode(struct snd_emu8000 *emu)
{
    int mode = emu->reverb_mode;
    int i;
    if (mode < 0 || mode >= SNDRV_EMU8000_REVERB_NUMBERS)
        return;
    if (mode >= SNDRV_EMU8000_REVERB_PREDEFINED && !reverb_defined[mode])
        return;
    for (i = 0; i < 28; i++) {
        int port = (reverb_cmds[i].port == DATA1) ?
            EMU8000_DATA1(emu) : EMU8000_DATA2(emu);
        snd_emu8000_poke(emu, port, reverb_cmds[i].cmd,
                 reverb_parm[mode].parms[i]);
    }
}
/*----------------------------------------------------------------
* mixer interface
*----------------------------------------------------------------*/
/*
* bass/treble
*/
/* Describe the bass/treble controls: a single integer in 0..11. */
static int mixer_bass_treble_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
    uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
    uinfo->count = 1;
    uinfo->value.integer.max = 11; /* 12 EQ steps, -12dB..+12dB */
    uinfo->value.integer.min = 0;
    return 0;
}
/* Report the current level; private_value selects treble (1) or bass (0). */
static int mixer_bass_treble_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    if (kcontrol->private_value)
        ucontrol->value.integer.value[0] = emu->treble_level;
    else
        ucontrol->value.integer.value[0] = emu->bass_level;
    return 0;
}
/*
 * Store a new bass/treble level (wrapped into 0..11) and reprogram the
 * equalizer.  Returns 1 if the value changed, 0 otherwise.
 */
static int mixer_bass_treble_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int changed;
    unsigned short level;
    level = ucontrol->value.integer.value[0] % 12;
    spin_lock_irqsave(&emu->control_lock, flags);
    if (kcontrol->private_value) {
        changed = (emu->treble_level != level);
        emu->treble_level = level;
    } else {
        changed = (emu->bass_level != level);
        emu->bass_level = level;
    }
    spin_unlock_irqrestore(&emu->control_lock, flags);
    snd_emu8000_update_equalizer(emu);
    return changed;
}
/* bass/treble mixer elements; private_value distinguishes them
 * (0 = bass, 1 = treble) so they share the same callbacks */
static struct snd_kcontrol_new mixer_bass_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Synth Tone Control - Bass",
    .info = mixer_bass_treble_info,
    .get = mixer_bass_treble_get,
    .put = mixer_bass_treble_put,
    .private_value = 0,
};
static struct snd_kcontrol_new mixer_treble_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Synth Tone Control - Treble",
    .info = mixer_bass_treble_info,
    .get = mixer_bass_treble_get,
    .put = mixer_bass_treble_put,
    .private_value = 1,
};
/*
* chorus/reverb mode
*/
/* Describe the chorus/reverb mode controls: a single integer whose
 * maximum depends on which control this is (private_value: 1 = chorus). */
static int mixer_chorus_reverb_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
    uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
    uinfo->count = 1;
    uinfo->value.integer.min = 0;
    if (kcontrol->private_value)
        uinfo->value.integer.max = SNDRV_EMU8000_CHORUS_NUMBERS - 1;
    else
        uinfo->value.integer.max = SNDRV_EMU8000_REVERB_NUMBERS - 1;
    return 0;
}
/* Report the current mode; private_value selects chorus (1) or reverb (0). */
static int mixer_chorus_reverb_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    if (kcontrol->private_value)
        ucontrol->value.integer.value[0] = emu->chorus_mode;
    else
        ucontrol->value.integer.value[0] = emu->reverb_mode;
    return 0;
}
/*
 * Store a new chorus/reverb mode (wrapped into the valid range) and,
 * if it changed, reprogram the corresponding effects engine.
 * Returns 1 on change, 0 otherwise.
 */
static int mixer_chorus_reverb_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int changed;
    unsigned short mode;
    spin_lock_irqsave(&emu->control_lock, flags);
    if (kcontrol->private_value) {
        mode = ucontrol->value.integer.value[0] % SNDRV_EMU8000_CHORUS_NUMBERS;
        changed = (emu->chorus_mode != mode);
        emu->chorus_mode = mode;
    } else {
        mode = ucontrol->value.integer.value[0] % SNDRV_EMU8000_REVERB_NUMBERS;
        changed = (emu->reverb_mode != mode);
        emu->reverb_mode = mode;
    }
    spin_unlock_irqrestore(&emu->control_lock, flags);
    if (changed) {
        if (kcontrol->private_value)
            snd_emu8000_update_chorus_mode(emu);
        else
            snd_emu8000_update_reverb_mode(emu);
    }
    return changed;
}
/* chorus/reverb mode mixer elements; private_value distinguishes them
 * (1 = chorus, 0 = reverb) so they share the same callbacks */
static struct snd_kcontrol_new mixer_chorus_mode_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Chorus Mode",
    .info = mixer_chorus_reverb_info,
    .get = mixer_chorus_reverb_get,
    .put = mixer_chorus_reverb_put,
    .private_value = 1,
};
static struct snd_kcontrol_new mixer_reverb_mode_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Reverb Mode",
    .info = mixer_chorus_reverb_info,
    .get = mixer_chorus_reverb_get,
    .put = mixer_chorus_reverb_put,
    .private_value = 0,
};
/*
* FM OPL3 chorus/reverb depth
*/
/* Describe the FM chorus/reverb depth controls: a single byte-range
 * integer (0..255). */
static int mixer_fm_depth_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
    uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
    uinfo->count = 1;
    uinfo->value.integer.max = 255;
    uinfo->value.integer.min = 0;
    return 0;
}
/* Report the current FM depth; private_value selects chorus (1) or reverb (0). */
static int mixer_fm_depth_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    if (kcontrol->private_value)
        ucontrol->value.integer.value[0] = emu->fm_chorus_depth;
    else
        ucontrol->value.integer.value[0] = emu->fm_reverb_depth;
    return 0;
}
/*
 * Store a new FM chorus/reverb depth (wrapped into 0..255) and, if it
 * changed, re-run the FM init sequence which programs these depths
 * into the effect-routing registers.  Returns 1 on change.
 */
static int mixer_fm_depth_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
    struct snd_emu8000 *emu = snd_kcontrol_chip(kcontrol);
    unsigned long flags;
    int changed;
    unsigned short depth;
    depth = ucontrol->value.integer.value[0] % 256;
    spin_lock_irqsave(&emu->control_lock, flags);
    if (kcontrol->private_value) {
        changed = (emu->fm_chorus_depth != depth);
        emu->fm_chorus_depth = depth;
    } else {
        changed = (emu->fm_reverb_depth != depth);
        emu->fm_reverb_depth = depth;
    }
    spin_unlock_irqrestore(&emu->control_lock, flags);
    if (changed)
        snd_emu8000_init_fm(emu);
    return changed;
}
/* FM depth mixer elements; private_value distinguishes them
 * (1 = chorus depth, 0 = reverb depth) so they share the callbacks */
static struct snd_kcontrol_new mixer_fm_chorus_depth_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "FM Chorus Depth",
    .info = mixer_fm_depth_info,
    .get = mixer_fm_depth_get,
    .put = mixer_fm_depth_put,
    .private_value = 1,
};
static struct snd_kcontrol_new mixer_fm_reverb_depth_control =
{
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "FM Reverb Depth",
    .info = mixer_fm_depth_info,
    .get = mixer_fm_depth_get,
    .put = mixer_fm_depth_put,
    .private_value = 0,
};
/* all EMU8000 mixer elements, registered by snd_emu8000_create_mixer() */
static struct snd_kcontrol_new *mixer_defs[EMU8000_NUM_CONTROLS] = {
    &mixer_bass_control,
    &mixer_treble_control,
    &mixer_chorus_mode_control,
    &mixer_reverb_mode_control,
    &mixer_fm_chorus_depth_control,
    &mixer_fm_reverb_depth_control,
};
/*
* create and attach mixer elements for WaveTable treble/bass controls
*/
/*
 * Create and attach the EMU8000 mixer elements to the card.
 * On any failure, all elements added so far are removed again.
 *
 * Fix: snd_ctl_add() frees the kcontrol itself when it fails, but the
 * original code left the freed pointer in emu->controls[i]; the
 * cleanup loop then called snd_ctl_remove() on it -- a double-free.
 * The stale slot is now cleared before jumping to the error path.
 */
static int __devinit
snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
{
    int i, err = 0;
    if (snd_BUG_ON(!emu || !card))
        return -EINVAL;
    spin_lock_init(&emu->control_lock);
    memset(emu->controls, 0, sizeof(emu->controls));
    for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
        err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu));
        if (err < 0) {
            /* snd_ctl_add() already released the element */
            emu->controls[i] = NULL;
            goto __error;
        }
    }
    return 0;
__error:
    for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
        down_write(&card->controls_rwsem);
        if (emu->controls[i])
            snd_ctl_remove(card, emu->controls[i]);
        up_write(&card->controls_rwsem);
    }
    return err;
}
/*
* free resources
*/
/*
 * Release the three I/O port regions and free the device structure.
 * release_and_free_resource() tolerates NULL, so this is safe to call
 * from partially-failed setup paths.  Always returns 0.
 */
static int snd_emu8000_free(struct snd_emu8000 *hw)
{
    release_and_free_resource(hw->res_port1);
    release_and_free_resource(hw->res_port2);
    release_and_free_resource(hw->res_port3);
    kfree(hw);
    return 0;
}
/*
*/
/* snd_device dev_free callback: forward to snd_emu8000_free(). */
static int snd_emu8000_dev_free(struct snd_device *device)
{
    struct snd_emu8000 *hw = device->device_data;
    return snd_emu8000_free(hw);
}
/*
* initialize and register emu8000 synth device.
*/
/*
 * Create, detect, initialize and register an EMU8000 synth device.
 * @card: the owning sound card
 * @index: card index (passed to the sequencer device)
 * @port: base I/O port; the chip also occupies port+0x400 and port+0x800
 * @seq_ports: number of sequencer ports to expose; <= 0 skips creation
 * @awe_ret: optional out-pointer for the created sequencer device
 * Returns 0 on success, -ENOMEM/-EBUSY/-ENODEV or another negative
 * error code on failure (all resources are released on error).
 */
int __devinit
snd_emu8000_new(struct snd_card *card, int index, long port, int seq_ports,
        struct snd_seq_device **awe_ret)
{
    struct snd_seq_device *awe;
    struct snd_emu8000 *hw;
    int err;
    static struct snd_device_ops ops = {
        .dev_free = snd_emu8000_dev_free,
    };
    if (awe_ret)
        *awe_ret = NULL;
    if (seq_ports <= 0)
        return 0; /* nothing requested */
    hw = kzalloc(sizeof(*hw), GFP_KERNEL);
    if (hw == NULL)
        return -ENOMEM;
    spin_lock_init(&hw->reg_lock);
    hw->index = index;
    /* the EMU8000 decodes three 4-byte port windows 0x400 apart */
    hw->port1 = port;
    hw->port2 = port + 0x400;
    hw->port3 = port + 0x800;
    if (!(hw->res_port1 = request_region(hw->port1, 4, "Emu8000-1")) ||
        !(hw->res_port2 = request_region(hw->port2, 4, "Emu8000-2")) ||
        !(hw->res_port3 = request_region(hw->port3, 4, "Emu8000-3"))) {
        snd_printk(KERN_ERR "sbawe: can't grab ports 0x%lx, 0x%lx, 0x%lx\n", hw->port1, hw->port2, hw->port3);
        snd_emu8000_free(hw);
        return -EBUSY;
    }
    hw->mem_size = 0;
    hw->card = card;
    hw->seq_ports = seq_ports;
    /* default mixer settings */
    hw->bass_level = 5;
    hw->treble_level = 9;
    hw->chorus_mode = 2;
    hw->reverb_mode = 4;
    hw->fm_chorus_depth = 0;
    hw->fm_reverb_depth = 0;
    if (snd_emu8000_detect(hw) < 0) {
        snd_emu8000_free(hw);
        return -ENODEV;
    }
    snd_emu8000_init_hw(hw);
    if ((err = snd_emu8000_create_mixer(card, hw)) < 0) {
        snd_emu8000_free(hw);
        return err;
    }
    if ((err = snd_device_new(card, SNDRV_DEV_CODEC, hw, &ops)) < 0) {
        snd_emu8000_free(hw);
        return err;
    }
#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
    /* hand the hw pointer to the sequencer layer; failure here is
     * non-fatal (the synth still works without a sequencer device) */
    if (snd_seq_device_new(card, index, SNDRV_SEQ_DEV_ID_EMU8000,
                   sizeof(struct snd_emu8000*), &awe) >= 0) {
        strcpy(awe->name, "EMU-8000");
        *(struct snd_emu8000 **)SNDRV_SEQ_DEVICE_ARGPTR(awe) = hw;
    }
#else
    awe = NULL;
#endif
    if (awe_ret)
        *awe_ret = awe;
    return 0;
}
/*
* exported stuff
*/
/* Low-level register, DMA and effect helpers used by the emux synth code */
EXPORT_SYMBOL(snd_emu8000_poke);
EXPORT_SYMBOL(snd_emu8000_peek);
EXPORT_SYMBOL(snd_emu8000_poke_dw);
EXPORT_SYMBOL(snd_emu8000_peek_dw);
EXPORT_SYMBOL(snd_emu8000_dma_chan);
EXPORT_SYMBOL(snd_emu8000_init_fm);
EXPORT_SYMBOL(snd_emu8000_load_chorus_fx);
EXPORT_SYMBOL(snd_emu8000_load_reverb_fx);
EXPORT_SYMBOL(snd_emu8000_update_chorus_mode);
EXPORT_SYMBOL(snd_emu8000_update_reverb_mode);
EXPORT_SYMBOL(snd_emu8000_update_equalizer);
| gpl-2.0 |
erik96/Linux-Kernel-3.4 | arch/arm/mm/idmap.c | 3176 | 2832 | #include <linux/kernel.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
pgd_t *idmap_pgd;
#ifdef CONFIG_ARM_LPAE
/*
 * LPAE: populate section-sized identity mappings at the pmd level for
 * [addr, end).  If the pud is empty (or still points at the swapper
 * tables), a fresh pmd table is allocated first; allocation failure is
 * logged and the range is silently left unmapped.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		/* 1:1 section entry: VA == PA */
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else /* !CONFIG_ARM_LPAE */
/*
 * Classic (2-level) page tables: a pgd entry covers two section
 * entries, so write both halves of the pmd pair as identity sections.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
#endif /* CONFIG_ARM_LPAE */
/*
 * Walk the pud entries covering [addr, end) and delegate each
 * pud-sized sub-range to idmap_add_pmd().
 */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long boundary;

	do {
		boundary = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, boundary, prot);
		pud++;
		addr = boundary;
	} while (addr != end);
}
/*
 * Install a 1:1 (VA == PA) mapping of [addr, end) into the page table
 * rooted at @pgd, using writable section entries.  Pre-ARMv6 cores
 * (except XScale) additionally need PMD_BIT4 set in section entries.
 */
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pud(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}
extern char __idmap_text_start[], __idmap_text_end[];
extern char __idmap_text_start[], __idmap_text_end[];

/*
 * Allocate the static identity pgd (idmap_pgd) and map the .idmap.text
 * section 1:1, so that MMU-off transitions (reboot, CPU hotplug) can
 * execute that code at its physical address.
 * Returns 0 on success, -ENOMEM if the pgd cannot be allocated.
 */
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	return 0;
}
early_initcall(init_static_idmap);
early_initcall(init_static_idmap);
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
	/* Clean and invalidate L1. */
	flush_cache_all();

	/* Switch to the identity mapping built by init_static_idmap(). */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Flush the TLB. */
	local_flush_tlb_all();
}
| gpl-2.0 |
TeamEOS/kernel_oppo_r5 | arch/c6x/kernel/traps.c | 4200 | 14886 | /*
* Port on Texas Instruments TMS320C6x architecture
*
* Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
* Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>
#include <asm/soc.h>
#include <asm/special_insns.h>
#include <asm/traps.h>
int (*c6x_nmi_handler)(struct pt_regs *regs);
/*
 * Early trap setup: acknowledge (clear) any exception flags latched
 * before boot, then globally enable exception processing on the core.
 */
void __init trap_init(void)
{
	ack_exception(EXCEPT_TYPE_NXF);	/* NMI */
	ack_exception(EXCEPT_TYPE_EXC);	/* external exceptions */
	ack_exception(EXCEPT_TYPE_IXF);	/* internal exceptions */
	ack_exception(EXCEPT_TYPE_SXF);	/* software exceptions */
	enable_exception();
}
/*
 * Dump the complete C6x register file (PC, SP, status and the A0-A31 /
 * B0-B31 general registers) from @regs at KERN_ERR level.  B14/B15 are
 * printed from the saved dp/sp slots of pt_regs.
 */
void show_regs(struct pt_regs *regs)
{
	pr_err("\n");
	show_regs_print_info(KERN_ERR);
	pr_err("PC: %08lx SP: %08lx\n", regs->pc, regs->sp);
	pr_err("Status: %08lx ORIG_A4: %08lx\n", regs->csr, regs->orig_a4);
	pr_err("A0: %08lx B0: %08lx\n", regs->a0, regs->b0);
	pr_err("A1: %08lx B1: %08lx\n", regs->a1, regs->b1);
	pr_err("A2: %08lx B2: %08lx\n", regs->a2, regs->b2);
	pr_err("A3: %08lx B3: %08lx\n", regs->a3, regs->b3);
	pr_err("A4: %08lx B4: %08lx\n", regs->a4, regs->b4);
	pr_err("A5: %08lx B5: %08lx\n", regs->a5, regs->b5);
	pr_err("A6: %08lx B6: %08lx\n", regs->a6, regs->b6);
	pr_err("A7: %08lx B7: %08lx\n", regs->a7, regs->b7);
	pr_err("A8: %08lx B8: %08lx\n", regs->a8, regs->b8);
	pr_err("A9: %08lx B9: %08lx\n", regs->a9, regs->b9);
	pr_err("A10: %08lx B10: %08lx\n", regs->a10, regs->b10);
	pr_err("A11: %08lx B11: %08lx\n", regs->a11, regs->b11);
	pr_err("A12: %08lx B12: %08lx\n", regs->a12, regs->b12);
	pr_err("A13: %08lx B13: %08lx\n", regs->a13, regs->b13);
	pr_err("A14: %08lx B14: %08lx\n", regs->a14, regs->dp);
	pr_err("A15: %08lx B15: %08lx\n", regs->a15, regs->sp);
	pr_err("A16: %08lx B16: %08lx\n", regs->a16, regs->b16);
	pr_err("A17: %08lx B17: %08lx\n", regs->a17, regs->b17);
	pr_err("A18: %08lx B18: %08lx\n", regs->a18, regs->b18);
	pr_err("A19: %08lx B19: %08lx\n", regs->a19, regs->b19);
	pr_err("A20: %08lx B20: %08lx\n", regs->a20, regs->b20);
	pr_err("A21: %08lx B21: %08lx\n", regs->a21, regs->b21);
	pr_err("A22: %08lx B22: %08lx\n", regs->a22, regs->b22);
	pr_err("A23: %08lx B23: %08lx\n", regs->a23, regs->b23);
	pr_err("A24: %08lx B24: %08lx\n", regs->a24, regs->b24);
	pr_err("A25: %08lx B25: %08lx\n", regs->a25, regs->b25);
	pr_err("A26: %08lx B26: %08lx\n", regs->a26, regs->b26);
	pr_err("A27: %08lx B27: %08lx\n", regs->a27, regs->b27);
	pr_err("A28: %08lx B28: %08lx\n", regs->a28, regs->b28);
	pr_err("A29: %08lx B29: %08lx\n", regs->a29, regs->b29);
	pr_err("A30: %08lx B30: %08lx\n", regs->a30, regs->b30);
	pr_err("A31: %08lx B31: %08lx\n", regs->a31, regs->b31);
}
/*
 * Fatal error: print the message, error number, registers, process
 * info and a stack trace, then spin forever.  Never returns.
 */
void die(char *str, struct pt_regs *fp, int nr)
{
	console_verbose();
	pr_err("%s: %08x\n", str, nr);
	show_regs(fp);

	pr_err("Process %s (pid: %d, stackpage=%08lx)\n",
	       current->comm, current->pid, (PAGE_SIZE +
					     (unsigned long) current));

	dump_stack();
	/* halt the CPU; there is nothing left to return to */
	while (1)
		;
}
/*
 * Faults taken in user mode are handled via signal delivery by the
 * caller; only a kernel-mode fault is fatal.
 */
static void die_if_kernel(char *str, struct pt_regs *fp, int nr)
{
	if (!user_mode(fp))
		die(str, fp, nr);
}
/* Internal exceptions, indexed by IERR bit number.  Entry 8 (software
 * exception) is also used by process_exception() for the SXF case and
 * entry 9 for unrecognised exception types. */
static struct exception_info iexcept_table[10] = {
	{ "Oops - instruction fetch", SIGBUS, BUS_ADRERR },
	{ "Oops - fetch packet", SIGBUS, BUS_ADRERR },
	{ "Oops - execute packet", SIGILL, ILL_ILLOPC },
	{ "Oops - undefined instruction", SIGILL, ILL_ILLOPC },
	{ "Oops - resource conflict", SIGILL, ILL_ILLOPC },
	{ "Oops - resource access", SIGILL, ILL_PRVREG },
	{ "Oops - privilege", SIGILL, ILL_PRVOPC },
	{ "Oops - loops buffer", SIGILL, ILL_ILLOPC },
	{ "Oops - software exception", SIGILL, ILL_ILLTRP },
	{ "Oops - unknown exception", SIGILL, ILL_ILLOPC }
};
/* External exceptions, indexed by SoC event number.
 *
 * Events 0-118 are generic external bus errors; events 119-127 are the
 * memory-protection and EMC faults.  The 119 identical leading entries
 * are written with a GNU designated-range initializer instead of being
 * spelled out one per line, which makes the table maintainable and the
 * special trailing entries obvious. */
static struct exception_info eexcept_table[128] = {
	[0 ... 118] = { "Oops - external exception", SIGBUS, BUS_ADRERR },
	[119] = { "Oops - CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
	[120] = { "Oops - CPU memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
	[121] = { "Oops - DMA memory protection fault in L1P", SIGSEGV, SEGV_ACCERR },
	[122] = { "Oops - CPU memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
	[123] = { "Oops - DMA memory protection fault in L1D", SIGSEGV, SEGV_ACCERR },
	[124] = { "Oops - CPU memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
	[125] = { "Oops - DMA memory protection fault in L2", SIGSEGV, SEGV_ACCERR },
	[126] = { "Oops - EMC CPU memory protection fault", SIGSEGV, SEGV_ACCERR },
	[127] = { "Oops - EMC bus error", SIGBUS, BUS_ADRERR }
};
/*
 * Deliver the signal described by @except_info for the fault recorded
 * in @regs.  Kernel-mode faults die() first; breakpoint traps
 * (TRAP_BRKPT) skip the diagnostic message.
 */
static void do_trap(struct exception_info *except_info, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	siginfo_t info;

	if (except_info->code != TRAP_BRKPT)
		pr_err("TRAP: %s PC[0x%lx] signo[%d] code[%d]\n",
		       except_info->kernel_str, regs->pc,
		       except_info->signo, except_info->code);

	die_if_kernel(except_info->kernel_str, regs, addr);

	info.si_signo = except_info->signo;
	info.si_errno = 0;
	info.si_code = except_info->code;
	info.si_addr = (void __user *)addr;

	force_sig_info(except_info->signo, &info, current);
}
/*
 * Process an internal exception (non maskable)
 *
 * Drains the IEXCEPT report word bit by bit: a BKPT opcode at the
 * faulting PC is delivered as a SIGTRAP breakpoint, everything else is
 * dispatched through iexcept_table.  Always returns 0.
 */
static int process_iexcept(struct pt_regs *regs)
{
	unsigned int iexcept_report = get_iexcept();
	unsigned int iexcept_num;

	ack_exception(EXCEPT_TYPE_IXF);

	pr_err("IEXCEPT: PC[0x%lx]\n", regs->pc);

	while (iexcept_report) {
		iexcept_num = __ffs(iexcept_report);
		iexcept_report &= ~(1 << iexcept_num);
		set_iexcept(iexcept_report);
		if (*(unsigned int *)regs->pc == BKPT_OPCODE) {
			/* This is a breakpoint */
			struct exception_info bkpt_exception = {
				"Oops - undefined instruction",
				SIGTRAP, TRAP_BRKPT
			};
			do_trap(&bkpt_exception, regs);
			/* NOTE(review): clears the low 8 pending bits after a
			 * breakpoint — presumably they all stem from the same
			 * fetch packet; confirm against the C6x TRM. */
			iexcept_report &= ~(0xFF);
			set_iexcept(iexcept_report);
			continue;
		}
		do_trap(&iexcept_table[iexcept_num], regs);
	}
	return 0;
}
/*
 * Process an external exception (maskable)
 *
 * Drains every pending SoC exception event, delivering a trap for
 * each via eexcept_table, then acknowledges the external-exception
 * condition.
 */
static void process_eexcept(struct pt_regs *regs)
{
	int evt;

	pr_err("EEXCEPT: PC[0x%lx]\n", regs->pc);

	while ((evt = soc_get_exception()) >= 0)
		do_trap(&eexcept_table[evt], regs);

	ack_exception(EXCEPT_TYPE_EXC);
}
/*
 * Main exception processing
 *
 * Loops while exception types are pending, handling NMI, internal and
 * external exceptions specially; software and unknown exceptions are
 * acknowledged and delivered through iexcept_table.  Returns 1 when an
 * internal exception asked to abort processing, 0 otherwise.
 */
asmlinkage int process_exception(struct pt_regs *regs)
{
	unsigned int type;
	unsigned int type_num;
	unsigned int ie_num = 9; /* default is unknown exception */

	while ((type = get_except_type()) != 0) {
		type_num = fls(type) - 1;

		switch (type_num) {
		case EXCEPT_TYPE_NXF:
			ack_exception(EXCEPT_TYPE_NXF);
			if (c6x_nmi_handler)
				(c6x_nmi_handler)(regs);
			else
				pr_alert("NMI interrupt!\n");
			break;

		case EXCEPT_TYPE_IXF:
			if (process_iexcept(regs))
				return 1;
			break;

		case EXCEPT_TYPE_EXC:
			process_eexcept(regs);
			break;

		case EXCEPT_TYPE_SXF:
			ie_num = 8; /* software exception */
			/* fall through */
		default:
			ack_exception(type_num);
			do_trap(&iexcept_table[ie_num], regs);
			break;
		}
	}
	return 0;
}
/* Maximum number of stack words examined by show_stack(). */
static int kstack_depth_to_print = 48;

/*
 * Scan [stack, endstack) for words that look like kernel text
 * addresses and print them as a (heuristic) call trace.
 */
static void show_trace(unsigned long *stack, unsigned long *endstack)
{
	unsigned long addr;
	int i;

	pr_debug("Call trace:");
	i = 0;
	while (stack + 1 <= endstack) {
		addr = *stack++;
		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
			if (i % 5 == 0)
				pr_debug("\n	    ");
#endif
			pr_debug(" [<%08lx>]", addr);
			print_symbol(" %s\n", addr);
			i++;
		}
	}
	pr_debug("\n");
}
/*
 * Hex-dump up to kstack_depth_to_print words of @task's kernel stack
 * (or the current stack when @stack is NULL), then print a heuristic
 * call trace via show_trace().
 */
void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long *p, *endstack;
	int i;

	if (!stack) {
		if (task && task != current)
			/* We know this is a kernel stack,
			   so this is the start/end */
			stack = (unsigned long *)thread_saved_ksp(task);
		else
			stack = (unsigned long *)&stack;
	}
	/* round up to the top of the THREAD_SIZE-aligned stack area */
	endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1)
				     & -THREAD_SIZE);

	pr_debug("Stack from %08lx:", (unsigned long)stack);
	for (i = 0, p = stack; i < kstack_depth_to_print; i++) {
		if (p + 1 > endstack)
			break;
		if (i % 8 == 0)
			pr_cont("\n	    ");
		pr_cont(" %08lx", *p++);
	}
	pr_cont("\n");
	show_trace(stack, endstack);
}
/*
 * Generic BUG() support hook: a reported BUG address is valid only if
 * it lies in kernel text.
 */
int is_valid_bugaddr(unsigned long addr)
{
	return __kernel_text_address(addr);
}
| gpl-2.0 |
aksalj/kernel_rpi | tools/testing/selftests/net/socket.c | 4456 | 1914 | #include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
/* One socket(2) invocation to attempt, with its expected outcome. */
struct socket_testcase {
	int	domain;
	int	type;
	int	protocol;

	/* 0 = valid file descriptor
	 * -foo = error foo
	 */
	int	expect;

	/* If non-zero, accept EAFNOSUPPORT to handle the case
	 * of the protocol not being configured into the kernel.
	 */
	int	nosupport_ok;
};

/* Table driven by run_tests(): valid/invalid domain/type/protocol combos. */
static struct socket_testcase tests[] = {
	{ AF_MAX, 0, 0, -EAFNOSUPPORT, 0 },
	{ AF_INET, SOCK_STREAM, IPPROTO_TCP, 0, 1 },
	{ AF_INET, SOCK_DGRAM, IPPROTO_TCP, -EPROTONOSUPPORT, 1 },
	{ AF_INET, SOCK_DGRAM, IPPROTO_UDP, 0, 1 },
	{ AF_INET, SOCK_STREAM, IPPROTO_UDP, -EPROTONOSUPPORT, 1 },
};
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define ERR_STRING_SZ 64
/*
 * Walk the table of socket(2) test cases and verify each call either
 * succeeds or fails with the expected errno.
 *
 * Returns 0 when every case behaves as expected, -1 on the first
 * mismatch (after printing a diagnostic to stderr).
 */
static int run_tests(void)
{
	char err_string1[ERR_STRING_SZ];
	char err_string2[ERR_STRING_SZ];
	int i, err;

	err = 0;
	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct socket_testcase *s = &tests[i];
		int fd;

		fd = socket(s->domain, s->type, s->protocol);
		if (fd < 0) {
			/* A protocol left out of the kernel config may
			 * legitimately report EAFNOSUPPORT instead. */
			if (s->nosupport_ok &&
			    errno == EAFNOSUPPORT)
				continue;

			if (s->expect < 0 &&
			    errno == -s->expect)
				continue;

			strerror_r(-s->expect, err_string1, ERR_STRING_SZ);
			strerror_r(errno, err_string2, ERR_STRING_SZ);

			fprintf(stderr, "socket(%d, %d, %d) expected "
				"err (%s) got (%s)\n",
				s->domain, s->type, s->protocol,
				err_string1, err_string2);

			err = -1;
			break;
		} else {
			close(fd);

			if (s->expect < 0) {
				/* The call succeeded although an error was
				 * expected.  errno is meaningless after a
				 * successful call, so describe the error we
				 * expected rather than a stale errno, and
				 * say "got success" (the original message
				 * had it backwards). */
				strerror_r(-s->expect, err_string1,
					   ERR_STRING_SZ);

				fprintf(stderr, "socket(%d, %d, %d) expected "
					"err (%s) got success\n",
					s->domain, s->type, s->protocol,
					err_string1);

				err = -1;
				break;
			}
		}
	}

	return err;
}
/* Exit status mirrors the test result: 0 on success, -1 on failure. */
int main(void)
{
	return run_tests();
}
| gpl-2.0 |
KylinUI/android_kernel_samsung_hlte | drivers/tty/serial/mfd.c | 4968 | 37169 | /*
* mfd.c: driver for High Speed UART device of Intel Medfield platform
*
* Refer pxa.c, 8250.c and some other drivers in drivers/serial/
*
* (C) Copyright 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
/* Notes:
* 1. DMA channel allocation: 0/1 channel are assigned to port 0,
* 2/3 chan to port 1, 4/5 chan to port 3. Even number chans
* are used for RX, odd chans for TX
*
* 2. The RI/DSR/DCD/DTR are not pinned out, DCD & DSR are always
* asserted, only when the HW is reset the DDCD and DDSR will
* be triggered
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial_mfd.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#define HSU_DMA_BUF_SIZE 2048
#define chan_readl(chan, offset) readl(chan->reg + offset)
#define chan_writel(chan, offset, val) writel(val, chan->reg + offset)
#define mfd_readl(obj, offset) readl(obj->reg + offset)
#define mfd_writel(obj, offset, val) writel(val, obj->reg + offset)
static int hsu_dma_enable;
module_param(hsu_dma_enable, int, 0);
MODULE_PARM_DESC(hsu_dma_enable,
"It is a bitmap to set working mode, if bit[x] is 1, then port[x] will work in DMA mode, otherwise in PIO mode.");
/* One DMA bounce buffer (TX or RX) and the current transfer offset. */
struct hsu_dma_buffer {
	u8 *buf;		/* CPU address of the buffer */
	dma_addr_t dma_addr;	/* bus address programmed into the channel */
	u32 dma_size;		/* total buffer size in bytes */
	u32 ofs;		/* bytes queued in the in-flight transfer */
};

/* One HSU DMA channel; even ids are RX, odd ids are TX (see file header). */
struct hsu_dma_chan {
	u32 id;
	enum dma_data_direction dirt;
	struct uart_hsu_port *uport;	/* owning port */
	void __iomem *reg;		/* channel register window */
};

/* Per-port state wrapping the generic uart_port. */
struct uart_hsu_port {
	struct uart_port port;
	unsigned char ier;		/* shadow of UART_IER */
	unsigned char lcr;		/* shadow of UART_LCR */
	unsigned char mcr;		/* shadow of UART_MCR */
	unsigned int lsr_break_flag;	/* break saved from console xmit */
	char name[12];
	int index;
	struct device *dev;

	struct hsu_dma_chan *txc;
	struct hsu_dma_chan *rxc;
	struct hsu_dma_buffer txbuf;
	struct hsu_dma_buffer rxbuf;
	int use_dma;	/* flag for DMA/PIO */
	int running;
	int dma_tx_on;	/* a TX DMA transfer is currently in flight */
};

/* Top level data structure of HSU */
struct hsu_port {
	void __iomem *reg;
	unsigned long paddr;
	unsigned long iolen;
	u32 irq;

	struct uart_hsu_port port[3];
	struct hsu_dma_chan chans[10];

	struct dentry *debugfs;
};
/*
 * Read a UART register.  Registers above UART_MSR live in 32-bit slots
 * (stride 4); the classic 8250-compatible set is byte wide.
 */
static inline unsigned int serial_in(struct uart_hsu_port *up, int offset)
{
	if (offset > UART_MSR)
		return readl(up->port.membase + (offset << 2));

	return (unsigned int)readb(up->port.membase + offset);
}
/*
 * Write a UART register; same layout rule as serial_in() — 32-bit
 * slots above UART_MSR, single bytes below.
 */
static inline void serial_out(struct uart_hsu_port *up, int offset, int value)
{
	if (offset > UART_MSR)
		writel(value, up->port.membase + (offset << 2));
	else
		writeb(value & 0xff, up->port.membase + offset);
}
#ifdef CONFIG_DEBUG_FS
#define HSU_REGS_BUFSIZE 1024
/*
 * debugfs read handler: render the UART registers of one HSU port into
 * a text snapshot and copy the requested slice to user space.
 */
static ssize_t port_show_regs(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct uart_hsu_port *up = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MFD HSU port[%d] regs:\n", up->index);
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"IER: \t\t0x%08x\n", serial_in(up, UART_IER));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"IIR: \t\t0x%08x\n", serial_in(up, UART_IIR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"LCR: \t\t0x%08x\n", serial_in(up, UART_LCR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MCR: \t\t0x%08x\n", serial_in(up, UART_MCR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"LSR: \t\t0x%08x\n", serial_in(up, UART_LSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MSR: \t\t0x%08x\n", serial_in(up, UART_MSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"FOR: \t\t0x%08x\n", serial_in(up, UART_FOR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"PS: \t\t0x%08x\n", serial_in(up, UART_PS));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MUL: \t\t0x%08x\n", serial_in(up, UART_MUL));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"DIV: \t\t0x%08x\n", serial_in(up, UART_DIV));

	/* snprintf returns would-have-written counts, so clamp len */
	if (len > HSU_REGS_BUFSIZE)
		len = HSU_REGS_BUFSIZE;

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}
/*
 * debugfs read handler: render the registers of one HSU DMA channel
 * into a text snapshot and copy the requested slice to user space.
 *
 * Bug fix: the D1/D2/D3 descriptor registers were all labelled
 * "D0SAR"/"D0TSR" in the output (copy-paste); each label now matches
 * the register actually read.
 */
static ssize_t dma_show_regs(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hsu_dma_chan *chan = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(HSU_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MFD HSU DMA channel [%d] regs:\n", chan->id);
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"CR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_CR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"DCR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_DCR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"BSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_BSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"MOTSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_MOTSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D0SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0SAR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D0TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D0TSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D1SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1SAR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D1TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D1TSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D2SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2SAR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D2TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D2TSR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D3SAR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3SAR));
	len += snprintf(buf + len, HSU_REGS_BUFSIZE - len,
			"D3TSR: \t\t0x%08x\n", chan_readl(chan, HSU_CH_D3TSR));

	/* snprintf returns would-have-written counts, so clamp len */
	if (len > HSU_REGS_BUFSIZE)
		len = HSU_REGS_BUFSIZE;

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}
/* debugfs file ops for the per-port register dump. */
static const struct file_operations port_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= port_show_regs,
	.llseek		= default_llseek,
};

/* debugfs file ops for the per-DMA-channel register dump. */
static const struct file_operations dma_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dma_show_regs,
	.llseek		= default_llseek,
};
/*
 * Create the "hsu" debugfs directory with one register-dump file per
 * UART port (3) and per used DMA channel (6).
 * Returns 0 on success, -ENOMEM if the directory cannot be created.
 */
static int hsu_debugfs_init(struct hsu_port *hsu)
{
	int i;
	char name[32];

	hsu->debugfs = debugfs_create_dir("hsu", NULL);
	if (!hsu->debugfs)
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		snprintf(name, sizeof(name), "port_%d_regs", i);
		debugfs_create_file(name, S_IFREG | S_IRUGO,
			hsu->debugfs, (void *)(&hsu->port[i]), &port_regs_ops);
	}

	for (i = 0; i < 6; i++) {
		snprintf(name, sizeof(name), "dma_chan_%d_regs", i);
		debugfs_create_file(name, S_IFREG | S_IRUGO,
			hsu->debugfs, (void *)&hsu->chans[i], &dma_regs_ops);
	}

	return 0;
}
/*
 * Tear down the debugfs tree created by hsu_debugfs_init().
 * debugfs_remove_recursive() is a no-op for a NULL dentry, so no
 * guard is needed here.
 */
static void hsu_debugfs_remove(struct hsu_port *hsu)
{
	debugfs_remove_recursive(hsu->debugfs);
}
#else
/* Stubs used when CONFIG_DEBUG_FS is disabled. */
static inline int hsu_debugfs_init(struct hsu_port *hsu)
{
	return 0;
}

static inline void hsu_debugfs_remove(struct hsu_port *hsu)
{
}
#endif /* CONFIG_DEBUG_FS */
/* uart_ops .enable_ms: enable modem-status interrupts in IER. */
static void serial_hsu_enable_ms(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);

	up->ier |= UART_IER_MSI;
	serial_out(up, UART_IER, up->ier);
}
/*
 * Kick (or re-arm) a TX DMA transfer from the circ buffer.
 *
 * Accounts for the previously completed transfer (dbuf->ofs bytes),
 * then programs the channel with the next contiguous chunk up to the
 * end of the circ buffer.  Called with the port lock held; a no-op
 * while a transfer is already in flight (dma_tx_on).
 */
void hsu_dma_tx(struct uart_hsu_port *up)
{
	struct circ_buf *xmit = &up->port.state->xmit;
	struct hsu_dma_buffer *dbuf = &up->txbuf;
	int count;

	/* test_and_set_bit may be better, but anyway it's in lock protected mode */
	if (up->dma_tx_on)
		return;

	/* Update the circ buf info */
	xmit->tail += dbuf->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;

	up->port.icount.tx += dbuf->ofs;
	dbuf->ofs = 0;

	/* Disable the channel */
	chan_writel(up->txc, HSU_CH_CR, 0x0);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
		dma_sync_single_for_device(up->port.dev,
					   dbuf->dma_addr,
					   dbuf->dma_size,
					   DMA_TO_DEVICE);

		/* contiguous bytes until the circ buffer wraps */
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		dbuf->ofs = count;

		/* Reprogram the channel */
		chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
		chan_writel(up->txc, HSU_CH_D0TSR, count);

		/* Reenable the channel */
		chan_writel(up->txc, HSU_CH_DCR, 0x1
						 | (0x1 << 8)
						 | (0x1 << 16)
						 | (0x1 << 24));
		up->dma_tx_on = 1;
		chan_writel(up->txc, HSU_CH_CR, 0x1);
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);
}
/* The buffer is already cache coherent */
/*
 * Arm an RX DMA channel to fill @dbuf from its start: program burst
 * size, threshold and descriptor 0, then start the channel.
 */
void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
{
	dbuf->ofs = 0;

	chan_writel(rxc, HSU_CH_BSR, 32);
	chan_writel(rxc, HSU_CH_MOTSR, 4);

	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	chan_writel(rxc, HSU_CH_CR, 0x3);
}
/* Protected by spin_lock_irqsave(port->lock) */
/*
 * uart_ops .start_tx: kick a DMA transfer in DMA mode, or enable the
 * THRI interrupt so transmit_chars() feeds the FIFO in PIO mode.
 */
static void serial_hsu_start_tx(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);

	if (up->use_dma) {
		hsu_dma_tx(up);
	} else if (!(up->ier & UART_IER_THRI)) {
		up->ier |= UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}
}
/*
 * uart_ops .stop_tx: halt the TX DMA channel in DMA mode, or mask the
 * THRI interrupt in PIO mode.
 */
static void serial_hsu_stop_tx(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	struct hsu_dma_chan *txc = up->txc;

	if (up->use_dma)
		chan_writel(txc, HSU_CH_CR, 0x0);
	else if (up->ier & UART_IER_THRI) {
		up->ier &= ~UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}
}
/* This is always called in spinlock protected mode, so
 * modify timeout timer is safe here */
/*
 * Drain the bytes an RX DMA transfer has deposited so far into the TTY
 * layer and re-arm the channel.  The transferred byte count is derived
 * from how far D0SAR has advanced past the buffer's bus address.
 */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
	struct hsu_dma_buffer *dbuf = &up->rxbuf;
	struct hsu_dma_chan *chan = up->rxc;
	struct uart_port *port = &up->port;
	struct tty_struct *tty = port->state->port.tty;
	int count;

	if (!tty)
		return;

	/*
	 * First need to know how many is already transferred,
	 * then check if its a timeout DMA irq, and return
	 * the trail bytes out, push them up and reenable the
	 * channel
	 */

	/* Timeout IRQ, need wait some time, see Errata 2 */
	if (int_sts & 0xf00)
		udelay(2);

	/* Stop the channel */
	chan_writel(chan, HSU_CH_CR, 0x0);

	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
	if (!count) {
		/* Restart the channel before we leave */
		chan_writel(chan, HSU_CH_CR, 0x3);
		return;
	}

	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
				dbuf->dma_size, DMA_FROM_DEVICE);

	/*
	 * Head will only wrap around when we recycle
	 * the DMA buffer, and when that happens, we
	 * explicitly set tail to 0. So head will
	 * always be greater than tail.
	 */
	tty_insert_flip_string(tty, dbuf->buf, count);
	port->icount.rx += count;

	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
				   dbuf->dma_size, DMA_FROM_DEVICE);

	/* Reprogram the channel */
	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(chan, HSU_CH_DCR, 0x1
					 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	tty_flip_buffer_push(tty);

	chan_writel(chan, HSU_CH_CR, 0x3);
}
/*
 * uart_ops .stop_rx: in DMA mode switch the RX channel out of run
 * state; in PIO mode mask the line-status interrupt and stop reporting
 * data-ready conditions.
 */
static void serial_hsu_stop_rx(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	struct hsu_dma_chan *chan = up->rxc;

	if (up->use_dma)
		chan_writel(chan, HSU_CH_CR, 0x2);
	else {
		up->ier &= ~UART_IER_RLSI;
		up->port.read_status_mask &= ~UART_LSR_DR;
		serial_out(up, UART_IER, up->ier);
	}
}
/*
 * PIO receive path: pull characters out of the RX FIFO while LSR
 * reports data ready (bounded to 256 iterations), classifying
 * break/parity/framing/overrun conditions, then push the accumulated
 * characters to the TTY layer.  @status is the live LSR value and is
 * updated in place for the caller.
 */
static inline void receive_chars(struct uart_hsu_port *up, int *status)
{
	struct tty_struct *tty = up->port.state->port.tty;
	unsigned int ch, flag;
	unsigned int max_count = 256;

	if (!tty)
		return;

	do {
		ch = serial_in(up, UART_RX);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
					UART_LSR_FE | UART_LSR_OE))) {

			dev_warn(up->dev, "We really rush into ERR/BI case"
				"status = 0x%02x", *status);
			/* For statistics only */
			if (*status & UART_LSR_BI) {
				/* break implies bogus parity/framing bits */
				*status &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (*status & UART_LSR_PE)
				up->port.icount.parity++;
			else if (*status & UART_LSR_FE)
				up->port.icount.frame++;
			if (*status & UART_LSR_OE)
				up->port.icount.overrun++;

			/* Mask off conditions which should be ignored. */
			*status &= up->port.read_status_mask;

#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
			if (up->port.cons &&
				up->port.cons->index == up->port.line) {
				/* Recover the break flag from console xmit */
				*status |= up->lsr_break_flag;
				up->lsr_break_flag = 0;
			}
#endif
			if (*status & UART_LSR_BI) {
				flag = TTY_BREAK;
			} else if (*status & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (*status & UART_LSR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&up->port, ch))
			goto ignore_char;

		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
	ignore_char:
		*status = serial_in(up, UART_LSR);
	} while ((*status & UART_LSR_DR) && max_count--);
	tty_flip_buffer_push(tty);
}
/* Refill the TX FIFO from the software transmit ring (PIO mode). */
static void transmit_chars(struct uart_hsu_port *up)
{
	struct circ_buf *xmit = &up->port.state->xmit;
	int budget;

	/* A pending x_char (in-band flow control) always jumps the queue */
	if (up->port.x_char) {
		serial_out(up, UART_TX, up->port.x_char);
		up->port.icount.tx++;
		up->port.x_char = 0;
		return;
	}

	/* Nothing to send, or sending blocked: quiesce the transmitter */
	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
		serial_hsu_stop_tx(&up->port);
		return;
	}

	/* The IRQ fires at TX FIFO half-empty, so push half a FIFO's worth */
	budget = up->port.fifosize / 2;
	do {
		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		up->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--budget > 0);

	/* Wake up writers once the ring drains below the threshold */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	if (uart_circ_empty(xmit))
		serial_hsu_stop_tx(&up->port);
}
/* Fold modem-status line deltas into the counters and notify waiters. */
static inline void check_modem_status(struct uart_hsu_port *up)
{
	int msr = serial_in(up, UART_MSR);

	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		up->port.icount.rng++;
	if (msr & UART_MSR_DDSR)
		up->port.icount.dsr++;
	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&up->port, msr & UART_MSR_DCD);
	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&up->port, msr & UART_MSR_CTS);

	wake_up_interruptible(&up->port.state->port.delta_msr_wait);
}
/*
* This handles the interrupt from one port.
*/
/*
 * In DMA mode only line-status errors and modem status are handled here
 * (data moves through dma_chan_irq).  In PIO mode this drives the full
 * receive/transmit path based on IIR/LSR.
 */
static irqreturn_t port_irq(int irq, void *dev_id)
{
struct uart_hsu_port *up = dev_id;
unsigned int iir, lsr;
unsigned long flags;
/* Port not started (or already shut down): not our interrupt */
if (unlikely(!up->running))
return IRQ_NONE;
spin_lock_irqsave(&up->port.lock, flags);
if (up->use_dma) {
/* DMA mode: only report unexpected line errors, check modem lines */
lsr = serial_in(up, UART_LSR);
if (unlikely(lsr & (UART_LSR_BI | UART_LSR_PE |
UART_LSR_FE | UART_LSR_OE)))
dev_warn(up->dev,
"Got lsr irq while using DMA, lsr = 0x%2x\n",
lsr);
check_modem_status(up);
spin_unlock_irqrestore(&up->port.lock, flags);
return IRQ_HANDLED;
}
iir = serial_in(up, UART_IIR);
if (iir & UART_IIR_NO_INT) {
spin_unlock_irqrestore(&up->port.lock, flags);
return IRQ_NONE;
}
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
receive_chars(up, &lsr);
check_modem_status(up);
/* lsr will be renewed during the receive_chars */
if (lsr & UART_LSR_THRE)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
return IRQ_HANDLED;
}
/*
 * Service one DMA channel interrupt.  The channel status register must be
 * read even when we do nothing else, since reading clears the IRQ
 * (Errata 5, HSD 2900918).
 */
static inline void dma_chan_irq(struct hsu_dma_chan *chan)
{
	struct uart_hsu_port *up = chan->uport;
	unsigned long flags;
	u32 int_sts;

	spin_lock_irqsave(&up->port.lock, flags);

	if (!up->use_dma || !up->running)
		goto exit;

	/*
	 * No matter what situation, need read clear the IRQ status
	 * There is a bug, see Errata 5, HSD 2900918
	 */
	int_sts = chan_readl(chan, HSU_CH_SR);

	if (chan->dirt == DMA_FROM_DEVICE) {
		/* RX channel */
		hsu_dma_rx(up, int_sts);
	} else if (chan->dirt == DMA_TO_DEVICE) {
		/* TX channel: stop it and kick off the next transfer */
		chan_writel(chan, HSU_CH_CR, 0x0);
		up->dma_tx_on = 0;
		hsu_dma_tx(up);
	}
exit:
	spin_unlock_irqrestore(&up->port.lock, flags);
}
/* Top-level DMA IRQ: dispatch every channel flagged in the global ISR. */
static irqreturn_t dma_irq(int irq, void *dev_id)
{
	struct hsu_port *hsu = dev_id;
	u32 int_sts, i;

	int_sts = mfd_readl(hsu, HSU_GBL_DMAISR);

	/* Currently we only have 6 channels may be used */
	for (i = 0; i < 6; i++) {
		if (int_sts & (1 << i))
			dma_chan_irq(&hsu->chans[i]);
	}

	return IRQ_HANDLED;
}
/* Report whether the transmitter (FIFO and shift register) has drained. */
static unsigned int serial_hsu_tx_empty(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned long flags;
	unsigned int lsr;

	spin_lock_irqsave(&up->port.lock, flags);
	lsr = serial_in(up, UART_LSR);
	spin_unlock_irqrestore(&up->port.lock, flags);

	return (lsr & UART_LSR_TEMT) ? TIOCSER_TEMT : 0;
}
/* Translate the MSR line states into TIOCM_* bits for the serial core. */
static unsigned int serial_hsu_get_mctrl(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned char msr = serial_in(up, UART_MSR);
	unsigned int mctrl = 0;

	if (msr & UART_MSR_DCD)
		mctrl |= TIOCM_CAR;
	if (msr & UART_MSR_RI)
		mctrl |= TIOCM_RNG;
	if (msr & UART_MSR_DSR)
		mctrl |= TIOCM_DSR;
	if (msr & UART_MSR_CTS)
		mctrl |= TIOCM_CTS;

	return mctrl;
}
/*
 * Drive the modem-control outputs.  Sticky driver-owned bits kept in
 * up->mcr (e.g. AFE set by set_termios) are preserved.
 */
static void serial_hsu_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned char mcr = up->mcr;

	if (mctrl & TIOCM_RTS)
		mcr |= UART_MCR_RTS;
	if (mctrl & TIOCM_DTR)
		mcr |= UART_MCR_DTR;
	if (mctrl & TIOCM_OUT1)
		mcr |= UART_MCR_OUT1;
	if (mctrl & TIOCM_OUT2)
		mcr |= UART_MCR_OUT2;
	if (mctrl & TIOCM_LOOP)
		mcr |= UART_MCR_LOOP;

	serial_out(up, UART_MCR, mcr);
}
/* Assert (break_state == -1) or clear the break condition on the TX line. */
static void serial_hsu_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned long flags;

	spin_lock_irqsave(&up->port.lock, flags);
	up->lcr = (break_state == -1) ? (up->lcr | UART_LCR_SBC)
				      : (up->lcr & ~UART_LCR_SBC);
	serial_out(up, UART_LCR, up->lcr);
	spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
* What special to do:
* 1. chose the 64B fifo mode
* 2. start dma or pio depends on configuration
* 3. we only allocate dma memory when needed
*/
/*
 * Returns 0 unconditionally; a failed RX-buffer allocation silently
 * degrades the port to PIO mode rather than failing the open.
 */
static int serial_hsu_startup(struct uart_port *port)
{
struct uart_hsu_port *up =
container_of(port, struct uart_hsu_port, port);
unsigned long flags;
/* Keep the device powered while the port is open (paired in shutdown) */
pm_runtime_get_sync(up->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
*/
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
serial_out(up, UART_FCR, 0);
/* Clear the interrupt registers. */
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
/* Now, initialize the UART, default is 8n1 */
serial_out(up, UART_LCR, UART_LCR_WLEN8);
spin_lock_irqsave(&up->port.lock, flags);
up->port.mctrl |= TIOCM_OUT2;
serial_hsu_set_mctrl(&up->port, up->port.mctrl);
/*
* Finally, enable interrupts. Note: Modem status interrupts
* are set via set_termios(), which will be occurring imminently
* anyway, so we don't enable them here.
*/
if (!up->use_dma)
up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
else
up->ier = 0;
serial_out(up, UART_IER, up->ier);
spin_unlock_irqrestore(&up->port.lock, flags);
/* DMA init */
if (up->use_dma) {
struct hsu_dma_buffer *dbuf;
struct circ_buf *xmit = &port->state->xmit;
up->dma_tx_on = 0;
/* First allocate the RX buffer */
dbuf = &up->rxbuf;
dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
if (!dbuf->buf) {
/* Fall back to PIO when the RX buffer can't be allocated */
up->use_dma = 0;
goto exit;
}
/* NOTE(review): dma_map_single() results are not checked with
* dma_mapping_error() here or below - confirm against DMA API. */
dbuf->dma_addr = dma_map_single(port->dev,
dbuf->buf,
HSU_DMA_BUF_SIZE,
DMA_FROM_DEVICE);
dbuf->dma_size = HSU_DMA_BUF_SIZE;
/* Start the RX channel right now */
hsu_dma_start_rx_chan(up->rxc, dbuf);
/* Next init the TX DMA: maps the uart circ buffer directly */
dbuf = &up->txbuf;
dbuf->buf = xmit->buf;
dbuf->dma_addr = dma_map_single(port->dev,
dbuf->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
dbuf->dma_size = UART_XMIT_SIZE;
/* This should not be changed all around */
chan_writel(up->txc, HSU_CH_BSR, 32);
chan_writel(up->txc, HSU_CH_MOTSR, 4);
dbuf->ofs = 0;
}
exit:
/* And clear the interrupt registers again for luck. */
(void) serial_in(up, UART_LSR);
(void) serial_in(up, UART_RX);
(void) serial_in(up, UART_IIR);
(void) serial_in(up, UART_MSR);
up->running = 1;
return 0;
}
/* Tear the port down: mask IRQs, drop OUT2, kill break, drain FIFOs. */
static void serial_hsu_shutdown(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned long flags;

	/* Disable interrupts from this port */
	up->ier = 0;
	serial_out(up, UART_IER, 0);
	up->running = 0;

	spin_lock_irqsave(&up->port.lock, flags);
	up->port.mctrl &= ~TIOCM_OUT2;
	serial_hsu_set_mctrl(&up->port, up->port.mctrl);
	spin_unlock_irqrestore(&up->port.lock, flags);

	/* Disable break condition and FIFOs */
	serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
		   UART_FCR_CLEAR_RCVR |
		   UART_FCR_CLEAR_XMIT);
	serial_out(up, UART_FCR, 0);

	/* Balance the pm_runtime_get_sync() done in startup */
	pm_runtime_put(up->dev);
}
/*
 * Program line discipline parameters: word length, parity, stop bits,
 * baud (via DLL/DLM plus the HSU-specific MUL/PS scalers) and the FIFO
 * trigger levels.  Register writes happen under the port lock.
 */
static void
serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_hsu_port *up =
container_of(port, struct uart_hsu_port, port);
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
u32 ps, mul;
/* Word length -> LCR bits */
switch (termios->c_cflag & CSIZE) {
case CS5:
cval = UART_LCR_WLEN5;
break;
case CS6:
cval = UART_LCR_WLEN6;
break;
case CS7:
cval = UART_LCR_WLEN7;
break;
default:
case CS8:
cval = UART_LCR_WLEN8;
break;
}
/* CMSPAR isn't supported by this driver */
termios->c_cflag &= ~CMSPAR;
if (termios->c_cflag & CSTOPB)
cval |= UART_LCR_STOP;
if (termios->c_cflag & PARENB)
cval |= UART_LCR_PARITY;
if (!(termios->c_cflag & PARODD))
cval |= UART_LCR_EPAR;
/*
* The base clk is 50Mhz, and the baud rate come from:
* baud = 50M * MUL / (DIV * PS * DLAB)
*
* For those basic low baud rate we can get the direct
* scalar from 2746800, like 115200 = 2746800/24. For those
* higher baud rate, we handle them case by case, mainly by
* adjusting the MUL/PS registers, and DIV register is kept
* as default value 0x3d09 to make things simple
*/
baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
/* quot == 1 means the rate is fully encoded in MUL/PS below */
quot = 1;
ps = 0x10;
mul = 0x3600;
switch (baud) {
case 3500000:
mul = 0x3345;
ps = 0xC;
break;
case 1843200:
mul = 0x2400;
break;
case 3000000:
case 2500000:
case 2000000:
case 1500000:
case 1000000:
case 500000:
/* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
mul = baud / 500000 * 0x9C4;
break;
default:
/* Use uart_get_divisor to get quot for other baud rates */
quot = 0;
}
if (!quot)
quot = uart_get_divisor(port, baud);
/* Choose the RX FIFO trigger level according to the line speed */
if ((up->port.uartclk / quot) < (2400 * 16))
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_1B;
else if ((up->port.uartclk / quot) < (230400 * 16))
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_16B;
else
fcr = UART_FCR_ENABLE_FIFO | UART_FCR_HSU_64_32B;
fcr |= UART_FCR_HSU_64B_FIFO;
/*
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
spin_lock_irqsave(&up->port.lock, flags);
/* Update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
if (termios->c_iflag & INPCK)
up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
if (termios->c_iflag & (BRKINT | PARMRK))
up->port.read_status_mask |= UART_LSR_BI;
/* Characters to ignore */
up->port.ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
if (termios->c_iflag & IGNBRK) {
up->port.ignore_status_mask |= UART_LSR_BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
up->port.ignore_status_mask |= UART_LSR_OE;
}
/* Ignore all characters if CREAD is not set */
if ((termios->c_cflag & CREAD) == 0)
up->port.ignore_status_mask |= UART_LSR_DR;
/*
* CTS flow control flag and modem status interrupts, disable
* MSI by default
*/
up->ier &= ~UART_IER_MSI;
if (UART_ENABLE_MS(&up->port, termios->c_cflag))
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
if (termios->c_cflag & CRTSCTS)
up->mcr |= UART_MCR_AFE | UART_MCR_RTS;
else
up->mcr &= ~UART_MCR_AFE;
serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
serial_out(up, UART_LCR, cval); /* reset DLAB */
serial_out(up, UART_MUL, mul); /* set MUL */
serial_out(up, UART_PS, ps); /* set PS */
up->lcr = cval; /* Save LCR */
serial_hsu_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static void
serial_hsu_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
/* Intentionally empty: power is handled via pm_runtime calls in
 * startup/shutdown rather than this per-port hook. */
}
static void serial_hsu_release_port(struct uart_port *port)
{
/* Nothing to release: MMIO resources are claimed once in hsu_global_init() */
}
static int serial_hsu_request_port(struct uart_port *port)
{
/* Resources are already owned by the driver (see hsu_global_init()) */
return 0;
}
static void serial_hsu_config_port(struct uart_port *port, int flags)
{
struct uart_hsu_port *up =
container_of(port, struct uart_hsu_port, port);
/* Fixed hardware: always report the MFD port type */
up->port.type = PORT_MFD;
}
static int
serial_hsu_verify_port(struct uart_port *port, struct serial_struct *ser)
{
/* We don't want the core code to modify any port params */
/* Rejecting here makes TIOCSSERIAL a no-op for these fixed ports */
return -EINVAL;
}
/* Report the per-port name set up in hsu_global_init(), e.g. "hsu_port0". */
static const char *
serial_hsu_type(struct uart_port *port)
{
	return container_of(port, struct uart_hsu_port, port)->name;
}
/* Mainly for uart console use */
/* Per-index port lookup table, filled in by hsu_global_init() */
static struct uart_hsu_port *serial_hsu_ports[3];
/* Forward declaration; definition follows the console setup below */
static struct uart_driver serial_hsu_reg;
#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
/* Wait for transmitter & holding register to empty */
/*
 * Busy-waits (up to ~1ms) until LSR shows both THRE and TEMT.  Any break
 * seen while polling is latched into up->lsr_break_flag so the RX path
 * can recover it (see receive_chars).  With UPF_CONS_FLOW set it also
 * waits (up to ~1s) for CTS.
 */
static inline void wait_for_xmitr(struct uart_hsu_port *up)
{
unsigned int status, tmout = 1000;
/* Wait up to 1ms for the character to be sent. */
do {
status = serial_in(up, UART_LSR);
if (status & UART_LSR_BI)
up->lsr_break_flag = UART_LSR_BI;
if (--tmout == 0)
break;
udelay(1);
} while (!(status & BOTH_EMPTY));
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
tmout = 1000000;
while (--tmout &&
((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0))
udelay(1);
}
}
/* Console character output: poll until the FIFO drains, then write one char. */
static void serial_hsu_console_putchar(struct uart_port *port, int ch)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);

	wait_for_xmitr(up);
	serial_out(up, UART_TX, ch);
}
/*
* Print a string to the serial port trying not to disturb
* any possible real use of the port...
*
* The console_lock must be held when we get here.
*/
static void
serial_hsu_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_hsu_port *up = serial_hsu_ports[co->index];
unsigned long flags;
unsigned int ier;
int locked = 1;
local_irq_save(flags);
/* During sysrq/oops the port lock may already be held - don't deadlock */
if (up->port.sysrq)
locked = 0;
else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
} else
spin_lock(&up->port.lock);
/* First save the IER then disable the interrupts */
ier = serial_in(up, UART_IER);
serial_out(up, UART_IER, 0);
uart_console_write(&up->port, s, count, serial_hsu_console_putchar);
/*
* Finally, wait for transmitter to become empty
* and restore the IER
*/
wait_for_xmitr(up);
serial_out(up, UART_IER, ier);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
}
static struct console serial_hsu_console;
/* Parse console= options (default 115200 8n1) and bind to the chosen port. */
static int __init
serial_hsu_console_setup(struct console *co, char *options)
{
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	struct uart_hsu_port *up;

	/* Clamp an unset or out-of-range index to the first port */
	if (co->index == -1 || co->index >= serial_hsu_reg.nr)
		co->index = 0;

	up = serial_hsu_ports[co->index];
	if (up == NULL)
		return -ENODEV;

	if (options != NULL)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
/* Console on ttyMFD<n>; .data links back to the uart driver below */
static struct console serial_hsu_console = {
.name = "ttyMFD",
.write = serial_hsu_console_write,
.device = uart_console_device,
.setup = serial_hsu_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &serial_hsu_reg,
};
#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
#else
#define SERIAL_HSU_CONSOLE NULL
#endif
/* uart_ops hookup consumed by the serial core for all three HSU ports */
struct uart_ops serial_hsu_pops = {
.tx_empty = serial_hsu_tx_empty,
.set_mctrl = serial_hsu_set_mctrl,
.get_mctrl = serial_hsu_get_mctrl,
.stop_tx = serial_hsu_stop_tx,
.start_tx = serial_hsu_start_tx,
.stop_rx = serial_hsu_stop_rx,
.enable_ms = serial_hsu_enable_ms,
.break_ctl = serial_hsu_break_ctl,
.startup = serial_hsu_startup,
.shutdown = serial_hsu_shutdown,
.set_termios = serial_hsu_set_termios,
.pm = serial_hsu_pm,
.type = serial_hsu_type,
.release_port = serial_hsu_release_port,
.request_port = serial_hsu_request_port,
.config_port = serial_hsu_config_port,
.verify_port = serial_hsu_verify_port,
};
/* uart driver: 3 ports named ttyMFD*, minor numbers starting at 128 */
static struct uart_driver serial_hsu_reg = {
.owner = THIS_MODULE,
.driver_name = "MFD serial",
.dev_name = "ttyMFD",
.major = TTY_MAJOR,
.minor = 128,
.nr = 3,
.cons = SERIAL_HSU_CONSOLE,
};
#ifdef CONFIG_PM
/* Legacy PCI suspend: park the uart port, then power the function down. */
static int serial_hsu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct uart_hsu_port *up = pci_get_drvdata(pdev);

	/* The DMA controller function (0x081E) has no uart port attached */
	if (up && pdev->device != 0x081E)
		uart_suspend_port(&serial_hsu_reg, &up->port);

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
/* Legacy PCI resume: repower, restore config space, resume the uart port. */
static int serial_hsu_resume(struct pci_dev *pdev)
{
	struct uart_hsu_port *up = pci_get_drvdata(pdev);
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Best effort: warn but keep going if the device won't re-enable */
	ret = pci_enable_device(pdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "HSU: can't re-enable device, try to continue\n");

	/* The DMA controller function (0x081E) has no uart port attached */
	if (up && pdev->device != 0x081E)
		uart_resume_port(&serial_hsu_reg, &up->port);

	return 0;
}
#else
#define serial_hsu_suspend NULL
#define serial_hsu_resume NULL
#endif
#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM idle: schedule the real suspend 500 ms out, or stay busy. */
static int serial_hsu_runtime_idle(struct device *dev)
{
	return pm_schedule_suspend(dev, 500) ? -EBUSY : 0;
}
static int serial_hsu_runtime_suspend(struct device *dev)
{
/* No per-device state to save; always allow runtime suspend */
return 0;
}
static int serial_hsu_runtime_resume(struct device *dev)
{
/* No per-device state to restore; always succeed */
return 0;
}
#else
#define serial_hsu_runtime_idle NULL
#define serial_hsu_runtime_suspend NULL
#define serial_hsu_runtime_resume NULL
#endif
/* Runtime-PM only; legacy suspend/resume go through the PCI hooks above */
static const struct dev_pm_ops serial_hsu_pm_ops = {
.runtime_suspend = serial_hsu_runtime_suspend,
.runtime_resume = serial_hsu_runtime_resume,
.runtime_idle = serial_hsu_runtime_idle,
};
/* temp global pointer before we settle down on using one or four PCI dev */
static struct hsu_port *phsu;

/*
 * serial_hsu_probe - bind one of the four HSU PCI functions
 *
 * Devices 0x081B-0x081D are the three UART ports; 0x081E is the shared
 * internal DMA controller.  Relies on hsu_global_init() having populated
 * the global 'phsu' beforehand.
 *
 * Returns 0 on success or a negative errno.
 */
static int serial_hsu_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct uart_hsu_port *uport;
	int index, ret;

	printk(KERN_INFO "HSU: found PCI Serial controller(ID: %04x:%04x)\n",
	       pdev->vendor, pdev->device);

	switch (pdev->device) {
	case 0x081B:
		index = 0;
		break;
	case 0x081C:
		index = 1;
		break;
	case 0x081D:
		index = 2;
		break;
	case 0x081E:
		/* internal DMA controller */
		index = 3;
		break;
	default:
		dev_err(&pdev->dev, "HSU: out of index!");
		return -ENODEV;
	}

	/*
	 * Fix: hsu_global_init() may have failed (kzalloc/ioremap) and left
	 * phsu NULL; bail out instead of dereferencing it below.
	 */
	if (!phsu)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	if (index == 3) {
		/* DMA controller */
		ret = request_irq(pdev->irq, dma_irq, 0, "hsu_dma", phsu);
		if (ret) {
			dev_err(&pdev->dev, "can not get IRQ\n");
			goto err_disable;
		}
		pci_set_drvdata(pdev, phsu);
	} else {
		/* UART port 0~2 */
		uport = &phsu->port[index];
		uport->port.irq = pdev->irq;
		uport->port.dev = &pdev->dev;
		uport->dev = &pdev->dev;

		ret = request_irq(pdev->irq, port_irq, 0, uport->name, uport);
		if (ret) {
			dev_err(&pdev->dev, "can not get IRQ\n");
			goto err_disable;
		}
		uart_add_one_port(&serial_hsu_reg, &uport->port);
		pci_set_drvdata(pdev, uport);
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return ret;
}
static void hsu_global_init(void)
{
struct hsu_port *hsu;
struct uart_hsu_port *uport;
struct hsu_dma_chan *dchan;
int i, ret;
hsu = kzalloc(sizeof(struct hsu_port), GFP_KERNEL);
if (!hsu)
return;
/* Get basic io resource and map it */
hsu->paddr = 0xffa28000;
hsu->iolen = 0x1000;
if (!(request_mem_region(hsu->paddr, hsu->iolen, "HSU global")))
pr_warning("HSU: error in request mem region\n");
hsu->reg = ioremap_nocache((unsigned long)hsu->paddr, hsu->iolen);
if (!hsu->reg) {
pr_err("HSU: error in ioremap\n");
ret = -ENOMEM;
goto err_free_region;
}
/* Initialise the 3 UART ports */
uport = hsu->port;
for (i = 0; i < 3; i++) {
uport->port.type = PORT_MFD;
uport->port.iotype = UPIO_MEM;
uport->port.mapbase = (resource_size_t)hsu->paddr
+ HSU_PORT_REG_OFFSET
+ i * HSU_PORT_REG_LENGTH;
uport->port.membase = hsu->reg + HSU_PORT_REG_OFFSET
+ i * HSU_PORT_REG_LENGTH;
sprintf(uport->name, "hsu_port%d", i);
uport->port.fifosize = 64;
uport->port.ops = &serial_hsu_pops;
uport->port.line = i;
uport->port.flags = UPF_IOREMAP;
/* set the scalable maxim support rate to 2746800 bps */
uport->port.uartclk = 115200 * 24 * 16;
uport->running = 0;
uport->txc = &hsu->chans[i * 2];
uport->rxc = &hsu->chans[i * 2 + 1];
serial_hsu_ports[i] = uport;
uport->index = i;
if (hsu_dma_enable & (1<<i))
uport->use_dma = 1;
else
uport->use_dma = 0;
uport++;
}
/* Initialise 6 dma channels */
dchan = hsu->chans;
for (i = 0; i < 6; i++) {
dchan->id = i;
dchan->dirt = (i & 0x1) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
dchan->uport = &hsu->port[i/2];
dchan->reg = hsu->reg + HSU_DMA_CHANS_REG_OFFSET +
i * HSU_DMA_CHANS_REG_LENGTH;
dchan++;
}
phsu = hsu;
hsu_debugfs_init(hsu);
return;
err_free_region:
release_mem_region(hsu->paddr, hsu->iolen);
kfree(hsu);
return;
}
/*
 * PCI removal: the three UART functions carry a uart port to detach; the
 * DMA controller function (0x081E) only holds the shared hsu_port pointer.
 */
static void serial_hsu_remove(struct pci_dev *pdev)
{
	void *drvdata = pci_get_drvdata(pdev);
	struct uart_hsu_port *up;

	if (!drvdata)
		return;

	/* Undo the runtime-PM handover done at probe time */
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	/* For port 0/1/2, drvdata is the address of uart_hsu_port */
	if (pdev->device != 0x081E) {
		up = drvdata;
		uart_remove_one_port(&serial_hsu_reg, &up->port);
	}

	pci_set_drvdata(pdev, NULL);
	free_irq(pdev->irq, drvdata);
	pci_disable_device(pdev);
}
/* First 3 are UART ports, and the 4th is the DMA */
/* Device IDs match the index switch in serial_hsu_probe() */
static const struct pci_device_id pci_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081E) },
{},
};
/* PCI glue; legacy suspend/resume hooks plus runtime PM via dev_pm_ops */
static struct pci_driver hsu_pci_driver = {
.name = "HSU serial",
.id_table = pci_ids,
.probe = serial_hsu_probe,
.remove = __devexit_p(serial_hsu_remove),
.suspend = serial_hsu_suspend,
.resume = serial_hsu_resume,
.driver = {
.pm = &serial_hsu_pm_ops,
},
};
/*
 * Module entry: set up the shared MMIO block, register the uart driver,
 * then the PCI driver.
 *
 * Fix: if pci_register_driver() fails, the uart driver must be
 * unregistered again - the old code leaked the registration on that path.
 */
static int __init hsu_pci_init(void)
{
	int ret;

	hsu_global_init();

	ret = uart_register_driver(&serial_hsu_reg);
	if (ret)
		return ret;

	ret = pci_register_driver(&hsu_pci_driver);
	if (ret)
		uart_unregister_driver(&serial_hsu_reg);

	return ret;
}
static void __exit hsu_pci_exit(void)
{
/* Unregister in reverse order of hsu_pci_init(), then free the globals */
pci_unregister_driver(&hsu_pci_driver);
uart_unregister_driver(&serial_hsu_reg);
hsu_debugfs_remove(phsu);
kfree(phsu);
}
module_init(hsu_pci_init);
module_exit(hsu_pci_exit);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:medfield-hsu");
| gpl-2.0 |
akhilnarang/ThugLife_sprout | drivers/misc/mediatek/combo/drv_wlan/mt6620/wlan/mgmt/cnm_mem.c | 105 | 35975 | /*
** $Id: //Department/DaVinci/BRANCHES/MT6620_WIFI_DRIVER_V2_3/mgmt/cnm_mem.c#2 $
*/
/*! \file "cnm_mem.c"
\brief This file contain the management function of packet buffers and
generic memory alloc/free function for mailbox message.
A data packet has a fixed size of buffer, but a management
packet can be equipped with a variable size of buffer.
*/
/*
** $Log: cnm_mem.c $
*
* 07 17 2012 yuche.tsai
* NULL
* Compile no error before trial run.
*
* 03 14 2012 wh.su
* [WCXRP00001173] [MT6620 Wi-Fi][Driver] Adding the ICS Tethering WPA2-PSK supporting
* Add code from 2.2
*
* 11 17 2011 tsaiyuan.hsu
* [WCXRP00001115] [MT6620 Wi-Fi][DRV] avoid deactivating staRec when changing state 3 to 3.
* initialize fgNeedResp.
*
* 11 17 2011 tsaiyuan.hsu
* [WCXRP00001115] [MT6620 Wi-Fi][DRV] avoid deactivating staRec when changing state 3 to 3.
* avoid deactivating staRec when changing state from 3 to 3.
*
* 02 01 2011 cm.chang
* [WCXRP00000415] [MT6620 Wi-Fi][Driver] Check if any memory leakage happens when uninitializing in DGB mode
* .
*
* 01 26 2011 cm.chang
* [WCXRP00000395] [MT6620 Wi-Fi][Driver][FW] Search STA_REC with additional net type index argument
* Allocate system RAM if fixed message or mgmt buffer is not available
*
* 01 26 2011 cm.chang
* [WCXRP00000395] [MT6620 Wi-Fi][Driver][FW] Search STA_REC with additional net type index argument
* .
*
* 01 25 2011 yuche.tsai
* [WCXRP00000388] [Volunteer Patch][MT6620][Driver/Fw] change Station Type in station record.
* Change Station Type in Station Record, Modify MACRO definition for getting station type & network type index & Role.
*
* 12 13 2010 cp.wu
* [WCXRP00000260] [MT6620 Wi-Fi][Driver][Firmware] Create V1.1 branch for both firmware and driver
* create branch for Wi-Fi driver v1.1
*
* 12 07 2010 cm.chang
* [WCXRP00000239] MT6620 Wi-Fi][Driver][FW] Merge concurrent branch back to maintrunk
* 1. BSSINFO include RLM parameter
* 2. free all sta records when network is disconnected
*
* 11 29 2010 cm.chang
* [WCXRP00000210] [MT6620 Wi-Fi][Driver][FW] Set RCPI value in STA_REC for initial TX rate selection of auto-rate algorithm
* Sync RCPI of STA_REC to FW as reference of initial TX rate
*
* 11 25 2010 yuche.tsai
* NULL
* Update SLT Function for QoS Support and not be affected by fixed rate function.
*
* 10 18 2010 cp.wu
* [WCXRP00000053] [MT6620 Wi-Fi][Driver] Reset incomplete and might leads to BSOD when entering RF test with AIS associated
* 1. remove redundant variables in STA_REC structure
* 2. add STA-REC uninitialization routine for clearing pending events
*
* 10 13 2010 cm.chang
* [WCXRP00000094] [MT6620 Wi-Fi][Driver] Connect to 2.4GHz AP, Driver crash.
* Add exception handle when cmd buffer is not available
*
* 10 12 2010 cp.wu
* [WCXRP00000084] [MT6620 Wi-Fi][Driver][FW] Add fixed rate support for distance test
* add HT (802.11n) fixed rate support.
*
* 10 08 2010 cp.wu
* [WCXRP00000084] [MT6620 Wi-Fi][Driver][FW] Add fixed rate support for distance test
* adding fixed rate support for distance test. (from registry setting)
*
* 09 24 2010 wh.su
* NULL
* [WCXRP00005002][MT6620 Wi-Fi][Driver] Eliminate Linux Compile Warning.
*
* 09 21 2010 cp.wu
* [WCXRP00000053] [MT6620 Wi-Fi][Driver] Reset incomplete and might leads to BSOD when entering RF test with AIS associated
* Do a complete reset with STA-REC null checking for RF test re-entry
*
* 09 16 2010 cm.chang
* NULL
* Change conditional compiling options for BOW
*
* 09 10 2010 cm.chang
* NULL
* Always update Beacon content if FW sync OBSS info
*
* 08 24 2010 cm.chang
* NULL
* Support RLM initail channel of Ad-hoc, P2P and BOW
*
* 08 23 2010 chinghwa.yu
* NULL
* Update for BOW.
*
* 08 20 2010 cm.chang
* NULL
* Migrate RLM code to host from FW
*
* 08 19 2010 wh.su
* NULL
* adding the tx pkt call back handle for countermeasure.
*
* 07 08 2010 cp.wu
*
* [WPD00003833] [MT6620 and MT5931] Driver migration - move to new repository.
*
* 07 08 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Check draft RLM code for HT cap
*
* 07 07 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Support state of STA record change from 1 to 1
*
* 07 05 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Fix correct structure size in cnmStaSendDeactivateCmd()
*
* 07 05 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* 1) ignore RSN checking when RSN is not turned on.
* 2) set STA-REC deactivation callback as NULL
* 3) add variable initialization API based on PHY configuration
*
* 07 02 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* spin lock target revised
*
* 07 02 2010 cp.wu
* [WPD00003833][MT6620 and MT5931] Driver migration
* change inner loop index from i to k.
*
* 07 01 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Support sync command of STA_REC
*
* 06 23 2010 yarco.yang
* [WPD00003837][MT6620]Data Path Refine
* Merge g_arStaRec[] into adapter->arStaRec[]
*
* 06 18 2010 cm.chang
* [WPD00003841][LITE Driver] Migrate RLM/CNM to host driver
* Provide cnmMgtPktAlloc() and alloc/free function of msg/buf
*
* 05 31 2010 yarco.yang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Add RX TSF Log Feature and ADDBA Rsp with DECLINE handling
*
* 05 28 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Support checking of duplicated buffer free
*
* 05 28 2010 wh.su
* [BORA00000626][MT6620] Refine the remove key flow for WHQL testing
* fixed the ad-hoc wpa-none send non-encrypted frame issue.
*
* 05 28 2010 kevin.huang
* [BORA00000794][WIFISYS][New Feature]Power Management Support
* Move define of STA_REC_NUM to config.h and rename to CFG_STA_REC_NUM
*
* 05 12 2010 kevin.huang
* [BORA00000794][WIFISYS][New Feature]Power Management Support
* Add Power Management - Legacy PS-POLL support.
*
* 04 28 2010 tehuang.liu
* [BORA00000605][WIFISYS] Phase3 Integration
* Modified some MQM-related data structures (SN counter, TX/RX BA table)
*
* 04 27 2010 tehuang.liu
* [BORA00000605][WIFISYS] Phase3 Integration
* Added new TX/RX BA tables in STA_REC
*
* 04 27 2010 tehuang.liu
* [BORA00000605][WIFISYS] Phase3 Integration
* Notify MQM, TXM, and RXM upon disconnection .
*
* 04 26 2010 tehuang.liu
* [BORA00000605][WIFISYS] Phase3 Integration
* Call mqm, txm, rxm functions upon disconnection
*
* 04 24 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* g_aprBssInfo[] depends on CFG_SUPPORT_P2P and CFG_SUPPORT_BOW
*
* 04 22 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* First draft code to support protection in AP mode
*
* 04 19 2010 kevin.huang
* [BORA00000714][WIFISYS][New Feature]Beacon Timeout Support
* Add Beacon Timeout Support
* * * * * * * * * * and will send Null frame to diagnose connection
*
* 04 09 2010 tehuang.liu
* [BORA00000605][WIFISYS] Phase3 Integration
* [BORA00000644] WiFi phase 4 integration
* * Added per-TID SN cache in STA_REC
*
* 04 07 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Different invoking order for WTBL entry of associated AP
*
* 03 29 2010 wh.su
* [BORA00000605][WIFISYS] Phase3 Integration
* move the wlan table alloc / free to change state function.
*
* 03 24 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Support power control
*
* 03 03 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Initialize StaRec->arStaWaitQueue
*
* 03 03 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Add debug message when no available pkt buffer
*
* 03 01 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Fixed STA_REC initialization bug: prStaRec->au2CachedSeqCtrl[k]
*
* 02 26 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Added fgIsWmmSupported in STA_RECORD_T.
*
* 02 26 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Added fgIsUapsdSupported in STA_RECORD_T
*
* 02 26 2010 kevin.huang
* [BORA00000603][WIFISYS] [New Feature] AAA Module Support
* add support of Driver STA_RECORD_T activation
*
* 02 13 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Added arTspecTable in STA_REC for TSPEC management
*
* 02 12 2010 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Enable mgmt buffer debug by default
*
* 02 12 2010 tehuang.liu
* [BORA00000569][WIFISYS] Phase 2 Integration Test
* Added BUFFER_SOURCE_BCN
*
* 02 04 2010 kevin.huang
* [BORA00000603][WIFISYS] [New Feature] AAA Module Support
* Add AAA Module Support, Revise Net Type to Net Type Index for array lookup
*
* 01 11 2010 kevin.huang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Add Deauth and Disassoc Handler
*
* 01 08 2010 cp.wu
* [BORA00000368]Integrate HIF part into BORA
* 1) separate wifi_var_emu.c/.h from wifi_var.c/.h
* * * * * * * * * 2) eliminate HIF_EMULATION code sections appeared in wifi_var/cnm_mem
* * * * * * * * * 3) use cnmMemAlloc() instead to allocate SRAM buffer
*
* 12 25 2009 tehuang.liu
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Integrated modifications for 1st connection (mainly on FW modules MQM, TXM, and RXM)
* * * * * * * MQM: BA handling
* * * * * * * TXM: Macros updates
* * * * * * * RXM: Macros/Duplicate Removal updates
*
* 12 24 2009 yarco.yang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* .
*
* 12 21 2009 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* Support several data buffer banks.
*
* 12 18 2009 cm.chang
* [BORA00000018]Integrate WIFI part into BORA for the 1st time
* .For new FPGA memory size
*
* Dec 9 2009 MTK02468
* [BORA00000337] To check in codes for FPGA emulation
* Removed DBGPRINT
*
* Dec 9 2009 mtk02752
* [BORA00000368] Integrate HIF part into BORA
* add cnmDataPktFree() for emulation loopback purpose
*
* Dec 3 2009 mtk01461
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Fix warning of null pointer
*
* Dec 3 2009 mtk01461
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Add cnmGetStaRecByAddress() and add fgIsInUse flag in STA_RECORD_T
*
* Nov 23 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Assign ucBufferSource in function cnmMgtPktAlloc()
*
* Nov 23 2009 mtk02468
* [BORA00000337] To check in codes for FPGA emulation
* Added packet redispatch function calls
*
* Nov 13 2009 mtk01084
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* enable packet re-usable in current emulation driver
*
* Nov 12 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* 1. Add new function cnmGetStaRecByIndex()
* 2. Rename STA_REC_T to STA_RECORD_T
*
* Nov 9 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Call cnmDataPktDispatch() in cnmPktFree()
*
* Nov 2 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Remove definition of pragma section code
*
* Oct 28 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
*
*
* Oct 23 2009 mtk01461
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Fix lint warning
*
* Oct 23 2009 mtk01461
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
* Fix typo
*
* Oct 12 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
*
*
* Oct 8 2009 mtk01104
* [BORA00000018] Integrate WIFI part into BORA for the 1st time
*
**
*/
/*******************************************************************************
* C O M P I L E R F L A G S
********************************************************************************
*/
/*******************************************************************************
* E X T E R N A L R E F E R E N C E S
********************************************************************************
*/
#include "precomp.h"
/*******************************************************************************
* C O N S T A N T S
********************************************************************************
*/
/*******************************************************************************
* D A T A T Y P E S
********************************************************************************
*/
/*******************************************************************************
* P U B L I C D A T A
********************************************************************************
*/
/*******************************************************************************
* P R I V A T E D A T A
********************************************************************************
*/
/*******************************************************************************
* M A C R O S
********************************************************************************
*/
/*******************************************************************************
* F U N C T I O N D E C L A R A T I O N S
********************************************************************************
*/
static VOID
cnmStaRecHandleEventPkt(P_ADAPTER_T prAdapter, P_CMD_INFO_T prCmdInfo, PUINT_8 pucEventBuf);
static VOID cnmStaSendUpdateCmd(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec, BOOLEAN fgNeedResp);
static VOID cnmStaSendRemoveCmd(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec);
/*******************************************************************************
* F U N C T I O N S
********************************************************************************
*/
/*----------------------------------------------------------------------------*/
/*!
 * \brief Allocate an MSDU_INFO_T descriptor plus a management frame buffer.
 *
 * Pops a free descriptor from the adapter's free MSDU list and attaches a
 * frame buffer of u4Length bytes allocated from the BUF pool via
 * cnmMemAlloc().  If the buffer allocation fails, the descriptor is pushed
 * back onto the free list and NULL is returned.
 *
 * \param[in] prAdapter Adapter context
 * \param[in] u4Length  Requested management frame buffer length in bytes
 *
 * \return Pointer to a ready-to-use MSDU_INFO_T, or NULL on failure
 */
/*----------------------------------------------------------------------------*/
P_MSDU_INFO_T cnmMgtPktAlloc(P_ADAPTER_T prAdapter, UINT_32 u4Length)
{
	P_MSDU_INFO_T prMsduInfo;
	P_QUE_T prQueList;

	KAL_SPIN_LOCK_DECLARATION();

	ASSERT(prAdapter);
	prQueList = &prAdapter->rTxCtrl.rFreeMsduInfoList;

	/* Get a free MSDU_INFO_T under the TX MSDU list lock */
	KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
	QUEUE_REMOVE_HEAD(prQueList, prMsduInfo, P_MSDU_INFO_T);
	KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);

	if (prMsduInfo) {
		/* Attach the frame buffer and tag the descriptor as MGMT TX */
		prMsduInfo->prPacket = cnmMemAlloc(prAdapter, RAM_TYPE_BUF, u4Length);
		prMsduInfo->eSrc = TX_PACKET_MGMT;

		/* Buffer pool exhausted: return the descriptor and fail */
		if (prMsduInfo->prPacket == NULL) {
			KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
			QUEUE_INSERT_TAIL(prQueList, &prMsduInfo->rQueEntry);
			KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
			prMsduInfo = NULL;
		}
	}
#if DBG
	if (prMsduInfo == NULL) {
		DBGLOG(MEM, WARN, ("\n"));
		/* NOTE(review): %ld with UINT_32 counters assumes 32-bit long;
		 * verify against the platform's DBGLOG expectations.
		 */
		DBGLOG(MEM, WARN, ("MgtDesc#=%ld\n", prQueList->u4NumElem));
#if CFG_DBG_MGT_BUF
		DBGLOG(MEM, WARN, ("rMgtBufInfo: alloc#=%ld, free#=%ld, null#=%ld\n",
				   prAdapter->rMgtBufInfo.u4AllocCount,
				   prAdapter->rMgtBufInfo.u4FreeCount,
				   prAdapter->rMgtBufInfo.u4AllocNullCount));
#endif
		DBGLOG(MEM, WARN, ("\n"));
	}
#endif
	return prMsduInfo;
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Release a management MSDU_INFO_T previously obtained from
 *        cnmMgtPktAlloc().
 *
 * Frees the attached frame buffer back to the BUF pool and returns the
 * descriptor to the adapter's free MSDU list.
 *
 * \param[in] prAdapter  Adapter context
 * \param[in] prMsduInfo Descriptor to release; its prPacket is freed and
 *                       cleared
 *
 * \return none
 */
/*----------------------------------------------------------------------------*/
VOID cnmMgtPktFree(P_ADAPTER_T prAdapter, P_MSDU_INFO_T prMsduInfo)
{
	P_QUE_T prQueList;

	KAL_SPIN_LOCK_DECLARATION();

	ASSERT(prAdapter);
	ASSERT(prMsduInfo);

	prQueList = &prAdapter->rTxCtrl.rFreeMsduInfoList;

	/* A management descriptor is expected to carry a frame buffer */
	ASSERT(prMsduInfo->prPacket);
	if (prMsduInfo->prPacket) {
		cnmMemFree(prAdapter, prMsduInfo->prPacket);
		prMsduInfo->prPacket = NULL;
	}

	KAL_ACQUIRE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
	/* Fix: terminate the macro statement with ';' like the call site in
	 * cnmMgtPktAlloc(); the original relied on the macro body supplying
	 * a statement boundary.
	 */
	QUEUE_INSERT_TAIL(prQueList, &prMsduInfo->rQueEntry);
	KAL_RELEASE_SPIN_LOCK(prAdapter, SPIN_LOCK_TX_MSDU_INFO_LIST);
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Initialize the MGMT and MSG memory pools.
 *
 * Both pool descriptors are zeroed, pointed at their backing storage and
 * have every block marked FREE (bit set) in the free-blocks bitmap.
 *
 * \param[in] prAdapter Adapter context owning both pools
 *
 * \return (none)
 */
/*----------------------------------------------------------------------------*/
VOID cnmMemInit(P_ADAPTER_T prAdapter)
{
	P_BUF_INFO_T prMgtInfo = &prAdapter->rMgtBufInfo;
	P_BUF_INFO_T prMsgInfo = &prAdapter->rMsgBufInfo;

	/* Management pool: backed by the cached MGT buffer */
	kalMemZero(prMgtInfo, sizeof(prAdapter->rMgtBufInfo));
	prMgtInfo->pucBuf = prAdapter->pucMgtBufCached;
	/* All blocks available; a set bit means FREE */
	prMgtInfo->rFreeBlocksBitmap = (BUF_BITMAP) BITS(0, MAX_NUM_OF_BUF_BLOCKS - 1);

	/* Message pool: backed by the embedded message buffer */
	kalMemZero(prMsgInfo, sizeof(prAdapter->rMsgBufInfo));
	prMsgInfo->pucBuf = &prAdapter->aucMsgBuf[0];
	/* All blocks available; a set bit means FREE */
	prMsgInfo->rFreeBlocksBitmap = (BUF_BITMAP) BITS(0, MAX_NUM_OF_BUF_BLOCKS - 1);
} /* end of cnmMemInit() */
/*----------------------------------------------------------------------------*/
/*!
 * \brief Allocate MGMT/MSG memory pool.
 *
 * First-fit scan over a bitmap of fixed-size blocks; if no contiguous run
 * of free blocks is found, falls back to a dynamic kalMemAlloc() on Linux.
 *
 * \param[in] eRamType Target RAM type.
 *                     RAM_TYPE_MSG blk_sz=16 bytes, RAM_TYPE_BUF blk_sz=256 bytes.
 *                     Requests > 256 bytes are forced to RAM_TYPE_BUF.
 * \param[in] u4Length Length of the buffer to allocate.
 *
 * \retval !NULL Pointer to the start address of allocated memory.
 * \retval NULL  Fail to allocate memory
 */
/*----------------------------------------------------------------------------*/
PVOID cnmMemAlloc(IN P_ADAPTER_T prAdapter, IN ENUM_RAM_TYPE_T eRamType, IN UINT_32 u4Length)
{
	P_BUF_INFO_T prBufInfo;
	BUF_BITMAP rRequiredBitmap;
	UINT_32 u4BlockNum;
	UINT_32 i, u4BlkSzInPower;
	PVOID pvMemory;

	KAL_SPIN_LOCK_DECLARATION();

	ASSERT(prAdapter);
	ASSERT(u4Length);

	if (eRamType == RAM_TYPE_MSG && u4Length <= 256) {
		prBufInfo = &prAdapter->rMsgBufInfo;
		u4BlkSzInPower = MSG_BUF_BLOCK_SIZE_IN_POWER_OF_2;
		/* Round the request up to whole MSG blocks */
		u4Length += (MSG_BUF_BLOCK_SIZE - 1);
		u4BlockNum = u4Length >> MSG_BUF_BLOCK_SIZE_IN_POWER_OF_2;

		ASSERT(u4BlockNum <= MAX_NUM_OF_BUF_BLOCKS);
	} else {
		/* Large or explicit BUF requests go to the MGT pool */
		eRamType = RAM_TYPE_BUF;
		prBufInfo = &prAdapter->rMgtBufInfo;
		u4BlkSzInPower = MGT_BUF_BLOCK_SIZE_IN_POWER_OF_2;
		/* Round the request up to whole MGT blocks */
		u4Length += (MGT_BUF_BLOCK_SIZE - 1);
		u4BlockNum = u4Length >> MGT_BUF_BLOCK_SIZE_IN_POWER_OF_2;

		ASSERT(u4BlockNum <= MAX_NUM_OF_BUF_BLOCKS);
	}

#if CFG_DBG_MGT_BUF
	/* NOTE(review): counter is incremented outside the pool spinlock;
	 * concurrent allocators may lose updates — debug stat only.
	 */
	prBufInfo->u4AllocCount++;
#endif

	KAL_ACQUIRE_SPIN_LOCK(prAdapter,
			      eRamType == RAM_TYPE_MSG ? SPIN_LOCK_MSG_BUF : SPIN_LOCK_MGT_BUF);

	if ((u4BlockNum > 0) && (u4BlockNum <= MAX_NUM_OF_BUF_BLOCKS)) {
		/* Convert number of block into bit cluster */
		rRequiredBitmap = BITS(0, u4BlockNum - 1);

		/* First-fit: slide the required-bit window across the bitmap */
		for (i = 0; i <= (MAX_NUM_OF_BUF_BLOCKS - u4BlockNum); i++) {
			/* Have available memory blocks */
			if ((prBufInfo->rFreeBlocksBitmap & rRequiredBitmap)
			    == rRequiredBitmap) {
				/* Clear corresponding bits of allocated memory blocks */
				prBufInfo->rFreeBlocksBitmap &= ~rRequiredBitmap;

				/* Store how many blocks be allocated, keyed by the
				 * starting block index, for cnmMemFree()
				 */
				prBufInfo->aucAllocatedBlockNum[i] = (UINT_8) u4BlockNum;

				KAL_RELEASE_SPIN_LOCK(prAdapter,
						      eRamType == RAM_TYPE_MSG ?
						      SPIN_LOCK_MSG_BUF : SPIN_LOCK_MGT_BUF);

				/* Return the start address of allocated memory */
				return (PVOID) (prBufInfo->pucBuf + (i << u4BlkSzInPower));
			}
			rRequiredBitmap <<= 1;
		}
	}

	/* Pool exhausted: fall back to dynamic allocation where supported.
	 * NOTE(review): u4Length already carries the block round-up padding
	 * here, so the dynamic allocation is slightly larger than requested.
	 */
#ifdef LINUX
	pvMemory = (PVOID) kalMemAlloc(u4Length, VIR_MEM_TYPE);
#else
	pvMemory = (PVOID) NULL;
#endif

#if CFG_DBG_MGT_BUF
	prBufInfo->u4AllocNullCount++;

	if (pvMemory) {
		prAdapter->u4MemAllocDynamicCount++;
	}
#endif

	KAL_RELEASE_SPIN_LOCK(prAdapter,
			      eRamType == RAM_TYPE_MSG ? SPIN_LOCK_MSG_BUF : SPIN_LOCK_MGT_BUF);

	return pvMemory;
} /* end of cnmMemAlloc() */
/*----------------------------------------------------------------------------*/
/*!
 * \brief Release memory to MGT/MSG memory pool.
 *
 * Determines by address range which pool (if any) the pointer belongs to;
 * addresses outside both pools are treated as dynamic allocations and
 * handed to kalMemFree() on Linux.
 *
 * \param pvMemory Start address of previous allocated memory
 *
 * \return (none)
 */
/*----------------------------------------------------------------------------*/
VOID cnmMemFree(IN P_ADAPTER_T prAdapter, IN PVOID pvMemory)
{
	P_BUF_INFO_T prBufInfo;
	UINT_32 u4BlockIndex;
	BUF_BITMAP rAllocatedBlocksBitmap;
	ENUM_RAM_TYPE_T eRamType;

	KAL_SPIN_LOCK_DECLARATION();

	ASSERT(prAdapter);
	ASSERT(pvMemory);
	if (!pvMemory) {
		return;
	}

	/* Judge it belongs to which RAM type.
	 * NOTE(review): the (UINT_32) pointer casts assume 32-bit addresses;
	 * this would truncate on a 64-bit build — confirm target platform.
	 */
	if (((UINT_32) pvMemory >= (UINT_32) &prAdapter->aucMsgBuf[0]) &&
	    ((UINT_32) pvMemory <= (UINT_32) &prAdapter->aucMsgBuf[MSG_BUFFER_SIZE - 1])) {

		prBufInfo = &prAdapter->rMsgBufInfo;
		u4BlockIndex = ((UINT_32) pvMemory - (UINT_32) prBufInfo->pucBuf)
		    >> MSG_BUF_BLOCK_SIZE_IN_POWER_OF_2;
		ASSERT(u4BlockIndex < MAX_NUM_OF_BUF_BLOCKS);
		eRamType = RAM_TYPE_MSG;
	} else if (((UINT_32) pvMemory >= (UINT_32) prAdapter->pucMgtBufCached) &&
		   ((UINT_32) pvMemory <=
		    ((UINT_32) prAdapter->pucMgtBufCached + MGT_BUFFER_SIZE - 1))) {
		prBufInfo = &prAdapter->rMgtBufInfo;
		u4BlockIndex = ((UINT_32) pvMemory - (UINT_32) prBufInfo->pucBuf)
		    >> MGT_BUF_BLOCK_SIZE_IN_POWER_OF_2;
		ASSERT(u4BlockIndex < MAX_NUM_OF_BUF_BLOCKS);
		eRamType = RAM_TYPE_BUF;
	} else {
		/* Address is outside both pools: dynamic allocation */
#ifdef LINUX
		/* For Linux, it is supported because size is not needed */
		kalMemFree(pvMemory, VIR_MEM_TYPE, 0);
#else
		/* For Windows, it is not supported because of no size argument */
		ASSERT(0);
#endif

#if CFG_DBG_MGT_BUF
		prAdapter->u4MemFreeDynamicCount++;
#endif
		return;
	}

	KAL_ACQUIRE_SPIN_LOCK(prAdapter,
			      eRamType == RAM_TYPE_MSG ? SPIN_LOCK_MSG_BUF : SPIN_LOCK_MGT_BUF);

#if CFG_DBG_MGT_BUF
	prBufInfo->u4FreeCount++;
#endif

	/* Convert the saved block count back into a bit cluster anchored at
	 * the segment's starting block index
	 */
	ASSERT(prBufInfo->aucAllocatedBlockNum[u4BlockIndex] > 0);
	rAllocatedBlocksBitmap = BITS(0, prBufInfo->aucAllocatedBlockNum[u4BlockIndex] - 1);
	rAllocatedBlocksBitmap <<= u4BlockIndex;

	/* Clear saved block count for this memory segment */
	prBufInfo->aucAllocatedBlockNum[u4BlockIndex] = 0;

	/* Set corresponding bit of released memory block (set bit == FREE) */
	prBufInfo->rFreeBlocksBitmap |= rAllocatedBlocksBitmap;

	KAL_RELEASE_SPIN_LOCK(prAdapter,
			      eRamType == RAM_TYPE_MSG ? SPIN_LOCK_MSG_BUF : SPIN_LOCK_MGT_BUF);

	return;
} /* end of cnmMemFree() */
/*----------------------------------------------------------------------------*/
/*!
 * \brief Initialize the station record table.
 *
 * Assigns each record its fixed table index and marks every record as
 * not in use.
 *
 * \param[in] prAdapter Adapter context owning the record array
 *
 * \return none
 */
/*----------------------------------------------------------------------------*/
VOID cnmStaRecInit(P_ADAPTER_T prAdapter)
{
	UINT_16 u2Idx;

	for (u2Idx = 0; u2Idx < CFG_STA_REC_NUM; u2Idx++) {
		P_STA_RECORD_T prRec = &prAdapter->arStaRec[u2Idx];

		prRec->ucIndex = (UINT_8) u2Idx;
		prRec->fgIsInUse = FALSE;
	}
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Tear down the station record table.
 *
 * Frees every record still marked in use, without syncing the removal to
 * the chip (fgSyncToChip == FALSE).
 *
 * \param[in] prAdapter Adapter context owning the record array
 *
 * \return none
 */
/*----------------------------------------------------------------------------*/
VOID cnmStaRecUninit(IN P_ADAPTER_T prAdapter)
{
	UINT_16 u2Idx;

	for (u2Idx = 0; u2Idx < CFG_STA_REC_NUM; u2Idx++) {
		P_STA_RECORD_T prRec = &prAdapter->arStaRec[u2Idx];

		if (prRec->fgIsInUse)
			cnmStaRecFree(prAdapter, prRec, FALSE);
	}
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Allocate a free station record for the given network type.
 *
 * Scans the record table for the first entry not in use, zero-initializes
 * it and sets up its default per-STA state (SN caches, wait queues, AMPDU
 * flags, TX queues).
 *
 * \param[in] prAdapter     Adapter context
 * \param[in] ucNetTypeIndex Network type index stored into the record
 *
 * \return Pointer to the initialized STA_RECORD_T, or NULL if the table
 *         is full
 */
/*----------------------------------------------------------------------------*/
P_STA_RECORD_T cnmStaRecAlloc(P_ADAPTER_T prAdapter, UINT_8 ucNetTypeIndex)
{
	P_STA_RECORD_T prStaRec;
	UINT_16 i, k;

	ASSERT(prAdapter);

	for (i = 0; i < CFG_STA_REC_NUM; i++) {
		prStaRec = &prAdapter->arStaRec[i];

		if (!prStaRec->fgIsInUse) {
			/*---- Initialize STA_REC_T here ----*/
			kalMemZero(prStaRec, sizeof(STA_RECORD_T));
			prStaRec->ucIndex = (UINT_8) i;
			prStaRec->ucNetTypeIndex = ucNetTypeIndex;
			prStaRec->fgIsInUse = TRUE;

			/* Fix: removed the dead "if (prStaRec->pucAssocReqIe)"
			 * free branch that followed kalMemZero() — the pointer
			 * is always NULL at that point, so the branch could
			 * never execute.  Any previously attached IE buffer is
			 * released in cnmStaRecFree() before the record is
			 * marked free.
			 */

			/* Initialize the SN caches for duplicate detection */
			for (k = 0; k < TID_NUM + 1; k++) {
				prStaRec->au2CachedSeqCtrl[k] = 0xFFFF;
			}

			/* Initialize SW TX queues in STA_REC */
			for (k = 0; k < STA_WAIT_QUEUE_NUM; k++) {
				LINK_INITIALIZE(&prStaRec->arStaWaitQueue[k]);
			}

			/* Default enable TX/RX AMPDU */
			prStaRec->fgTxAmpduEn = TRUE;
			prStaRec->fgRxAmpduEn = TRUE;

			for (k = 0; k < NUM_OF_PER_STA_TX_QUEUES; k++) {
				QUEUE_INITIALIZE(&prStaRec->arTxQueue[k]);
			}
			break;
		}
	}

	return (i < CFG_STA_REC_NUM) ? prStaRec : NULL;
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Release a station record back to the pool.
 *
 * Stops the record's timer, releases the cached association-request IEs,
 * deactivates the record in the queue manager and optionally syncs the
 * removal to the chip before marking the record free.
 *
 * \param[in] prAdapter    Adapter context
 * \param[in] prStaRec     Record to free
 * \param[in] fgSyncToChip TRUE to send a remove command to firmware
 *
 * \return none
 */
/*----------------------------------------------------------------------------*/
VOID cnmStaRecFree(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec, BOOLEAN fgSyncToChip)
{
	ASSERT(prAdapter);
	ASSERT(prStaRec);

	/* To do: free related resources, e.g. timers, buffers, etc */
	cnmTimerStopTimer(prAdapter, &prStaRec->rTxReqDoneOrRxRespTimer);
	prStaRec->fgTransmitKeyExist = FALSE;
	prStaRec->fgSetPwrMgtBit = FALSE;

	/* Release any association-request IE buffer attached to the record */
	if (prStaRec->pucAssocReqIe) {
		kalMemFree(prStaRec->pucAssocReqIe, VIR_MEM_TYPE, prStaRec->u2AssocReqIeLen);
		prStaRec->pucAssocReqIe = NULL;
		prStaRec->u2AssocReqIeLen = 0;
	}

	qmDeactivateStaRec(prAdapter, prStaRec->ucIndex);

	/* Sync the removal to firmware while the record is still in use */
	if (fgSyncToChip) {
		cnmStaSendRemoveCmd(prAdapter, prStaRec);
	}

	prStaRec->fgIsInUse = FALSE;

	return;
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Free every in-use station record that belongs to the given
 *        network type.
 *
 * \param[in] prAdapter     Adapter context
 * \param[in] eNetTypeIndex Network type whose records are to be freed
 * \param[in] fgSyncToChip  Forwarded to cnmStaRecFree()
 *
 * \return none
 */
/*----------------------------------------------------------------------------*/
VOID
cnmStaFreeAllStaByNetType(P_ADAPTER_T prAdapter,
			  ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex, BOOLEAN fgSyncToChip)
{
	UINT_16 u2Idx;

	for (u2Idx = 0; u2Idx < CFG_STA_REC_NUM; u2Idx++) {
		P_STA_RECORD_T prRec = &prAdapter->arStaRec[u2Idx];

		if (prRec->fgIsInUse && prRec->ucNetTypeIndex == (UINT_8) eNetTypeIndex)
			cnmStaRecFree(prAdapter, prRec, fgSyncToChip);
	}
}
/*----------------------------------------------------------------------------*/
/*!
 * \brief Look up a station record by its table index.
 *
 * \param[in] prAdapter Adapter context
 * \param[in] ucIndex   Record index to look up
 *
 * \return Pointer to the record if the index is valid and the record is
 *         in use; NULL otherwise
 */
/*----------------------------------------------------------------------------*/
P_STA_RECORD_T cnmGetStaRecByIndex(P_ADAPTER_T prAdapter, UINT_8 ucIndex)
{
	P_STA_RECORD_T prRec;

	ASSERT(prAdapter);

	if (ucIndex >= CFG_STA_REC_NUM)
		return NULL;

	prRec = &prAdapter->arStaRec[ucIndex];
	return prRec->fgIsInUse ? prRec : NULL;
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief Get STA_RECORD_T by Peer MAC Address(Usually TA).
 *
 * @param[in] prAdapter      Adapter context
 * @param[in] ucNetTypeIndex Network type the record must belong to
 * @param[in] pucPeerMacAddr Given Peer MAC Address.
 *
 * @retval Pointer to STA_RECORD_T, if found. NULL, if not found
 */
/*----------------------------------------------------------------------------*/
P_STA_RECORD_T
cnmGetStaRecByAddress(P_ADAPTER_T prAdapter, UINT_8 ucNetTypeIndex, PUINT_8 pucPeerMacAddr)
{
	UINT_16 u2Idx;

	ASSERT(prAdapter);
	ASSERT(pucPeerMacAddr);

	for (u2Idx = 0; u2Idx < CFG_STA_REC_NUM; u2Idx++) {
		P_STA_RECORD_T prRec = &prAdapter->arStaRec[u2Idx];

		if (prRec->fgIsInUse &&
		    prRec->ucNetTypeIndex == ucNetTypeIndex &&
		    EQUAL_MAC_ADDR(prRec->aucMacAddr, pucPeerMacAddr))
			return prRec;
	}

	return NULL;
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief Reset the Status and Reason Code Field to 0 of all Station Records for
 *        the specified Network Type
 *
 * Current implementation simply frees every record of the network type
 * (without chip sync); the original per-field reset is retained below
 * under "#if 0" for reference.
 *
 * @param[in] eNetType Specify Network Type
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID cnmStaRecResetStatus(P_ADAPTER_T prAdapter, ENUM_NETWORK_TYPE_INDEX_T eNetTypeIndex)
{
	cnmStaFreeAllStaByNetType(prAdapter, eNetTypeIndex, FALSE);

	/* Disabled legacy behavior: reset status fields in place instead of
	 * freeing the records.
	 */
#if 0
	P_STA_RECORD_T prStaRec;
	UINT_16 i;

	ASSERT(prAdapter);

	for (i = 0; i < CFG_STA_REC_NUM; i++) {
		prStaRec = &prAdapter->arStaRec[i];

		if (prStaRec->fgIsInUse) {
			if ((NETWORK_TYPE_AIS_INDEX == eNetTypeIndex) &&
			    IS_STA_IN_AIS(prStaRec->eStaType)) {

				prStaRec->u2StatusCode = STATUS_CODE_SUCCESSFUL;
				prStaRec->u2ReasonCode = REASON_CODE_RESERVED;
				prStaRec->ucJoinFailureCount = 0;
				prStaRec->fgTransmitKeyExist = FALSE;

				prStaRec->fgSetPwrMgtBit = FALSE;
			}
			/* TODO(Kevin): For P2P and BOW */
		}
	}

	return;
#endif
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief This function will change the ucStaState of STA_RECORD_T and also do
 *        event indication to HOST to sync the STA_RECORD_T in driver.
 *
 * @param[in] prStaRec   Pointer to the STA_RECORD_T
 * @param[in] ucNewState New STATE to change.
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
VOID cnmStaRecChangeState(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec, UINT_8 ucNewState)
{
	BOOLEAN fgNeedResp;

	ASSERT(prAdapter);
	ASSERT(prStaRec);
	ASSERT(prStaRec->fgIsInUse);

	/* Do nothing when following state transitions happen,
	 * other 6 conditions should be sync to FW, including 1-->1, 3-->3
	 */
	/* Transitions into STATE_2 (except from STATE_3) and 2-->1 are kept
	 * local: update the cached state and skip the firmware command.
	 */
	if ((ucNewState == STA_STATE_2 && prStaRec->ucStaState != STA_STATE_3) ||
	    (ucNewState == STA_STATE_1 && prStaRec->ucStaState == STA_STATE_2)) {
		prStaRec->ucStaState = ucNewState;
		return;
	}

	fgNeedResp = FALSE;
	if (ucNewState == STA_STATE_3) {
		/* Entering STATE_3: kick the security FSM; request a firmware
		 * response only on an actual state change
		 */
		secFsmEventStart(prAdapter, prStaRec);
		if (ucNewState != prStaRec->ucStaState) {
			fgNeedResp = TRUE;
		}
	} else {
		/* Leaving STATE_3: deactivate the record in the queue manager */
		if (ucNewState != prStaRec->ucStaState && prStaRec->ucStaState == STA_STATE_3) {
			qmDeactivateStaRec(prAdapter, prStaRec->ucIndex);
		}
		fgNeedResp = FALSE;
	}
	prStaRec->ucStaState = ucNewState;

	/* Sync the (possibly unchanged) record to firmware */
	cnmStaSendUpdateCmd(prAdapter, prStaRec, fgNeedResp);

#if CFG_ENABLE_WIFI_DIRECT
	/* To do: Confirm if it is invoked here or other location, but it should
	 * be invoked after state sync of STA_REC
	 * Update system operation parameters for AP mode
	 */
	if (prAdapter->fgIsP2PRegistered && (IS_STA_IN_P2P(prStaRec))) {
		P_BSS_INFO_T prBssInfo;

		prBssInfo = &prAdapter->rWifiVar.arBssInfo[prStaRec->ucNetTypeIndex];

		if (prBssInfo->eCurrentOPMode == OP_MODE_ACCESS_POINT) {
			rlmUpdateParamsForAP(prAdapter, prBssInfo, FALSE);
		}
	}
#endif

	return;
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief Firmware event handler for the STA-record update command.
 *
 * Activates the station record in the queue manager once firmware confirms
 * it, provided the record is still in STATE_3 and its MAC address matches
 * the one carried in the event.
 *
 * @param[in] prAdapter   Adapter context
 * @param[in] prCmdInfo   Originating command info (unused)
 * @param[in] pucEventBuf Raw event payload (EVENT_ACTIVATE_STA_REC_T)
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
static VOID
cnmStaRecHandleEventPkt(P_ADAPTER_T prAdapter, P_CMD_INFO_T prCmdInfo, PUINT_8 pucEventBuf)
{
	P_EVENT_ACTIVATE_STA_REC_T prEvent = (P_EVENT_ACTIVATE_STA_REC_T) pucEventBuf;
	P_STA_RECORD_T prStaRec = cnmGetStaRecByIndex(prAdapter, prEvent->ucStaRecIdx);

	if (prStaRec == NULL || prStaRec->ucStaState != STA_STATE_3)
		return;

	/* The record may have been reassigned meanwhile; verify the MAC */
	if (kalMemCmp(&prStaRec->aucMacAddr[0], &prEvent->aucMacAddr[0], MAC_ADDR_LEN))
		return;

	qmActivateStaRec(prAdapter, prStaRec);
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief Marshal a station record into a CMD_UPDATE_STA_RECORD_T and send
 *        it to firmware.
 *
 * @param[in] prAdapter  Adapter context
 * @param[in] prStaRec   In-use record to sync
 * @param[in] fgNeedResp TRUE to request a firmware event, handled by
 *                       cnmStaRecHandleEventPkt()
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
static VOID cnmStaSendUpdateCmd(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec, BOOLEAN fgNeedResp)
{
	P_CMD_UPDATE_STA_RECORD_T prCmdContent;
	WLAN_STATUS rStatus;

	ASSERT(prAdapter);
	ASSERT(prStaRec);
	ASSERT(prStaRec->fgIsInUse);

	/* To do: come out a mechanism to limit one STA_REC sync once for AP mode
	 * to avoid buffer empty case when many STAs are associated
	 * simultaneously.
	 */

	/* To do: how to avoid 2 times of allocated memory. Use Stack?
	 * One is here, the other is in wlanSendQueryCmd()
	 */
	prCmdContent = cnmMemAlloc(prAdapter, RAM_TYPE_BUF, sizeof(CMD_UPDATE_STA_RECORD_T));
	ASSERT(prCmdContent);

	/* To do: exception handle */
	if (!prCmdContent) {
		return;
	}

	/* Copy the record fields firmware needs into the command payload */
	prCmdContent->ucIndex = prStaRec->ucIndex;
	prCmdContent->ucStaType = (UINT_8) prStaRec->eStaType;
	kalMemCopy(&prCmdContent->aucMacAddr[0], &prStaRec->aucMacAddr[0], MAC_ADDR_LEN);
	prCmdContent->u2AssocId = prStaRec->u2AssocId;
	prCmdContent->u2ListenInterval = prStaRec->u2ListenInterval;
	prCmdContent->ucNetTypeIndex = prStaRec->ucNetTypeIndex;

	prCmdContent->ucDesiredPhyTypeSet = prStaRec->ucDesiredPhyTypeSet;
	prCmdContent->u2DesiredNonHTRateSet = prStaRec->u2DesiredNonHTRateSet;
	prCmdContent->u2BSSBasicRateSet = prStaRec->u2BSSBasicRateSet;
	prCmdContent->ucMcsSet = prStaRec->ucMcsSet;
	prCmdContent->ucSupMcs32 = (UINT_8) prStaRec->fgSupMcs32;
	prCmdContent->u2HtCapInfo = prStaRec->u2HtCapInfo;
	prCmdContent->ucNeedResp = (UINT_8) fgNeedResp;

#if !CFG_SLT_SUPPORT
	if (prAdapter->rWifiVar.eRateSetting != FIXED_RATE_NONE) {
		/* override rate configuration */
		nicUpdateRateParams(prAdapter,
				    prAdapter->rWifiVar.eRateSetting,
				    &(prCmdContent->ucDesiredPhyTypeSet),
				    &(prCmdContent->u2DesiredNonHTRateSet),
				    &(prCmdContent->u2BSSBasicRateSet),
				    &(prCmdContent->ucMcsSet),
				    &(prCmdContent->ucSupMcs32), &(prCmdContent->u2HtCapInfo));
	}
#endif

	prCmdContent->ucIsQoS = prStaRec->fgIsQoS;
	prCmdContent->ucIsUapsdSupported = prStaRec->fgIsUapsdSupported;
	prCmdContent->ucStaState = prStaRec->ucStaState;

	prCmdContent->ucAmpduParam = prStaRec->ucAmpduParam;
	prCmdContent->u2HtExtendedCap = prStaRec->u2HtExtendedCap;
	prCmdContent->u4TxBeamformingCap = prStaRec->u4TxBeamformingCap;
	prCmdContent->ucAselCap = prStaRec->ucAselCap;
	prCmdContent->ucRCPI = prStaRec->ucRCPI;

	/* Trigger AC in low nibble, delivery AC in high nibble */
	prCmdContent->ucUapsdAc = prStaRec->ucBmpTriggerAC | (prStaRec->ucBmpDeliveryAC << 4);
	prCmdContent->ucUapsdSp = prStaRec->ucUapsdSp;

	rStatus = wlanSendSetQueryCmd(prAdapter,	/* prAdapter */
				      CMD_ID_UPDATE_STA_RECORD,	/* ucCID */
				      TRUE,	/* fgSetQuery */
				      fgNeedResp,	/* fgNeedResp */
				      FALSE,	/* fgIsOid */
				      fgNeedResp ? cnmStaRecHandleEventPkt : NULL, /* pfCmdDoneHandler */
				      NULL,	/* pfCmdTimeoutHandler */
				      sizeof(CMD_UPDATE_STA_RECORD_T),	/* u4SetQueryInfoLen */
				      (PUINT_8) prCmdContent,	/* pucInfoBuffer */
				      NULL,	/* pvSetQueryBuffer */
				      0	/* u4SetQueryBufferLen */
	    );

	ASSERT(rStatus == WLAN_STATUS_PENDING);

	/* The command path copied the payload; release our buffer */
	cnmMemFree(prAdapter, prCmdContent);
}
/*----------------------------------------------------------------------------*/
/*!
 * @brief Send a CMD_ID_REMOVE_STA_RECORD for the given record to firmware.
 *
 * The command payload lives on the stack; wlanSendSetQueryCmd() copies it,
 * and no response or handlers are requested.
 *
 * @param[in] prAdapter Adapter context
 * @param[in] prStaRec  Record whose index and MAC identify the removal
 *
 * @return (none)
 */
/*----------------------------------------------------------------------------*/
static VOID cnmStaSendRemoveCmd(P_ADAPTER_T prAdapter, P_STA_RECORD_T prStaRec)
{
	CMD_REMOVE_STA_RECORD_T rCmdContent;
	WLAN_STATUS rStatus;

	ASSERT(prAdapter);
	ASSERT(prStaRec);

	rCmdContent.ucIndex = prStaRec->ucIndex;
	kalMemCopy(&rCmdContent.aucMacAddr[0], &prStaRec->aucMacAddr[0], MAC_ADDR_LEN);

	rStatus = wlanSendSetQueryCmd(prAdapter,	/* prAdapter */
				      CMD_ID_REMOVE_STA_RECORD,	/* ucCID */
				      TRUE,	/* fgSetQuery */
				      FALSE,	/* fgNeedResp */
				      FALSE,	/* fgIsOid */
				      NULL,	/* pfCmdDoneHandler */
				      NULL,	/* pfCmdTimeoutHandler */
				      sizeof(CMD_REMOVE_STA_RECORD_T),	/* u4SetQueryInfoLen */
				      (PUINT_8) &rCmdContent,	/* pucInfoBuffer */
				      NULL,	/* pvSetQueryBuffer */
				      0	/* u4SetQueryBufferLen */
	    );

	ASSERT(rStatus == WLAN_STATUS_PENDING);
}
| gpl-2.0 |
nixcloud/linux-odroid | drivers/irqchip/irq-armada-370-xp.c | 105 | 15992 | /*
* Marvell Armada 370 and Armada XP SoC IRQ handling
*
* Copyright (C) 2012 Marvell
*
* Lior Amsalem <alior@marvell.com>
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
* Ben Dooks <ben.dooks@codethink.co.uk>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>
#include "irqchip.h"
/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
#define ARMADA_370_XP_INT_CONTROL (0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_375_PPI_CAUSE (0x10)
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
#define IPI_DOORBELL_START (0)
#define IPI_DOORBELL_END (8)
#define IPI_DOORBELL_MASK 0xFF
#define PCI_MSI_DOORBELL_START (16)
#define PCI_MSI_DOORBELL_NR (16)
#define PCI_MSI_DOORBELL_END (32)
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif
/*
* In SMP mode:
* For shared global interrupts, mask/unmask global enable bit
* For CPU interrupts, mask/unmask the calling CPU's bit
*/
/*
 * Mask an interrupt.  Shared (global) interrupts are disabled in the main
 * distributor; the per-CPU Timer0 interrupt is instead masked in the
 * calling CPU's per-CPU mask register, so each core controls its own copy.
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
		       ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
		       ARMADA_370_XP_INT_SET_MASK_OFFS);
}
/*
 * Unmask an interrupt: mirror of armada_370_xp_irq_mask().  Shared
 * interrupts are enabled in the main distributor; the per-CPU Timer0
 * interrupt is unmasked in the calling CPU's per-CPU mask register.
 */
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
		       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
		       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#ifdef CONFIG_PCI_MSI
/*
 * Allocate a free MSI doorbell number under msi_used_lock.
 *
 * Returns the hwirq number (0 .. PCI_MSI_DOORBELL_NR - 1), or -ENOSPC
 * when every doorbell is already in use.
 */
static int armada_370_xp_alloc_msi(void)
{
	int hwirq;

	mutex_lock(&msi_used_lock);
	/*
	 * Fix: msi_used is a DECLARE_BITMAP() array and decays to the
	 * "unsigned long *" the bitmap API expects; the original passed
	 * "&msi_used" (pointer-to-array, wrong type).  set_bit() below
	 * already passes it correctly.
	 */
	hwirq = find_first_zero_bit(msi_used, PCI_MSI_DOORBELL_NR);
	if (hwirq >= PCI_MSI_DOORBELL_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);

	return hwirq;
}
/*
 * Return an MSI doorbell number to the pool.  Complains (but does not
 * crash) if the doorbell was not marked as allocated.
 */
static void armada_370_xp_free_msi(int hwirq)
{
	mutex_lock(&msi_used_lock);
	if (test_bit(hwirq, msi_used))
		clear_bit(hwirq, msi_used);
	else
		pr_err("trying to free unused MSI#%d\n", hwirq);
	mutex_unlock(&msi_used_lock);
}
/*
 * msi_controller ->setup_irq hook: allocate a doorbell, map it into the
 * MSI irq domain and program the MSI message (doorbell address + encoded
 * doorbell number) into the device.
 *
 * Returns 0 on success or a negative errno.
 */
static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
				       struct pci_dev *pdev,
				       struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq, hwirq;

	/* We support MSI, but not MSI-X */
	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	hwirq = armada_370_xp_alloc_msi();
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
	if (!virq) {
		armada_370_xp_free_msi(hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	msg.address_lo = msi_doorbell_addr;
	msg.address_hi = 0;
	/* Data encodes the doorbell bit: PCI_MSI_DOORBELL_START offset (16) */
	msg.data = 0xf00 | (hwirq + 16);

	pci_write_msi_msg(virq, &msg);

	return 0;
}
/*
 * msi_controller ->teardown_irq hook: undo armada_370_xp_setup_msi_irq().
 */
static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
					   unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	/* Save hwirq first: the mapping (and d) is torn down just below */
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	armada_370_xp_free_msi(hwirq);
}
/* irq_chip for MSI interrupts: mask/unmask via the PCI MSI core helpers */
static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name = "armada_370_xp_msi_irq",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* irq_domain ->map hook: wire up a freshly mapped MSI virq */
static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
				 handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
	.map = armada_370_xp_msi_map,
};
/*
 * Set up the MSI controller: register an msi_controller, create a linear
 * irq domain for the MSI doorbells, then unmask the MSI doorbell bits and
 * the doorbell summary interrupt.
 *
 * Unwinds the domain and the msi_chip allocation on failure; returns 0 on
 * success or a negative errno.
 */
static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	struct msi_controller *msi_chip;
	u32 reg;
	int ret;

	/* MSI writes target the software-triggered interrupt register */
	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
	if (!msi_chip)
		return -ENOMEM;

	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
	msi_chip->of_node = node;

	armada_370_xp_msi_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_irq_ops,
				      NULL);
	if (!armada_370_xp_msi_domain) {
		kfree(msi_chip);
		return -ENOMEM;
	}

	ret = of_pci_msi_chip_add(msi_chip);
	if (ret < 0) {
		irq_domain_remove(armada_370_xp_msi_domain);
		kfree(msi_chip);
		return ret;
	}

	/* Enable the MSI doorbell bits (upper 16) in the doorbell mask */
	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
/* !CONFIG_PCI_MSI stub: nothing to initialize */
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif
#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * Route a shared interrupt to a single CPU by rewriting the CPU bits of
 * its per-interrupt source control register.
 *
 * NOTE(review): assumes mask_val intersects cpu_online_mask; an empty
 * intersection would make cpumask_any_and() return >= nr_cpu_ids —
 * confirm the core never passes such a mask here.
 */
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long reg, mask;
	int cpu;

	/* Select a single core from the affinity mask which is online */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);
	mask = 1UL << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif
/* irq_chip for the MPIC's own interrupts (shared and per-CPU timer) */
static struct irq_chip armada_370_xp_irq_chip = {
	.name = "armada_370_xp_irq",
	.irq_mask = armada_370_xp_irq_mask,
	.irq_mask_ack = armada_370_xp_irq_mask,
	.irq_unmask = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};
/*
 * irq_domain ->map hook for the MPIC domain.
 *
 * The new virq starts masked.  For shared interrupts the per-CPU mask bit
 * is cleared (the global enable stays under mask/unmask control); the
 * per-CPU Timer0 interrupt is instead enabled in the main distributor and
 * handled as a percpu_devid interrupt.
 */
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
		       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);

	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}
#ifdef CONFIG_SMP
/*
 * Send an IPI (doorbell) to the CPUs in @mask by writing the physical CPU
 * bitmap and doorbell number to the software-triggered interrupt register.
 */
static void armada_mpic_send_doorbell(const struct cpumask *mask,
				      unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* submit softirq: CPU map in bits [15:8], doorbell number in [7:0] */
	writel((map << 8) | irq, main_int_base +
	       ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
/*
 * Per-CPU MPIC bring-up, run on each secondary (and the boot CPU): mask
 * all interrupts for this CPU, then set up the IPI doorbells.
 */
static void armada_xp_mpic_smp_cpu_init(void)
{
	u32 control;
	int nr_irqs, i;

	/* Interrupt count is encoded in bits [12:2] of the control register */
	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	for (i = 0; i < nr_irqs; i++)
		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
/*
 * CPU hotplug notifier: initialize the per-CPU MPIC state as each
 * secondary CPU starts (including resume from freeze).
 */
static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		armada_xp_mpic_smp_cpu_init();

	return NOTIFY_OK;
}

/* High priority so the MPIC is ready before other CPU_STARTING callbacks */
static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
	.notifier_call = armada_xp_mpic_secondary_init,
	.priority = 100,
};

/*
 * CPU hotplug notifier for the cascaded (Armada 375/38x) setup: enable
 * the parent per-CPU interrupt on each starting CPU.
 */
static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);

	return NOTIFY_OK;
}

/* High priority for the same reason as the non-cascaded notifier above */
static struct notifier_block mpic_cascaded_cpu_notifier = {
	.notifier_call = mpic_cascaded_secondary_init,
	.priority = 100,
};
#endif /* CONFIG_SMP */
/* MPIC irq_domain ops: one-cell DT interrupt specifier */
static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
#ifdef CONFIG_PCI_MSI
/*
 * Drain pending PCI MSI doorbells.  Called from the top-level handler
 * (is_chained == false, regs available) or from the chained handler
 * (is_chained == true, regs == NULL).
 */
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
	u32 msimask, msinr;
	msimask = readl_relaxed(per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
		& PCI_MSI_DOORBELL_MASK;
	/* Ack the doorbells we are about to handle.  NOTE(review): this
	 * assumes the cause register clears the bits written as 0 —
	 * confirm against the SoC datasheet. */
	writel(~msimask, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
	for (msinr = PCI_MSI_DOORBELL_START;
	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
		int irq;
		if (!(msimask & BIT(msinr)))
			continue;
		if (is_chained) {
			/* Doorbell N maps to MSI hwirq N - 16. */
			irq = irq_find_mapping(armada_370_xp_msi_domain,
					       msinr - 16);
			generic_handle_irq(irq);
		} else {
			irq = msinr - 16;
			handle_domain_irq(armada_370_xp_msi_domain,
					  irq, regs);
		}
	}
}
#else
/* PCI MSI disabled: stub so callers need no #ifdefs. */
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif
/*
 * Chained handler used when the MPIC sits behind another irqchip:
 * walk the per-CPU cause bitmap and dispatch each source that is
 * actually unmasked for this CPU.
 */
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
						  struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned long irqmap, irqn, irqsrc, cpuid;
	unsigned int cascade_irq;
	chained_irq_enter(chip, desc);
	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
	cpuid = cpu_logical_map(smp_processor_id());
	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		irqsrc = readl_relaxed(main_int_base +
				       ARMADA_370_XP_INT_SOURCE_CTL(irqn));
		/* Check if the interrupt is not masked on current CPU.
		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
		 */
		if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
			continue;
		if (irqn == 1) {
			/* Line 1 is the MSI doorbell summary interrupt. */
			armada_370_xp_handle_msi_irq(NULL, true);
			continue;
		}
		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
		generic_handle_irq(cascade_irq);
	}
	chained_irq_exit(chip, desc);
}
/*
 * Top-level exception entry (root-controller configuration): loop on the
 * interrupt-acknowledge register until no interrupt is pending.  Line 0
 * is the IPI summary, line 1 the MSI summary, everything else a normal
 * peripheral interrupt.
 */
static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;
		/* 0x3FF from a 10-bit field means "spurious/none left". */
		if (irqnr > 1022)
			break;
		if (irqnr > 1) {
			handle_domain_irq(armada_370_xp_mpic_domain,
					  irqnr, regs);
			continue;
		}
		/* MSI handling */
		if (irqnr == 1)
			armada_370_xp_handle_msi_irq(regs, false);
#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;
			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;
			/* Ack handled doorbells before dispatching them. */
			writel(~ipimask, per_cpu_int_base +
			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif
	} while (1);
}
/*
 * Syscore suspend: the doorbell mask register is lost across deep sleep,
 * so snapshot it for restore in armada_370_xp_mpic_resume().
 */
static int armada_370_xp_mpic_suspend(void)
{
	doorbell_mask_reg = readl(per_cpu_int_base +
				  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
	return 0;
}
/*
 * Syscore resume: re-program the controller from scratch — re-enable
 * every mapped, non-disabled interrupt and restore the saved doorbell
 * (IPI/MSI) configuration.
 */
static void armada_370_xp_mpic_resume(void)
{
	int nirqs;
	irq_hw_number_t irq;
	/* Re-enable interrupts */
	nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
	for (irq = 0; irq < nirqs; irq++) {
		struct irq_data *data;
		int virq;
		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
		if (virq == 0)
			continue;	/* never mapped, leave disabled */
		/* Same split as mask/unmask: normal sources use the
		 * per-CPU mask bank, the per-CPU timer uses the main
		 * enable bank. */
		if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
			writel(irq, per_cpu_int_base +
			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
		else
			writel(irq, main_int_base +
			       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
		data = irq_get_irq_data(virq);
		if (!irqd_irq_disabled(data))
			armada_370_xp_irq_unmask(data);
	}
	/* Reconfigure doorbells for IPIs and MSIs */
	writel(doorbell_mask_reg,
	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
	/* Re-unmask summary lines 0 (IPI) and 1 (MSI) if they were in use. */
	if (doorbell_mask_reg & IPI_DOORBELL_MASK)
		writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
/* Suspend/resume hooks registered with register_syscore_ops() below. */
struct syscore_ops armada_370_xp_mpic_syscore_ops = {
	.suspend	= armada_370_xp_mpic_suspend,
	.resume		= armada_370_xp_mpic_resume,
};
/*
 * DT init for "marvell,mpic": map both register banks, mask everything,
 * create the linear irq domain, then install either the top-level
 * handler (standalone root controller) or a chained handler when
 * cascaded behind a parent irqchip.  The controller is essential for
 * boot, hence BUG_ON() on every setup failure.
 */
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	int nr_irqs, i;
	u32 control;
	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));
	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);
	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);
	/* Bits [11:2] of the control register hold the input count. */
	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;
	/* Start with every source disabled at the main controller. */
	for (i = 0; i < nr_irqs; i++)
		writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, nr_irqs,
				      &armada_370_xp_mpic_irq_ops, NULL);
	BUG_ON(!armada_370_xp_mpic_domain);
#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();
#endif
	armada_370_xp_msi_init(node, main_int_res.start);
	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		/* No parent in DT: we are the root interrupt controller. */
		irq_set_default_host(armada_370_xp_mpic_domain);
		set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
		set_smp_cross_call(armada_mpic_send_doorbell);
		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
	} else {
		/* Cascaded behind a parent irqchip. */
#ifdef CONFIG_SMP
		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
#endif
		irq_set_chained_handler(parent_irq,
					armada_370_xp_mpic_handle_cascade_irq);
	}
	register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
	return 0;
}
IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
| gpl-2.0 |
Evil-Green/Ptah-GT-I9300 | drivers/usb/host/ohci-s5p.c | 105 | 12688 | /* ohci-s5p.c - Driver for USB HOST on Samsung S5P platform device
*
* Bus Glue for SAMSUNG S5P USB HOST OHCI Controller
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
* (C) Copyright 2002 Hewlett-Packard Company
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
*
* Based on "ohci-au1xxx.c" by Matt Porter <mporter@kernel.crashing.org>
* Modified for SAMSUNG s5p OHCI by Jingoo Han <jg1.han@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>
#include <mach/board_rev.h>
struct s5p_ohci_hcd {
struct device *dev;
struct usb_hcd *hcd;
struct clk *clk;
int power_on;
};
#ifdef CONFIG_USB_EXYNOS_SWITCH
/*
 * Helpers exported for the Exynos USB switch driver: drop / restore root
 * hub port power via the OHCI root hub status register.
 */
int s5p_ohci_port_power_off(struct platform_device *pdev)
{
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	/* Disable interrupt emission before touching port power; the
	 * readback flushes the posted write. */
	ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
	(void)ohci_readl(ohci, &ohci->regs->intrdisable);
	/* RH_HS_LPS: clear global power (ports off). */
	ohci_writel (ohci, RH_HS_LPS, &ohci->regs->roothub.status);
	return 0;
}
EXPORT_SYMBOL_GPL(s5p_ohci_port_power_off);
int s5p_ohci_port_power_on(struct platform_device *pdev)
{
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	/* RH_HS_LPSC: set global power (ports on). */
	ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
	return 0;
}
EXPORT_SYMBOL_GPL(s5p_ohci_port_power_on);
#endif
#ifdef CONFIG_PM
/*
 * System suspend: verify the root hub is already suspended/halted, mark
 * the hardware inaccessible, then power down the PHY and gate the clock.
 */
static int ohci_hcd_s5p_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	unsigned long flags;
	int rc = 0;
	/* Root hub was already suspended. Disable irq emission and
	 * mark HW unaccessible, bail out if RH has been resumed. Use
	 * the spinlock to properly synchronize with possible pending
	 * RH suspend or resume activity.
	 *
	 * This is still racy as hcd->state is manipulated outside of
	 * any locks =P But that will be a different fix.
	 */
	spin_lock_irqsave(&ohci->lock, flags);
	if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
		spin_unlock_irqrestore(&ohci->lock, flags);
		return -EINVAL;
	}
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	spin_unlock_irqrestore(&ohci->lock, flags);
	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
	clk_disable(s5p_ohci->clk);
	return rc;
}
/*
 * System resume: ungate the clock and re-init the PHY, then hand the
 * controller back to the OHCI core — unless the user had powered the
 * hcd off via sysfs, in which case there is nothing to restore.
 */
static int ohci_hcd_s5p_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	int rc = 0;
	clk_enable(s5p_ohci->clk);
	pm_runtime_resume(&pdev->dev);
	if (pdata && pdata->phy_init)
		pdata->phy_init(pdev, S5P_USB_PHY_HOST);
	/* if OHCI was off, hcd was removed */
	if (!s5p_ohci->power_on) {
		dev_info(dev, "Nothing to do for the device (power off)\n");
		return 0;
	}
	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	ohci_finish_controller_resume(hcd);
	return rc;
}
#else
#define ohci_hcd_s5p_drv_suspend	NULL
#define ohci_hcd_s5p_drv_resume		NULL
#endif
#ifdef CONFIG_USB_SUSPEND
/*
 * Runtime (auto) suspend: mask controller interrupts, mark the hardware
 * inaccessible and put the PHY to sleep.  Bails out (returning 0) if the
 * root hub has not actually been suspended/halted yet.
 */
static int ohci_hcd_s5p_drv_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	unsigned long flags;
	int rc = 0;
	/* Root hub was already suspended. Disable irq emission and
	 * mark HW unaccessible, bail out if RH has been resumed. Use
	 * the spinlock to properly synchronize with possible pending
	 * RH suspend or resume activity.
	 *
	 * This is still racy as hcd->state is manipulated outside of
	 * any locks =P But that will be a different fix.
	 */
	spin_lock_irqsave(&ohci->lock, flags);
	if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
		spin_unlock_irqrestore(&ohci->lock, flags);
		err("Not ready %s", hcd->self.bus_name);
		return rc;
	}
	/* Stop interrupt emission; the readback flushes the posted write. */
	ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
	(void)ohci_readl(ohci, &ohci->regs->intrdisable);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	spin_unlock_irqrestore(&ohci->lock, flags);
	/* Guard pdata like the system suspend/resume paths do; the old
	 * code dereferenced it unconditionally. */
	if (pdata && pdata->phy_suspend)
		pdata->phy_suspend(pdev, S5P_USB_PHY_HOST);
	return rc;
}
/*
 * Runtime resume: wake the PHY and mark the hardware accessible again.
 * Skipped while a full system resume is in progress (dev->power
 * bookkeeping handles that path).
 */
static int ohci_hcd_s5p_drv_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	if (dev->power.is_suspended)
		return 0;
	/* Guard pdata like the system suspend/resume paths do; the old
	 * code dereferenced it unconditionally. */
	if (pdata && pdata->phy_resume)
		pdata->phy_resume(pdev, S5P_USB_PHY_HOST);
	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	ohci_finish_controller_resume(hcd);
	return 0;
}
#else
#define ohci_hcd_s5p_drv_runtime_suspend	NULL
#define ohci_hcd_s5p_drv_runtime_resume		NULL
#endif
/* hc_driver .reset hook: run the generic OHCI init for this controller. */
static int ohci_s5p_init(struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	int ret;

	ohci_dbg(ohci, "ohci_s5p_init, ohci:%p", ohci);
	ret = ohci_init(ohci);
	return (ret < 0) ? ret : 0;
}
/* hc_driver .start hook: start the controller, tearing down on failure. */
static int ohci_s5p_start(struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	int ret;

	ohci_dbg(ohci, "ohci_s5p_start, ohci:%p", ohci);
	ret = ohci_run(ohci);
	if (ret >= 0)
		return 0;
	err("can't start %s", hcd->self.bus_name);
	ohci_stop(hcd);
	return ret;
}
/* hc_driver: generic OHCI ops plus the S5P-specific reset/start hooks. */
static const struct hc_driver ohci_s5p_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "s5p OHCI",
	.hcd_priv_size		= sizeof(struct ohci_hcd),
	.irq			= ohci_irq,
	.flags			= HCD_MEMORY|HCD_USB11,
	.reset			= ohci_s5p_init,
	.start			= ohci_s5p_start,
	.stop			= ohci_stop,
	.shutdown		= ohci_shutdown,
	.get_frame_number	= ohci_get_frame,
	.urb_enqueue		= ohci_urb_enqueue,
	.urb_dequeue		= ohci_urb_dequeue,
	.endpoint_disable	= ohci_endpoint_disable,
	.hub_status_data	= ohci_hub_status_data,
	.hub_control		= ohci_hub_control,
#ifdef	CONFIG_PM
	.bus_suspend		= ohci_bus_suspend,
	.bus_resume		= ohci_bus_resume,
#endif
	.start_port_reset	= ohci_start_port_reset,
};
/* sysfs "ohci_power" show: report whether the hcd is currently powered. */
static ssize_t show_ohci_power(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	const char *state = s5p_ohci->power_on ? "on" : "off";

	return sprintf(buf, "OHCI Power %s\n", state);
}
/*
 * sysfs "ohci_power" store: "0" removes the hcd and powers down the PHY;
 * non-zero (re-)adds the hcd, removing a live instance first.  Serialized
 * against other device operations via device_lock().
 */
static ssize_t store_ohci_power(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	int power_on;
	int irq;
	int retval;
	if (sscanf(buf, "%d", &power_on) != 1)
		return -EINVAL;
	device_lock(dev);
	if (!power_on && s5p_ohci->power_on) {
		printk(KERN_DEBUG "%s: OHCI turns off\n", __func__);
		pm_runtime_forbid(dev);
		s5p_ohci->power_on = 0;
		usb_remove_hcd(hcd);
		if (pdata && pdata->phy_exit)
			pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
	} else if (power_on) {
		printk(KERN_DEBUG "%s: OHCI turns on\n", __func__);
		if (s5p_ohci->power_on) {
			/* Already on: cycle the hcd, keep the PHY alive. */
			pm_runtime_forbid(dev);
			usb_remove_hcd(hcd);
		} else {
			if (pdata && pdata->phy_init)
				pdata->phy_init(pdev, S5P_USB_PHY_HOST);
		}
		irq = platform_get_irq(pdev, 0);
		retval = usb_add_hcd(hcd, irq,
				     IRQF_DISABLED | IRQF_SHARED);
		if (retval < 0) {
			/* NOTE(review): power_on may be left stale at 1 if
			 * re-add fails after a remove — confirm intent. */
			dev_err(dev, "Power On Fail\n");
			goto exit;
		}
		s5p_ohci->power_on = 1;
		pm_runtime_allow(dev);
	}
exit:
	device_unlock(dev);
	return count;
}
/* /sys/devices/.../ohci_power: rw for owner/group, read-only for others. */
static DEVICE_ATTR(ohci_power, 0664, show_ohci_power, store_ohci_power);
/* Create the ohci_power attribute on the controller's device. */
static inline int create_ohci_sys_file(struct ohci_hcd *ohci)
{
	return device_create_file(ohci_to_hcd(ohci)->self.controller,
				  &dev_attr_ohci_power);
}
/* Remove the ohci_power attribute (mirror of create_ohci_sys_file). */
static inline void remove_ohci_sys_file(struct ohci_hcd *ohci)
{
	device_remove_file(ohci_to_hcd(ohci)->self.controller,
			   &dev_attr_ohci_power);
}
/*
 * Probe: allocate driver state, create the HCD, acquire/enable the
 * "usbhost" clock, map the register window, power up the PHY and
 * register the HCD.  Error paths unwind in reverse order via gotos.
 */
static int __devinit ohci_hcd_s5p_drv_probe(struct platform_device *pdev)
{
	struct s5p_ohci_platdata *pdata;
	struct s5p_ohci_hcd *s5p_ohci;
	struct usb_hcd *hcd = NULL;
	struct ohci_hcd *ohci;
	struct resource *res;
	int irq;
	int err;
	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data defined\n");
		return -EINVAL;
	}
	s5p_ohci = kzalloc(sizeof(struct s5p_ohci_hcd), GFP_KERNEL);
	if (!s5p_ohci)
		return -ENOMEM;
	s5p_ohci->dev = &pdev->dev;
	hcd = usb_create_hcd(&ohci_s5p_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		err = -ENOMEM;
		goto fail_hcd;
	}
	s5p_ohci->hcd = hcd;
	s5p_ohci->clk = clk_get(&pdev->dev, "usbhost");
	if (IS_ERR(s5p_ohci->clk)) {
		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
		err = PTR_ERR(s5p_ohci->clk);
		goto fail_clk;
	}
	err = clk_enable(s5p_ohci->clk);
	if (err)
		goto fail_clken;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto fail_io;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(res->start, resource_size(res));
	if (!hcd->regs) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto fail_io;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		/* platform_get_irq() returns a negative errno on failure,
		 * never 0 on modern platforms; the old "!irq" test let
		 * real errors through. */
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = irq;
		goto fail;
	}
	if (pdata->phy_init)
		pdata->phy_init(pdev, S5P_USB_PHY_HOST);
	ohci = hcd_to_ohci(hcd);
	ohci_hcd_init(ohci);
	err = usb_add_hcd(hcd, irq,
			  IRQF_DISABLED | IRQF_SHARED);
	if (err) {
		dev_err(&pdev->dev, "Failed to add USB HCD\n");
		goto fail;
	}
	platform_set_drvdata(pdev, s5p_ohci);
	create_ohci_sys_file(ohci);
	s5p_ohci->power_on = 1;
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
fail:
	iounmap(hcd->regs);
fail_io:
	clk_disable(s5p_ohci->clk);
fail_clken:
	clk_put(s5p_ohci->clk);
fail_clk:
	usb_put_hcd(hcd);
fail_hcd:
	kfree(s5p_ohci);
	return err;
}
/*
 * Remove: wake the PHY so the controller can be torn down cleanly, then
 * unwind everything probe set up, in reverse order.
 */
static int __devexit ohci_hcd_s5p_drv_remove(struct platform_device *pdev)
{
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ohci->hcd;
	if (pdata && pdata->phy_resume)
		pdata->phy_resume(pdev, S5P_USB_PHY_HOST);
	usb_remove_hcd(hcd);
	s5p_ohci->power_on = 0;
	remove_ohci_sys_file(hcd_to_ohci(hcd));
	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
	iounmap(hcd->regs);
	clk_disable(s5p_ohci->clk);
	clk_put(s5p_ohci->clk);
	usb_put_hcd(hcd);
	kfree(s5p_ohci);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/*
 * Shutdown: quiesce the controller via the hc_driver shutdown hook.
 * Skipped entirely when probe never completed or the hcd was powered
 * off through sysfs.
 */
static void ohci_hcd_s5p_drv_shutdown(struct platform_device *pdev)
{
	struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	if (!pdata || !s5p_ohci)
		return;
	hcd = s5p_ohci->hcd;
	if (!s5p_ohci->power_on)
		return;
	/* pdata is known non-NULL here (checked above); the old code
	 * re-tested it redundantly. */
	if (pdata->phy_resume)
		pdata->phy_resume(pdev, S5P_USB_PHY_HOST);
	if (hcd->driver->shutdown)
		hcd->driver->shutdown(hcd);
}
/* PM ops: system sleep reuses the suspend/resume pair for hibernation. */
static const struct dev_pm_ops ohci_s5p_pm_ops = {
	.suspend		= ohci_hcd_s5p_drv_suspend,
	.resume			= ohci_hcd_s5p_drv_resume,
#ifdef CONFIG_HIBERNATION
	.freeze			= ohci_hcd_s5p_drv_suspend,
	.thaw			= ohci_hcd_s5p_drv_resume,
	.restore		= ohci_hcd_s5p_drv_resume,
#endif
	.runtime_suspend	= ohci_hcd_s5p_drv_runtime_suspend,
	.runtime_resume		= ohci_hcd_s5p_drv_runtime_resume,
};
/* Platform driver matched against the "s5p-ohci" platform device. */
static struct platform_driver ohci_hcd_s5p_driver = {
	.probe		= ohci_hcd_s5p_drv_probe,
	.remove		= __devexit_p(ohci_hcd_s5p_drv_remove),
	.shutdown	= ohci_hcd_s5p_drv_shutdown,
	.driver = {
		.name	= "s5p-ohci",
		.owner	= THIS_MODULE,
		.pm	= &ohci_s5p_pm_ops,
	}
};
MODULE_ALIAS("platform:s5p-ohci");
| gpl-2.0 |
h8rift/android_kernel_htc_evita-3.4.10 | net/sunrpc/rpc_pipe.c | 105 | 26443 | /*
* net/sunrpc/rpc_pipe.c
*
* Userland/kernel interface for rpcauth_gss.
* Code shamelessly plagiarized from fs/nfsd/nfsctl.c
* and fs/sysfs/inode.c
*
* Copyright (c) 2002, Trond Myklebust <trond.myklebust@fys.uio.no>
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fsnotify.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <asm/ioctls.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/cache.h>
#include <linux/nsproxy.h>
#include <linux/notifier.h>
#include "netns.h"
#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_DEBUG
#define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "")
static struct file_system_type rpc_pipe_fs_type;
static struct kmem_cache *rpc_inode_cachep __read_mostly;
#define RPC_UPCALL_TIMEOUT (30*HZ)
static BLOCKING_NOTIFIER_HEAD(rpc_pipefs_notifier_list);
/*
 * Register a listener on the rpc_pipefs mount/umount notifier chain.
 * Uses the _cond_ variant so double registration is tolerated.
 */
int rpc_pipefs_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_cond_register(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_register);
/* Remove a listener previously added with rpc_pipefs_notifier_register(). */
void rpc_pipefs_notifier_unregister(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&rpc_pipefs_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(rpc_pipefs_notifier_unregister);
/*
 * Destroy every message on 'head' with errno set to 'err', then wake any
 * waiters on 'waitq' (if supplied).  No-op on an empty list.
 */
static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
		void (*destroy_msg)(struct rpc_pipe_msg *), int err)
{
	struct rpc_pipe_msg *msg;

	if (list_empty(head))
		return;
	while (!list_empty(head)) {
		msg = list_entry(head->next, struct rpc_pipe_msg, list);
		list_del_init(&msg->list);
		msg->errno = err;
		destroy_msg(msg);
	}
	if (waitq)
		wake_up(waitq);
}
/*
 * Delayed work: if no reader opened the pipe within RPC_UPCALL_TIMEOUT,
 * fail all queued upcalls with -ETIMEDOUT.  The message list is detached
 * under the lock; destruction happens outside it.
 */
static void
rpc_timeout_upcall_queue(struct work_struct *work)
{
	LIST_HEAD(free_list);
	struct rpc_pipe *pipe =
		container_of(work, struct rpc_pipe, queue_timeout.work);
	void (*destroy_msg)(struct rpc_pipe_msg *);
	struct dentry *dentry;
	spin_lock(&pipe->lock);
	destroy_msg = pipe->ops->destroy_msg;
	if (pipe->nreaders == 0) {
		list_splice_init(&pipe->pipe, &free_list);
		pipe->pipelen = 0;
	}
	/* Pin the dentry so the inode's waitq stays valid after unlock. */
	dentry = dget(pipe->dentry);
	spin_unlock(&pipe->lock);
	rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
			&free_list, destroy_msg, -ETIMEDOUT);
	dput(dentry);
}
/*
 * Default ->upcall implementation: copy the unread part of 'msg' to the
 * user buffer, advancing msg->copied.  Returns bytes copied, or -EFAULT
 * when nothing at all could be copied.
 */
ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
				char __user *dst, size_t buflen)
{
	char *data = (char *)msg->data + msg->copied;
	size_t mlen = min(msg->len - msg->copied, buflen);
	unsigned long left;
	left = copy_to_user(dst, data, mlen);
	if (left == mlen) {
		/* Not even one byte made it out: report the fault. */
		msg->errno = -EFAULT;
		return -EFAULT;
	}
	/* Partial copies are fine; account only what was transferred. */
	mlen -= left;
	msg->copied += mlen;
	msg->errno = 0;
	return mlen;
}
EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/*
 * Queue an upcall message for userspace.  Accepted if a reader is
 * present, or — with RPC_PIPE_WAIT_FOR_OPEN — held while a timeout work
 * item waits for one.  Returns -EPIPE if the message cannot be queued.
 */
int
rpc_queue_upcall(struct rpc_pipe *pipe, struct rpc_pipe_msg *msg)
{
	int res = -EPIPE;
	struct dentry *dentry;
	spin_lock(&pipe->lock);
	if (pipe->nreaders) {
		list_add_tail(&msg->list, &pipe->pipe);
		pipe->pipelen += msg->len;
		res = 0;
	} else if (pipe->flags & RPC_PIPE_WAIT_FOR_OPEN) {
		/* Arm the timeout only when the queue goes non-empty. */
		if (list_empty(&pipe->pipe))
			queue_delayed_work(rpciod_workqueue,
					&pipe->queue_timeout,
					RPC_UPCALL_TIMEOUT);
		list_add_tail(&msg->list, &pipe->pipe);
		pipe->pipelen += msg->len;
		res = 0;
	}
	/* Pin the dentry so the waitq outlives the unlock. */
	dentry = dget(pipe->dentry);
	spin_unlock(&pipe->lock);
	if (dentry) {
		wake_up(&RPC_I(dentry->d_inode)->waitq);
		dput(dentry);
	}
	return res;
}
EXPORT_SYMBOL_GPL(rpc_queue_upcall);
/* Attach/detach the private owner (e.g. rpc_clnt) of an rpc_pipefs inode. */
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
{
	RPC_I(inode)->private = private;
}
/*
 * Detach a pipe from its inode: fail all queued and in-flight messages
 * with -EPIPE, notify the ops' release hook if anyone had it open, and
 * clear the inode's pipe/owner pointers.  Called under no locks; takes
 * i_mutex and the pipe lock itself.
 */
static void
rpc_close_pipes(struct inode *inode)
{
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	int need_release;
	LIST_HEAD(free_list);
	mutex_lock(&inode->i_mutex);
	spin_lock(&pipe->lock);
	need_release = pipe->nreaders != 0 || pipe->nwriters != 0;
	pipe->nreaders = 0;
	list_splice_init(&pipe->in_upcall, &free_list);
	list_splice_init(&pipe->pipe, &free_list);
	pipe->pipelen = 0;
	pipe->dentry = NULL;
	spin_unlock(&pipe->lock);
	rpc_purge_list(&RPC_I(inode)->waitq, &free_list, pipe->ops->destroy_msg, -EPIPE);
	pipe->nwriters = 0;
	if (need_release && pipe->ops->release_pipe)
		pipe->ops->release_pipe(inode);
	/* Safe only after readers/writers are zeroed: the work item may
	 * still be queued until this point. */
	cancel_delayed_work_sync(&pipe->queue_timeout);
	rpc_inode_setowner(inode, NULL);
	RPC_I(inode)->pipe = NULL;
	mutex_unlock(&inode->i_mutex);
}
/*
 * super_operations ->alloc_inode: carve an rpc_inode from the slab cache
 * and hand back the embedded VFS inode.  No cast on the allocation
 * return — kmem_cache_alloc() returns void *.
 */
static struct inode *
rpc_alloc_inode(struct super_block *sb)
{
	struct rpc_inode *rpci;
	rpci = kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
	if (!rpci)
		return NULL;
	return &rpci->vfs_inode;
}
/* RCU callback: return the containing rpc_inode to the slab cache. */
static void
rpc_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(rpc_inode_cachep, RPC_I(inode));
}
/* super_operations ->destroy_inode: defer the free past an RCU grace
 * period so lockless path walkers never see freed memory. */
static void
rpc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, rpc_i_callback);
}
/*
 * ->open for a pipe file: invoke the ops' open hook on the first opener,
 * then count this opener as reader and/or writer according to f_mode.
 * Fails with -ENXIO once the pipe has been detached from the inode.
 */
static int
rpc_pipe_open(struct inode *inode, struct file *filp)
{
	struct rpc_pipe *pipe;
	int first_open;
	int res = -ENXIO;
	mutex_lock(&inode->i_mutex);
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL)
		goto out;
	first_open = pipe->nreaders == 0 && pipe->nwriters == 0;
	if (first_open && pipe->ops->open_pipe) {
		res = pipe->ops->open_pipe(inode);
		if (res)
			goto out;
	}
	if (filp->f_mode & FMODE_READ)
		pipe->nreaders++;
	if (filp->f_mode & FMODE_WRITE)
		pipe->nwriters++;
	res = 0;
out:
	mutex_unlock(&inode->i_mutex);
	return res;
}
/*
 * ->release for a pipe file: abort any partially-read message, drop the
 * reader/writer counts, purge the queue when the last reader leaves, and
 * call the ops' release hook on the last close.
 */
static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
	struct rpc_pipe *pipe;
	struct rpc_pipe_msg *msg;
	int last_close;
	mutex_lock(&inode->i_mutex);
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL)
		goto out;	/* already torn down by rpc_close_pipes() */
	msg = filp->private_data;
	if (msg != NULL) {
		/* A message was mid-read: fail it with -EAGAIN. */
		spin_lock(&pipe->lock);
		msg->errno = -EAGAIN;
		list_del_init(&msg->list);
		spin_unlock(&pipe->lock);
		pipe->ops->destroy_msg(msg);
	}
	if (filp->f_mode & FMODE_WRITE)
		pipe->nwriters --;
	if (filp->f_mode & FMODE_READ) {
		pipe->nreaders --;
		if (pipe->nreaders == 0) {
			LIST_HEAD(free_list);
			spin_lock(&pipe->lock);
			list_splice_init(&pipe->pipe, &free_list);
			pipe->pipelen = 0;
			spin_unlock(&pipe->lock);
			rpc_purge_list(&RPC_I(inode)->waitq, &free_list,
					pipe->ops->destroy_msg, -EAGAIN);
		}
	}
	last_close = pipe->nwriters == 0 && pipe->nreaders == 0;
	if (last_close && pipe->ops->release_pipe)
		pipe->ops->release_pipe(inode);
out:
	mutex_unlock(&inode->i_mutex);
	return 0;
}
/*
 * ->read for a pipe file: pick the next queued upcall (parking it on
 * in_upcall and in filp->private_data for partial reads), hand it to the
 * ops' upcall method, and destroy it once fully consumed or on error.
 */
static ssize_t
rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_pipe *pipe;
	struct rpc_pipe_msg *msg;
	int res = 0;
	mutex_lock(&inode->i_mutex);
	pipe = RPC_I(inode)->pipe;
	if (pipe == NULL) {
		res = -EPIPE;
		goto out_unlock;
	}
	msg = filp->private_data;
	if (msg == NULL) {
		spin_lock(&pipe->lock);
		if (!list_empty(&pipe->pipe)) {
			msg = list_entry(pipe->pipe.next,
					struct rpc_pipe_msg,
					list);
			/* Move to in_upcall so a teardown still finds it. */
			list_move(&msg->list, &pipe->in_upcall);
			pipe->pipelen -= msg->len;
			filp->private_data = msg;
			msg->copied = 0;
		}
		spin_unlock(&pipe->lock);
		if (msg == NULL)
			goto out_unlock;	/* nothing queued: return 0 */
	}
	res = pipe->ops->upcall(filp, msg, buf, len);
	if (res < 0 || msg->len == msg->copied) {
		filp->private_data = NULL;
		spin_lock(&pipe->lock);
		list_del_init(&msg->list);
		spin_unlock(&pipe->lock);
		pipe->ops->destroy_msg(msg);
	}
out_unlock:
	mutex_unlock(&inode->i_mutex);
	return res;
}
/*
 * ->write for a pipe file: forward the user buffer to the ops' downcall
 * handler; -EPIPE once the pipe has been detached from the inode.
 */
static ssize_t
rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *offset)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int res = -EPIPE;

	mutex_lock(&inode->i_mutex);
	if (RPC_I(inode)->pipe != NULL)
		res = RPC_I(inode)->pipe->ops->downcall(filp, buf, len);
	mutex_unlock(&inode->i_mutex);
	return res;
}
/*
 * ->poll: writes are always possible; readable when a message is queued
 * or a partial read is parked on the file; ERR|HUP once detached.
 */
static unsigned int
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_inode *rpci = RPC_I(inode);
	unsigned int mask = POLLOUT | POLLWRNORM;
	poll_wait(filp, &rpci->waitq, wait);
	mutex_lock(&inode->i_mutex);
	if (rpci->pipe == NULL)
		mask |= POLLERR | POLLHUP;
	else if (filp->private_data || !list_empty(&rpci->pipe->pipe))
		mask |= POLLIN | POLLRDNORM;
	mutex_unlock(&inode->i_mutex);
	return mask;
}
/*
 * ->unlocked_ioctl: only FIONREAD is supported — the number of queued
 * bytes plus the unread remainder of any message parked on this file.
 */
static long
rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct rpc_pipe *pipe;
	int len;
	switch (cmd) {
	case FIONREAD:
		mutex_lock(&inode->i_mutex);
		pipe = RPC_I(inode)->pipe;
		if (pipe == NULL) {
			mutex_unlock(&inode->i_mutex);
			return -EPIPE;
		}
		spin_lock(&pipe->lock);
		len = pipe->pipelen;
		if (filp->private_data) {
			struct rpc_pipe_msg *msg;
			msg = filp->private_data;
			len += msg->len - msg->copied;
		}
		spin_unlock(&pipe->lock);
		mutex_unlock(&inode->i_mutex);
		return put_user(len, (int __user *)arg);
	default:
		return -EINVAL;
	}
}
/* File operations installed on rpc_pipefs pipe inodes. */
static const struct file_operations rpc_pipe_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rpc_pipe_read,
	.write		= rpc_pipe_write,
	.poll		= rpc_pipe_poll,
	.unlocked_ioctl	= rpc_pipe_ioctl,
	.open		= rpc_pipe_open,
	.release	= rpc_pipe_release,
};
/*
 * seq_file show callback for the per-client "info" file: dump server,
 * program/version and transport details.  The xprt is RCU-protected.
 */
static int
rpc_show_info(struct seq_file *m, void *v)
{
	struct rpc_clnt *clnt = m->private;
	rcu_read_lock();
	seq_printf(m, "RPC server: %s\n",
			rcu_dereference(clnt->cl_xprt)->servername);
	seq_printf(m, "service: %s (%d) version %d\n", clnt->cl_protname,
			clnt->cl_prog, clnt->cl_vers);
	seq_printf(m, "address: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	seq_printf(m, "protocol: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PROTO));
	seq_printf(m, "port: %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_PORT));
	rcu_read_unlock();
	return 0;
}
/*
 * ->open for the "info" file: take a reference on the owning rpc_clnt
 * (under d_lock, and only while the dentry is still hashed) so it cannot
 * vanish while the file is open.  Fails with -EINVAL if the client is
 * gone or its refcount already hit zero.
 */
static int
rpc_info_open(struct inode *inode, struct file *file)
{
	struct rpc_clnt *clnt = NULL;
	int ret = single_open(file, rpc_show_info, NULL);
	if (!ret) {
		struct seq_file *m = file->private_data;
		spin_lock(&file->f_path.dentry->d_lock);
		if (!d_unhashed(file->f_path.dentry))
			clnt = RPC_I(inode)->private;
		if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
			spin_unlock(&file->f_path.dentry->d_lock);
			m->private = clnt;
		} else {
			spin_unlock(&file->f_path.dentry->d_lock);
			single_release(inode, file);
			ret = -EINVAL;
		}
	}
	return ret;
}
/* ->release for the "info" file: drop the client ref taken at open. */
static int
rpc_info_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct rpc_clnt *clnt = seq->private;

	if (clnt)
		rpc_release_client(clnt);
	return single_release(inode, file);
}
/* File operations for the per-client "info" file (seq_file based). */
static const struct file_operations rpc_info_operations = {
	.owner		= THIS_MODULE,
	.open		= rpc_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= rpc_info_release,
};
/* Static description of one entry to create under an rpc_pipefs dir. */
struct rpc_filelist {
	const char *name;			/* entry name */
	const struct file_operations *i_fop;	/* fops for regular files */
	umode_t mode;				/* type (S_IFREG/S_IFDIR) + perms */
};
/* Always drop unused dentries: rpc_pipefs entries are not worth caching. */
static int rpc_delete_dentry(const struct dentry *dentry)
{
	return 1;
}
static const struct dentry_operations rpc_dentry_operations = {
	.d_delete = rpc_delete_dentry,
};
/*
 * Allocate a fresh rpc_pipefs inode of the given mode; directories get
 * the simple_* dir ops and the extra "." link.
 */
static struct inode *
rpc_get_inode(struct super_block *sb, umode_t mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_fop = &simple_dir_operations;
		inode->i_op = &simple_dir_inode_operations;
		inc_nlink(inode);
		/* fall through */
	default:
		break;
	}
	return inode;
}
/*
 * Shared创ation path for files, dirs and pipes: allocate the inode,
 * install fops/owner and bind it to the (pre-allocated) dentry.
 * Consumes the dentry reference on failure.
 */
static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
			       umode_t mode,
			       const struct file_operations *i_fop,
			       void *private)
{
	struct inode *inode;
	d_drop(dentry);
	inode = rpc_get_inode(dir->i_sb, mode);
	if (!inode)
		goto out_err;
	inode->i_ino = iunique(dir->i_sb, 100);
	if (i_fop)
		inode->i_fop = i_fop;
	if (private)
		rpc_inode_setowner(inode, private);
	d_add(dentry, inode);
	return 0;
out_err:
	printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
			__FILE__, __func__, dentry->d_name.name);
	dput(dentry);
	return -ENOMEM;
}
/* Create a regular file under 'dir' and emit the fsnotify create event. */
static int __rpc_create(struct inode *dir, struct dentry *dentry,
			umode_t mode,
			const struct file_operations *i_fop,
			void *private)
{
	int err = __rpc_create_common(dir, dentry, S_IFREG | mode,
				      i_fop, private);

	if (err == 0)
		fsnotify_create(dir, dentry);
	return err;
}
/* Create a directory under 'dir', bump its parent's link count and emit
 * the fsnotify mkdir event. */
static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
		       umode_t mode,
		       const struct file_operations *i_fop,
		       void *private)
{
	int err = __rpc_create_common(dir, dentry, S_IFDIR | mode,
				      i_fop, private);

	if (err == 0) {
		inc_nlink(dir);
		fsnotify_mkdir(dir, dentry);
	}
	return err;
}
/* Put a freshly allocated rpc_pipe into its empty, unarmed state. */
static void
init_pipe(struct rpc_pipe *pipe)
{
	spin_lock_init(&pipe->lock);
	INIT_LIST_HEAD(&pipe->in_upcall);
	INIT_LIST_HEAD(&pipe->in_downcall);
	INIT_LIST_HEAD(&pipe->pipe);
	INIT_DELAYED_WORK(&pipe->queue_timeout,
			  rpc_timeout_upcall_queue);
	pipe->pipelen = 0;
	pipe->nreaders = 0;
	pipe->nwriters = 0;
	pipe->ops = NULL;
	pipe->dentry = NULL;
}
/* Free a pipe allocated with rpc_mkpipe_data(); NULL is tolerated. */
void rpc_destroy_pipe_data(struct rpc_pipe *pipe)
{
	kfree(pipe);
}
EXPORT_SYMBOL_GPL(rpc_destroy_pipe_data);
/*
 * Allocate and initialise a detached rpc_pipe (no dentry yet) with the
 * given ops and flags.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 * Uses sizeof(*pipe) so the allocation stays correct if the type of
 * 'pipe' ever changes.
 */
struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags)
{
	struct rpc_pipe *pipe;
	pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
	if (!pipe)
		return ERR_PTR(-ENOMEM);
	init_pipe(pipe);
	pipe->ops = ops;
	pipe->flags = flags;
	return pipe;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_data);
/*
 * Bind an rpc_pipe to a new FIFO inode under 'dir' and emit the fsnotify
 * create event.  The pipe becomes reachable through RPC_I(inode)->pipe.
 */
static int __rpc_mkpipe_dentry(struct inode *dir, struct dentry *dentry,
			       umode_t mode,
			       const struct file_operations *i_fop,
			       void *private,
			       struct rpc_pipe *pipe)
{
	struct rpc_inode *rpci;
	int err;
	err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
	if (err)
		return err;
	rpci = RPC_I(dentry->d_inode);
	rpci->private = private;
	rpci->pipe = pipe;
	fsnotify_create(dir, dentry);
	return 0;
}
/* Remove a directory entry; caller holds dir->i_mutex.  The temporary
 * dget keeps the dentry alive across simple_rmdir/d_delete. */
static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
{
	int ret;
	dget(dentry);
	ret = simple_rmdir(dir, dentry);
	d_delete(dentry);
	dput(dentry);
	return ret;
}
/* Public rmdir: locate the parent and take its i_mutex around the
 * actual removal. */
int rpc_rmdir(struct dentry *dentry)
{
	struct dentry *parent;
	struct inode *dir;
	int error;
	parent = dget_parent(dentry);
	dir = parent->d_inode;
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	error = __rpc_rmdir(dir, dentry);
	mutex_unlock(&dir->i_mutex);
	dput(parent);
	return error;
}
EXPORT_SYMBOL_GPL(rpc_rmdir);
/* Unlink a regular entry; caller holds dir->i_mutex. */
static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
{
	int ret;
	dget(dentry);
	ret = simple_unlink(dir, dentry);
	d_delete(dentry);
	dput(dentry);
	return ret;
}
/* Remove a pipe entry: flush/abort its messages first, then unlink. */
static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	rpc_close_pipes(inode);
	return __rpc_unlink(dir, dentry);
}
/*
 * Find-or-allocate a negative dentry for 'name' under 'parent'.
 * Returns -EEXIST if a positive dentry is already present, -ENOMEM on
 * allocation failure.  Caller holds the parent's i_mutex.
 */
static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
					  struct qstr *name)
{
	struct dentry *dentry;
	dentry = d_lookup(parent, name);
	if (!dentry) {
		dentry = d_alloc(parent, name);
		if (!dentry)
			return ERR_PTR(-ENOMEM);
	}
	if (dentry->d_inode == NULL) {
		d_set_d_op(dentry, &rpc_dentry_operations);
		return dentry;
	}
	dput(dentry);
	return ERR_PTR(-EEXIST);
}
static void __rpc_depopulate(struct dentry *parent,
const struct rpc_filelist *files,
int start, int eof)
{
struct inode *dir = parent->d_inode;
struct dentry *dentry;
struct qstr name;
int i;
for (i = start; i < eof; i++) {
name.name = files[i].name;
name.len = strlen(files[i].name);
name.hash = full_name_hash(name.name, name.len);
dentry = d_lookup(parent, &name);
if (dentry == NULL)
continue;
if (dentry->d_inode == NULL)
goto next;
switch (dentry->d_inode->i_mode & S_IFMT) {
default:
BUG();
case S_IFREG:
__rpc_unlink(dir, dentry);
break;
case S_IFDIR:
__rpc_rmdir(dir, dentry);
}
next:
dput(dentry);
}
}
static void rpc_depopulate(struct dentry *parent,
const struct rpc_filelist *files,
int start, int eof)
{
struct inode *dir = parent->d_inode;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
__rpc_depopulate(parent, files, start, eof);
mutex_unlock(&dir->i_mutex);
}
static int rpc_populate(struct dentry *parent,
const struct rpc_filelist *files,
int start, int eof,
void *private)
{
struct inode *dir = parent->d_inode;
struct dentry *dentry;
int i, err;
mutex_lock(&dir->i_mutex);
for (i = start; i < eof; i++) {
struct qstr q;
q.name = files[i].name;
q.len = strlen(files[i].name);
q.hash = full_name_hash(q.name, q.len);
dentry = __rpc_lookup_create_exclusive(parent, &q);
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_bad;
switch (files[i].mode & S_IFMT) {
default:
BUG();
case S_IFREG:
err = __rpc_create(dir, dentry,
files[i].mode,
files[i].i_fop,
private);
break;
case S_IFDIR:
err = __rpc_mkdir(dir, dentry,
files[i].mode,
NULL,
private);
}
if (err != 0)
goto out_bad;
}
mutex_unlock(&dir->i_mutex);
return 0;
out_bad:
__rpc_depopulate(parent, files, start, eof);
mutex_unlock(&dir->i_mutex);
printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
__FILE__, __func__, parent->d_name.name);
return err;
}
/*
 * Create directory @name under @parent, then let @populate (if any)
 * fill it with @args_populate.  On populate failure the directory is
 * removed again and an ERR_PTR is returned; otherwise the new dentry.
 */
static struct dentry *rpc_mkdir_populate(struct dentry *parent,
struct qstr *name, umode_t mode, void *private,
int (*populate)(struct dentry *, void *), void *args_populate)
{
struct dentry *dentry;
struct inode *dir = parent->d_inode;
int error;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
dentry = __rpc_lookup_create_exclusive(parent, name);
if (IS_ERR(dentry))
goto out;
error = __rpc_mkdir(dir, dentry, mode, NULL, private);
if (error != 0)
goto out_err;
if (populate != NULL) {
error = populate(dentry, args_populate);
if (error)
goto err_rmdir;
}
out:
mutex_unlock(&dir->i_mutex);
return dentry;
err_rmdir:
/* populate failed: remove the half-built directory */
__rpc_rmdir(dir, dentry);
out_err:
dentry = ERR_PTR(error);
goto out;
}
/*
 * Inverse of rpc_mkdir_populate(): run @depopulate (if any) on the
 * directory, then remove it.  The parent's mutex is taken here.
 */
static int rpc_rmdir_depopulate(struct dentry *dentry,
void (*depopulate)(struct dentry *))
{
struct dentry *parent;
struct inode *dir;
int error;
parent = dget_parent(dentry);
dir = parent->d_inode;
mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (depopulate != NULL)
depopulate(dentry);
error = __rpc_rmdir(dir, dentry);
mutex_unlock(&dir->i_mutex);
dput(parent);
return error;
}
/**
 * rpc_mkpipe_dentry - create an rpc_pipefs pipe file
 * @parent: directory dentry under which to create the pipe
 * @name: name of the new pipe file
 * @private: opaque data stored in the new rpc_inode
 * @pipe: pipe state (ops, flags) set up by rpc_mkpipe_data()
 *
 * The file mode drops the read (write) permission bits when the pipe
 * has no upcall (downcall) handler.  Returns the new dentry or an
 * ERR_PTR on failure.
 */
struct dentry *rpc_mkpipe_dentry(struct dentry *parent, const char *name,
				 void *private, struct rpc_pipe *pipe)
{
	struct dentry *dentry;
	struct inode *dir = parent->d_inode;
	umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
	struct qstr q;
	int err;

	if (pipe->ops->upcall == NULL)
		umode &= ~S_IRUGO;
	if (pipe->ops->downcall == NULL)
		umode &= ~S_IWUGO;

	q.name = name;
	q.len = strlen(name);
	/* was terminated by a stray ',' that glued this statement to the
	 * mutex_lock_nested() call below via the comma operator */
	q.hash = full_name_hash(q.name, q.len);

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	dentry = __rpc_lookup_create_exclusive(parent, &q);
	if (IS_ERR(dentry))
		goto out;
	err = __rpc_mkpipe_dentry(dir, dentry, umode, &rpc_pipe_fops,
				  private, pipe);
	if (err)
		goto out_err;
out:
	mutex_unlock(&dir->i_mutex);
	return dentry;
out_err:
	dentry = ERR_PTR(err);
	printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
			__FILE__, __func__, parent->d_name.name, name,
			err);
	goto out;
}
EXPORT_SYMBOL_GPL(rpc_mkpipe_dentry);
/*
 * rpc_unlink - close and remove a pipe created by rpc_mkpipe_dentry()
 *
 * Takes the parent directory's mutex, shuts the pipe down and unlinks
 * its dentry.  Returns the result of the unlink.
 */
int
rpc_unlink(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	int error;

	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	error = __rpc_rmpipe(dir, dentry);
	mutex_unlock(&dir->i_mutex);

	dput(parent);
	return error;
}
EXPORT_SYMBOL_GPL(rpc_unlink);
/* Indices (plus terminator) for the per-client file table below. */
enum {
RPCAUTH_info,
RPCAUTH_EOF
};
/* Files created inside every RPC client directory. */
static const struct rpc_filelist authfiles[] = {
[RPCAUTH_info] = {
.name = "info",
.i_fop = &rpc_info_operations,
.mode = S_IFREG | S_IRUSR,
},
};
/* rpc_mkdir_populate() callback: create the client "info" file. */
static int rpc_clntdir_populate(struct dentry *dentry, void *private)
{
return rpc_populate(dentry,
authfiles, RPCAUTH_info, RPCAUTH_EOF,
private);
}
/* rpc_rmdir_depopulate() callback: remove the client "info" file. */
static void rpc_clntdir_depopulate(struct dentry *dentry)
{
rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
}
/*
 * Create a per-client directory with its "info" file; @rpc_client is
 * passed through as the files' private data.
 */
struct dentry *rpc_create_client_dir(struct dentry *dentry,
struct qstr *name,
struct rpc_clnt *rpc_client)
{
return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
rpc_clntdir_populate, rpc_client);
}
/* Tear down a directory made by rpc_create_client_dir(). */
int rpc_remove_client_dir(struct dentry *dentry)
{
return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
}
/*
 * File table for a cache directory (channel/content/flush); consumed
 * by rpc_cachedir_populate()/rpc_cachedir_depopulate() below.
 */
static const struct rpc_filelist cache_pipefs_files[3] = {
[0] = {
.name = "channel",
.i_fop = &cache_file_operations_pipefs,
.mode = S_IFREG|S_IRUSR|S_IWUSR,
},
[1] = {
.name = "content",
.i_fop = &content_file_operations_pipefs,
.mode = S_IFREG|S_IRUSR,
},
[2] = {
.name = "flush",
.i_fop = &cache_flush_operations_pipefs,
.mode = S_IFREG|S_IRUSR|S_IWUSR,
},
};
/* rpc_mkdir_populate() callback: create the cache channel/content/flush files. */
static int rpc_cachedir_populate(struct dentry *dentry, void *private)
{
	/* ARRAY_SIZE instead of a magic '3': stays correct if the table grows */
	return rpc_populate(dentry,
			    cache_pipefs_files, 0, ARRAY_SIZE(cache_pipefs_files),
			    private);
}
/* rpc_rmdir_depopulate() callback: remove the cache files again. */
static void rpc_cachedir_depopulate(struct dentry *dentry)
{
	/* ARRAY_SIZE instead of a magic '3': stays correct if the table grows */
	rpc_depopulate(dentry, cache_pipefs_files, 0, ARRAY_SIZE(cache_pipefs_files));
}
/*
 * Create a cache directory named @name under @parent and populate it
 * with the channel/content/flush files; @cd is their private data.
 */
struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
umode_t umode, struct cache_detail *cd)
{
return rpc_mkdir_populate(parent, name, umode, NULL,
rpc_cachedir_populate, cd);
}
/* Tear down a directory made by rpc_create_cache_dir(). */
void rpc_remove_cache_dir(struct dentry *dentry)
{
rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
}
/* Superblock ops: rpc_pipefs only needs inode alloc/free and statfs. */
static const struct super_operations s_ops = {
.alloc_inode = rpc_alloc_inode,
.destroy_inode = rpc_destroy_inode,
.statfs = simple_statfs,
};
/* rpc_pipefs superblock magic number */
#define RPCAUTH_GSSMAGIC 0x67596969
/* Indices into files[] for the fixed top-level directories. */
enum {
RPCAUTH_lockd,
RPCAUTH_mount,
RPCAUTH_nfs,
RPCAUTH_portmap,
RPCAUTH_statd,
RPCAUTH_nfsd4_cb,
RPCAUTH_cache,
RPCAUTH_nfsd,
RPCAUTH_RootEOF
};
/* Fixed directories created at the root of every rpc_pipefs mount. */
static const struct rpc_filelist files[] = {
[RPCAUTH_lockd] = {
.name = "lockd",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_mount] = {
.name = "mount",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_nfs] = {
.name = "nfs",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_portmap] = {
.name = "portmap",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_statd] = {
.name = "statd",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_nfsd4_cb] = {
.name = "nfsd4_cb",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_cache] = {
.name = "cache",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
[RPCAUTH_nfsd] = {
.name = "nfsd",
.mode = S_IFDIR | S_IRUGO | S_IXUGO,
},
};
struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
const unsigned char *dir_name)
{
struct qstr dir = {
.name = dir_name,
.len = strlen(dir_name),
.hash = full_name_hash(dir_name, strlen(dir_name)),
};
return d_lookup(sb->s_root, &dir);
}
EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
/* Per-net init: set up the lock protecting this net's pipefs_sb. */
void rpc_pipefs_init_net(struct net *net)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_init(&sn->pipefs_sb_lock);
}
/*
 * Return @net's rpc_pipefs superblock, or NULL if none is mounted.
 * On success pipefs_sb_lock is left HELD; the caller must release it
 * via rpc_put_sb_net().
 */
struct super_block *rpc_get_sb_net(const struct net *net)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_lock(&sn->pipefs_sb_lock);
if (sn->pipefs_sb)
return sn->pipefs_sb;
mutex_unlock(&sn->pipefs_sb_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rpc_get_sb_net);
/* Release the lock taken by a successful rpc_get_sb_net(). */
void rpc_put_sb_net(const struct net *net)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
BUG_ON(sn->pipefs_sb == NULL);
mutex_unlock(&sn->pipefs_sb_lock);
}
EXPORT_SYMBOL_GPL(rpc_put_sb_net);
/*
 * Fill a new rpc_pipefs superblock: root inode, the fixed top-level
 * directories, then publish the sb for this net namespace and notify
 * listeners (RPC clients, caches) of the MOUNT event.
 */
static int
rpc_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct dentry *root;
struct net *net = data;
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
int err;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = RPCAUTH_GSSMAGIC;
sb->s_op = &s_ops;
sb->s_time_gran = 1;
inode = rpc_get_inode(sb, S_IFDIR | 0755);
sb->s_root = root = d_make_root(inode);
if (!root)
return -ENOMEM;
/* NOTE(review): rpc_populate()'s actual error code is discarded and
 * -ENOMEM reported regardless — confirm whether callers care. */
if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
return -ENOMEM;
dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net,
NET_NAME(net));
/* publish the sb before notifying so callbacks can look it up */
sn->pipefs_sb = sb;
err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_MOUNT,
sb);
if (err)
goto err_depopulate;
sb->s_fs_info = get_net(net);
return 0;
err_depopulate:
/* unwind: notify UMOUNT, unpublish, and remove the directory tree */
blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_UMOUNT,
sb);
sn->pipefs_sb = NULL;
__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
return err;
}
/* Mount callback: one rpc_pipefs instance per network namespace. */
static struct dentry *
rpc_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_ns(fs_type, flags, current->nsproxy->net_ns, rpc_fill_super);
}
/*
 * Unmount: unpublish the superblock from the net namespace, drop the
 * net reference and notify listeners of the UMOUNT event.
 * NOTE(review): dereferences s_fs_info unconditionally — assumes
 * rpc_fill_super() succeeded; verify failed-mount teardown paths.
 */
static void rpc_kill_sb(struct super_block *sb)
{
struct net *net = sb->s_fs_info;
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
mutex_lock(&sn->pipefs_sb_lock);
sn->pipefs_sb = NULL;
mutex_unlock(&sn->pipefs_sb_lock);
put_net(net);
dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n", net,
NET_NAME(net));
blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_UMOUNT,
sb);
kill_litter_super(sb);
}
static struct file_system_type rpc_pipe_fs_type = {
.owner = THIS_MODULE,
.name = "rpc_pipefs",
.mount = rpc_mount,
.kill_sb = rpc_kill_sb,
};
/* Slab constructor: runs once per rpc_inode object when it is first allocated. */
static void
init_once(void *foo)
{
struct rpc_inode *rpci = (struct rpc_inode *) foo;
inode_init_once(&rpci->vfs_inode);
rpci->private = NULL;
rpci->pipe = NULL;
init_waitqueue_head(&rpci->waitq);
}
/*
 * rpc_pipefs initialisation: create the inode slab cache, register the
 * client notifier, then the filesystem.  Unwinds in reverse on error.
 */
int register_rpc_pipefs(void)
{
int err;
rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
sizeof(struct rpc_inode),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (!rpc_inode_cachep)
return -ENOMEM;
err = rpc_clients_notifier_register();
if (err)
goto err_notifier;
err = register_filesystem(&rpc_pipe_fs_type);
if (err)
goto err_register;
return 0;
err_register:
rpc_clients_notifier_unregister();
err_notifier:
kmem_cache_destroy(rpc_inode_cachep);
return err;
}
/*
 * Teardown.  NOTE(review): the inode cache is destroyed before the
 * filesystem is unregistered (not the reverse of registration order);
 * confirm no superblock can still hold cached inodes at this point.
 */
void unregister_rpc_pipefs(void)
{
rpc_clients_notifier_unregister();
kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
}
MODULE_ALIAS("rpc_pipefs");
| gpl-2.0 |
cuteprince/kernel_3.4_pico | kernel/irq/spurious.c | 361 | 6387 | /*
* linux/kernel/irq/spurious.c
*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains spurious interrupt handling.
*/
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include "internals.h"
/* 0 = off, 1 = poll on unhandled IRQs ("irqfixup"), 2 = aggressive ("irqpoll") */
static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
/* CPU currently polling, plus a counter that excludes concurrent polls */
static int irq_poll_cpu;
static atomic_t irq_poll_active;
/*
 * Wait for an in-progress poll of @desc to finish.  Warns and bails out
 * if the poll is running on this very CPU (waiting would deadlock).
 * Called with desc->lock held; the lock is dropped while busy-waiting.
 * Returns true if the interrupt can still be delivered afterwards.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
"irq poll in progress on cpu %d for irq %d\n",
smp_processor_id(), desc->irq_data.irq))
return false;
#ifdef CONFIG_SMP
do {
raw_spin_unlock(&desc->lock);
while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
raw_spin_lock(&desc->lock);
} while (irqd_irq_inprogress(&desc->irq_data));
/* the line may have been disabled or its action removed meanwhile */
return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
return false;
#endif
}
/*
 * Poll one interrupt line.  Only shared, non-timer lines with more than
 * one action are polled; @force additionally allows disabled lines.
 * Returns 1 if handle_irq_event() reported the interrupt handled.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
irqreturn_t ret = IRQ_NONE;
struct irqaction *action;
raw_spin_lock(&desc->lock);
/* per-CPU and nested-thread interrupts are never polled */
if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
goto out;
/* do not poll disabled lines unless the spurious poll (force) asks */
if (irqd_irq_disabled(&desc->irq_data) && !force)
goto out;
/*
 * Skip unless the line is shared, non-timer, and has multiple
 * actions; invoking the first handler here also ends the poll
 * early when it claims the interrupt.
 */
action = desc->action;
if (!action || !(action->flags & IRQF_SHARED) ||
(action->flags & __IRQF_TIMER) ||
(action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
!action->next)
goto out;
/* already running on another CPU: mark pending and let it re-run */
if (irqd_irq_inprogress(&desc->irq_data)) {
desc->istate |= IRQS_PENDING;
goto out;
}
desc->istate |= IRQS_POLL_INPROGRESS;
do {
if (handle_irq_event(desc) == IRQ_HANDLED)
ret = IRQ_HANDLED;
/* the action chain may change while the lock was dropped */
action = desc->action;
} while ((desc->istate & IRQS_PENDING) && action);
desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
raw_spin_unlock(&desc->lock);
return ret == IRQ_HANDLED;
}
/*
 * Try to find the line a stray interrupt really belongs to by polling
 * every other line.  Returns 1 if some handler claimed it.
 */
static int misrouted_irq(int irq)
{
struct irq_desc *desc;
int i, ok = 0;
/* only one poller at a time */
if (atomic_inc_return(&irq_poll_active) != 1)
goto out;
irq_poll_cpu = smp_processor_id();
for_each_irq_desc(i, desc) {
if (!i)
continue;
if (i == irq)
continue;
if (try_one_irq(i, desc, false))
ok = 1;
}
out:
atomic_dec(&irq_poll_active);
return ok;
}
/*
 * Timer callback: periodically force-poll lines that were disabled as
 * spurious, so the devices behind them can still make progress.
 */
static void poll_spurious_irqs(unsigned long dummy)
{
struct irq_desc *desc;
int i;
if (atomic_inc_return(&irq_poll_active) != 1)
goto out;
irq_poll_cpu = smp_processor_id();
for_each_irq_desc(i, desc) {
unsigned int state;
if (!i)
continue;
/* snapshot istate; the barrier keeps the read before the test */
state = desc->istate;
barrier();
if (!(state & IRQS_SPURIOUS_DISABLED))
continue;
local_irq_disable();
try_one_irq(i, desc, true);
local_irq_enable();
}
out:
atomic_dec(&irq_poll_active);
/* re-arm ourselves */
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
/*
 * A handler return value is "bad" when it lies outside the legal bit
 * range IRQ_NONE..(IRQ_HANDLED | IRQ_WAKE_THREAD).
 */
static inline int bad_action_ret(irqreturn_t action_ret)
{
	if (unlikely(action_ret > (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 1;
	return 0;
}
/*
 * Print a diagnostic for a misbehaving interrupt line — either a bogus
 * handler return value or a "nobody cared" unhandled interrupt — then
 * dump the installed handler chain.
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
struct irqaction *action;
unsigned long flags;
if (bad_action_ret(action_ret)) {
printk(KERN_ERR "irq event %d: bogus return value %x\n",
irq, action_ret);
} else {
printk(KERN_ERR "irq %d: nobody cared (try booting with "
"the \"irqpoll\" option)\n", irq);
}
dump_stack();
printk(KERN_ERR "handlers:\n");
/* walk the action chain under desc->lock so it cannot change under us */
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
while (action) {
printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
if (action->thread_fn)
printk(KERN_CONT " threaded [<%p>] %pf",
action->thread_fn, action->thread_fn);
printk(KERN_CONT "\n");
action = action->next;
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/* Rate-limited wrapper: only the first 100 bad-IRQ reports are printed. */
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count <= 0)
		return;
	count--;
	__report_bad_irq(irq, desc, action_ret);
}
/*
 * Decide whether a misrouted-IRQ poll should run, per the irqfixup
 * boot parameter: 1 polls on unhandled interrupts; 2 ("irqpoll") also
 * polls handled interrupts, except irq 0 and IRQF_IRQPOLL-less lines.
 */
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
struct irqaction *action;
if (!irqfixup)
return 0;
/* unhandled: always worth looking for the real owner */
if (action_ret == IRQ_NONE)
return 1;
if (irqfixup < 2)
return 0;
if (!irq)
return 1;
/*
 * desc->lock is not held here, so "action" can change under us;
 * the barrier stops the compiler from re-reading it.
 */
action = desc->action;
barrier();
return action && (action->flags & IRQF_IRQPOLL);
}
/*
 * Per-interrupt bookkeeping: count unhandled events and, once 99.9k of
 * the last 100k events went unhandled, report the line and disable it
 * (the spurious-poll timer then keeps its devices serviced).
 */
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
/* events generated by our own polling are not accounted */
if (desc->istate & IRQS_POLL_INPROGRESS)
return;
/* wake-thread results are not accounted here */
if (action_ret == IRQ_WAKE_THREAD)
return;
if (bad_action_ret(action_ret)) {
report_bad_irq(irq, desc, action_ret);
return;
}
if (unlikely(action_ret == IRQ_NONE)) {
/*
 * Isolated stray events reset the counter to 1 instead of
 * accumulating, so occasional noise cannot slowly push a
 * working line over the disable threshold.
 */
if (time_after(jiffies, desc->last_unhandled + HZ/10))
desc->irqs_unhandled = 1;
else
desc->irqs_unhandled++;
desc->last_unhandled = jiffies;
}
if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
int ok = misrouted_irq(irq);
/* a claimed stray event partially forgives the line */
if (action_ret == IRQ_NONE)
desc->irqs_unhandled -= ok;
}
desc->irq_count++;
if (likely(desc->irq_count < 100000))
return;
desc->irq_count = 0;
if (unlikely(desc->irqs_unhandled > 99900)) {
/* the interrupt is stuck: report it, then shut it down */
__report_bad_irq(irq, desc, action_ret);
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->istate |= IRQS_SPURIOUS_DISABLED;
desc->depth++;
irq_disable(desc);
/* let the poll timer keep servicing the disabled line */
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}
desc->irqs_unhandled = 0;
}
/* Set by "noirqdebug" on the command line: disables IRQ lockup detection. */
bool noirqdebug __read_mostly;
int noirqdebug_setup(char *str)
{
noirqdebug = 1;
printk(KERN_INFO "IRQ lockup detection disabled\n");
return 1;
}
__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
/* "irqfixup": poll other lines when an interrupt goes unhandled. */
static int __init irqfixup_setup(char *str)
{
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
return 1;
}
__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);
/* "irqpoll": as irqfixup, but also poll on handled interrupts. */
static int __init irqpoll_setup(char *str)
{
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
printk(KERN_WARNING "This may significantly impact system "
"performance\n");
return 1;
}
__setup("irqpoll", irqpoll_setup);
| gpl-2.0 |
TheBootloader/android_kernel_samsung_msm8930-common | net/mac80211/util.c | 617 | 44807 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* utilities for mac80211
*/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
#include "led.h"
#include "wep.h"
/* privid for wiphys to determine whether they belong to us or not */
void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
/*
 * Translate a wiphy pointer into the ieee80211_hw embedded in its
 * mac80211 private area.  BUGs on a NULL wiphy.
 */
struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
struct ieee80211_local *local;
BUG_ON(!wiphy);
local = wiphy_priv(wiphy);
return &local->hw;
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
/*
 * Extract the BSSID from an 802.11 header, based on the frame type and
 * the DS bits (for BlockAckReq control frames also on the interface
 * @type).  Returns NULL for frames with no usable BSSID or a header
 * shorter than the frame type requires.
 */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	/* too short even for a control header (ACK/CTS) */
	if (len < 16)
		return NULL;

	if (ieee80211_is_mgmt(fc))
		return len < 24 ? NULL : hdr->addr3;

	if (ieee80211_is_data(fc)) {
		if (len < 24 || ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;
		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break;
			}
		}
	}

	return NULL;
}
/*
 * Set the "protected" bit in the frame control field of every frame
 * queued on this TX's skb list.
 */
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
skb_queue_walk(&tx->skbs, skb) {
hdr = (struct ieee80211_hdr *) skb->data;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
}
/*
 * Airtime in microseconds (including SIFS) for @len payload bytes at
 * @rate (units of 100 kbps).  OFDM timing is used on the 5 GHz band or
 * when @erp is set; otherwise DSSS/CCK with long or @short_preamble.
 */
int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
int rate, int erp, int short_preamble)
{
int dur;
/* calculate duration (in microseconds, rounded up to next higher
 * integer if it includes a fractional microsecond) to send frame of
 * len bytes (does not include FCS) at the given rate. Duration will
 * also include SIFS.
 *
 * rate is in 100 kbps, so divident is multiplied by 10 in the
 * DIV_ROUND_UP() operations.
 */
if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) {
/*
 * OFDM:
 *
 * N_DBPS = DATARATE x 4
 * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS)
 * (16 = SIGNAL time, 6 = tail bits)
 * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
 *
 * T_SYM = 4 usec
 * 802.11a - 17.5.2: aSIFSTime = 16 usec
 * 802.11g - 19.8.4: aSIFSTime = 10 usec +
 * signal ext = 6 usec
 */
dur = 16; /* SIFS + signal ext */
dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
4 * rate); /* T_SYM x N_SYM */
} else {
/*
 * 802.11b or 802.11g with 802.11b compatibility:
 * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime +
 * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0.
 *
 * 802.11 (DS): 15.3.3, 802.11b: 18.3.4
 * aSIFSTime = 10 usec
 * aPreambleLength = 144 usec or 72 usec with short preamble
 * aPLCPHeaderLength = 48 usec or 24 usec with short preamble
 */
dur = 10; /* aSIFSTime = 10 usec */
dur += short_preamble ? (72 + 24) : (144 + 48);
dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate);
}
return dur;
}
/*
 * Exported duration helper for drivers: compute the little-endian
 * duration field for a @frame_len frame at @rate, honouring the vif's
 * short-preamble setting and ERP operation when @vif is given.
 */
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					size_t frame_len,
					struct ieee80211_rate *rate)
{
	struct ieee80211_local *local = hw_to_local(hw);
	bool short_preamble = false;
	int erp = 0;
	u16 dur;

	if (vif) {
		struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);

		short_preamble = sdata->vif.bss_conf.use_short_preamble;
		if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
			erp = rate->flags & IEEE80211_RATE_ERP_G;
	}

	dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
				       short_preamble);
	return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_generic_frame_duration);
/*
 * Duration field for an RTS protecting a @frame_len frame: the CTS,
 * the data frame and its ACK, all timed at the RTS/CTS rate selected
 * in @frame_txctl.
 */
__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, size_t frame_len,
const struct ieee80211_tx_info *frame_txctl)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
int erp;
u16 dur;
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
short_preamble = false;
rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
erp = 0;
if (vif) {
sdata = vif_to_sdata(vif);
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
}
/* CTS duration */
dur = ieee80211_frame_duration(local, 10, rate->bitrate,
erp, short_preamble);
/* Data frame duration */
dur += ieee80211_frame_duration(local, frame_len, rate->bitrate,
erp, short_preamble);
/* ACK duration */
dur += ieee80211_frame_duration(local, 10, rate->bitrate,
erp, short_preamble);
return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_rts_duration);
/*
 * Duration field for a CTS-to-self protecting a @frame_len frame: the
 * data frame plus (unless NO_ACK is set) its ACK.
 */
__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
size_t frame_len,
const struct ieee80211_tx_info *frame_txctl)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
int erp;
u16 dur;
struct ieee80211_supported_band *sband;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
short_preamble = false;
rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
erp = 0;
if (vif) {
sdata = vif_to_sdata(vif);
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
}
/* Data frame duration */
dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
erp, short_preamble);
if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ACK duration */
dur += ieee80211_frame_duration(local, 10, rate->bitrate,
erp, short_preamble);
}
return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
/*
 * Clear @reason's stop bit for @queue.  If no stop reasons remain,
 * either wake the matching netdev subqueue of every non-offchannel
 * interface, or (if frames are buffered) schedule the pending tasklet.
 * Caller holds queue_stop_reason_lock.
 */
static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
trace_wake_queue(local, queue, reason);
if (WARN_ON(queue >= hw->queues))
return;
__clear_bit(reason, &local->queue_stop_reasons[queue]);
if (local->queue_stop_reasons[queue] != 0)
/* someone still has this queue stopped */
return;
if (skb_queue_empty(&local->pending[queue])) {
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
continue;
netif_wake_subqueue(sdata->dev, queue);
}
rcu_read_unlock();
} else
tasklet_schedule(&local->tx_pending_tasklet);
}
/* Wake @queue for @reason under the queue_stop_reason lock. */
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long irqflags;

	spin_lock_irqsave(&local->queue_stop_reason_lock, irqflags);
	__ieee80211_wake_queue(hw, queue, reason);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, irqflags);
}

/* Driver-facing wrapper: wake a queue the driver itself stopped. */
void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
{
	ieee80211_wake_queue_by_reason(hw, queue,
				       IEEE80211_QUEUE_STOP_REASON_DRIVER);
}
EXPORT_SYMBOL(ieee80211_wake_queue);
/*
 * Set @reason's stop bit for @queue and stop the matching netdev
 * subqueue of every interface.  Caller holds queue_stop_reason_lock.
 */
static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
trace_stop_queue(local, queue, reason);
if (WARN_ON(queue >= hw->queues))
return;
__set_bit(reason, &local->queue_stop_reasons[queue]);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
netif_stop_subqueue(sdata->dev, queue);
rcu_read_unlock();
}
/* Stop @queue for @reason under the queue_stop_reason lock. */
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long irqflags;

	spin_lock_irqsave(&local->queue_stop_reason_lock, irqflags);
	__ieee80211_stop_queue(hw, queue, reason);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, irqflags);
}

/* Driver-facing wrapper: stop a queue on the driver's behalf. */
void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
{
	ieee80211_stop_queue_by_reason(hw, queue,
				       IEEE80211_QUEUE_STOP_REASON_DRIVER);
}
EXPORT_SYMBOL(ieee80211_stop_queue);
/*
 * Queue one frame on the local pending queue.  The queue is briefly
 * stopped and rewoken under the SKB_ADD reason so the wake path sees
 * the pending frame and schedules the TX tasklet.  Frames without a
 * vif in their control info are dropped.
 */
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_hw *hw = &local->hw;
unsigned long flags;
int queue = skb_get_queue_mapping(skb);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
return;
}
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
__ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
__skb_queue_tail(&local->pending[queue], skb);
__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
/*
 * Move a whole list of frames onto the pending queues.  All queues are
 * stopped (SKB_ADD reason) while the frames are distributed, @fn (if
 * any) runs with the queues still stopped, then everything is rewoken.
 * Frames without a vif in their control info are dropped.
 */
void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
struct sk_buff_head *skbs,
void (*fn)(void *data), void *data)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
unsigned long flags;
int queue, i;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < hw->queues; i++)
__ieee80211_stop_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
while ((skb = skb_dequeue(skbs))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (WARN_ON(!info->control.vif)) {
kfree_skb(skb);
continue;
}
queue = skb_get_queue_mapping(skb);
__skb_queue_tail(&local->pending[queue], skb);
}
if (fn)
fn(data);
for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
/* Convenience wrapper without a callback. */
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
struct sk_buff_head *skbs)
{
ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
}
/* Stop every hardware queue for @reason under the queue_stop_reason lock. */
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
				     enum queue_stop_reason reason)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long irqflags;
	int q;

	spin_lock_irqsave(&local->queue_stop_reason_lock, irqflags);
	for (q = 0; q < hw->queues; q++)
		__ieee80211_stop_queue(hw, q, reason);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, irqflags);
}

/* Driver-facing wrapper: stop all queues on the driver's behalf. */
void ieee80211_stop_queues(struct ieee80211_hw *hw)
{
	ieee80211_stop_queues_by_reason(hw,
					IEEE80211_QUEUE_STOP_REASON_DRIVER);
}
EXPORT_SYMBOL(ieee80211_stop_queues);
/*
 * Report whether @queue currently has any stop reason set.  Returns
 * true for out-of-range queue numbers (after a WARN).
 */
int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long irqflags;
	int stopped;

	if (WARN_ON(queue >= hw->queues))
		return true;

	spin_lock_irqsave(&local->queue_stop_reason_lock, irqflags);
	stopped = local->queue_stop_reasons[queue] != 0;
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, irqflags);
	return stopped;
}
EXPORT_SYMBOL(ieee80211_queue_stopped);
/* Wake every hardware queue for @reason under the queue_stop_reason lock. */
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
				     enum queue_stop_reason reason)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long irqflags;
	int q;

	spin_lock_irqsave(&local->queue_stop_reason_lock, irqflags);
	for (q = 0; q < hw->queues; q++)
		__ieee80211_wake_queue(hw, q, reason);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, irqflags);
}

/* Driver-facing wrapper: wake all queues the driver stopped. */
void ieee80211_wake_queues(struct ieee80211_hw *hw)
{
	ieee80211_wake_queues_by_reason(hw, IEEE80211_QUEUE_STOP_REASON_DRIVER);
}
EXPORT_SYMBOL(ieee80211_wake_queues);
/*
 * Call @iterator for every running interface except monitor and
 * AP_VLAN ones.  Holds iflist_mtx, so the iterator may sleep.
 */
void ieee80211_iterate_active_interfaces(
struct ieee80211_hw *hw,
void (*iterator)(void *data, u8 *mac,
struct ieee80211_vif *vif),
void *data)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
continue;
default:
break;
}
if (ieee80211_sdata_running(sdata))
iterator(data, sdata->vif.addr,
&sdata->vif);
}
mutex_unlock(&local->iflist_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
/*
 * RCU variant of the above: may be called from atomic context, and the
 * iterator must not sleep.
 */
void ieee80211_iterate_active_interfaces_atomic(
struct ieee80211_hw *hw,
void (*iterator)(void *data, u8 *mac,
struct ieee80211_vif *vif),
void *data)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
continue;
default:
break;
}
if (ieee80211_sdata_running(sdata))
iterator(data, sdata->vif.addr,
&sdata->vif);
}
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
/*
 * Nothing should have been stuffed into the workqueue during
 * the suspend->resume cycle. If this WARN is seen then there
 * is a bug with either the driver suspend or something in
 * mac80211 stuffing into the workqueue which we haven't yet
 * cleared during mac80211's suspend cycle.
 */
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
{
	/* WARN() returns its (truthy) condition, so this reads:
	 * "queueing is OK unless we are suspending right now". */
	return !WARN(local->suspended && !local->resuming,
		     "queueing ieee80211 work while going to suspend\n");
}
/* Queue @work on mac80211's workqueue unless we are suspending. */
void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work)
{
	struct ieee80211_local *local = hw_to_local(hw);

	if (ieee80211_can_queue_work(local))
		queue_work(local->workqueue, work);
}
EXPORT_SYMBOL(ieee80211_queue_work);

/* Delayed-work counterpart of ieee80211_queue_work(). */
void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
				  struct delayed_work *dwork,
				  unsigned long delay)
{
	struct ieee80211_local *local = hw_to_local(hw);

	if (ieee80211_can_queue_work(local))
		queue_delayed_work(local->workqueue, dwork, delay);
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);
/*
 * ieee802_11_parse_elems_crc - parse an 802.11 information element buffer
 * @start: start of the TLV-encoded IE buffer
 * @len: length of the buffer in bytes
 * @elems: output; filled with pointers INTO @start (no copies), so the
 *	buffer must outlive @elems
 * @filter: bitmap of element IDs (only IDs < 64) to fold into the CRC
 * @crc: initial CRC value
 *
 * Walks the buffer and records a pointer/length pair for each element
 * it recognizes.  Truncated elements, unexpected duplicates and
 * trailing garbage set elems->parse_error but do not abort the walk.
 * Returns the CRC computed over the filtered elements (plus any
 * Microsoft-OUI vendor elements) -- used to detect content changes.
 */
u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
			       struct ieee802_11_elems *elems,
			       u64 filter, u32 crc)
{
	size_t left = len;
	u8 *pos = start;
	bool calc_crc = filter != 0;
	DECLARE_BITMAP(seen_elems, 256);

	bitmap_zero(seen_elems, 256);
	memset(elems, 0, sizeof(*elems));
	elems->ie_start = start;
	elems->total_len = len;

	/* each element needs at least the 2-byte ID/length header */
	while (left >= 2) {
		u8 id, elen;
		bool elem_parse_failed;

		id = *pos++;
		elen = *pos++;
		left -= 2;

		/* truncated element: declared payload exceeds what is left */
		if (elen > left) {
			elems->parse_error = true;
			break;
		}

		/*
		 * Flag duplicate elements as a parse error but keep going;
		 * vendor-specific and quiet elements may legitimately
		 * appear more than once.
		 */
		if (id != WLAN_EID_VENDOR_SPECIFIC &&
		    id != WLAN_EID_QUIET &&
		    test_bit(id, seen_elems)) {
			elems->parse_error = true;
			left -= elen;
			pos += elen;
			continue;
		}

		/* CRC covers the whole element including its 2-byte header */
		if (calc_crc && id < 64 && (filter & (1ULL << id)))
			crc = crc32_be(crc, pos - 2, elen + 2);

		elem_parse_failed = false;

		switch (id) {
		case WLAN_EID_SSID:
			elems->ssid = pos;
			elems->ssid_len = elen;
			break;
		case WLAN_EID_SUPP_RATES:
			elems->supp_rates = pos;
			elems->supp_rates_len = elen;
			break;
		case WLAN_EID_FH_PARAMS:
			elems->fh_params = pos;
			elems->fh_params_len = elen;
			break;
		case WLAN_EID_DS_PARAMS:
			elems->ds_params = pos;
			elems->ds_params_len = elen;
			break;
		case WLAN_EID_CF_PARAMS:
			elems->cf_params = pos;
			elems->cf_params_len = elen;
			break;
		case WLAN_EID_TIM:
			/* fixed-size elements are length-validated here */
			if (elen >= sizeof(struct ieee80211_tim_ie)) {
				elems->tim = (void *)pos;
				elems->tim_len = elen;
			} else
				elem_parse_failed = true;
			break;
		case WLAN_EID_IBSS_PARAMS:
			elems->ibss_params = pos;
			elems->ibss_params_len = elen;
			break;
		case WLAN_EID_CHALLENGE:
			elems->challenge = pos;
			elems->challenge_len = elen;
			break;
		case WLAN_EID_VENDOR_SPECIFIC:
			if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
			    pos[2] == 0xf2) {
				/* Microsoft OUI (00:50:F2) */
				if (calc_crc)
					crc = crc32_be(crc, pos - 2, elen + 2);
				if (pos[3] == 1) {
					/* OUI Type 1 - WPA IE */
					elems->wpa = pos;
					elems->wpa_len = elen;
				} else if (elen >= 5 && pos[3] == 2) {
					/* OUI Type 2 - WMM IE */
					if (pos[4] == 0) {
						elems->wmm_info = pos;
						elems->wmm_info_len = elen;
					} else if (pos[4] == 1) {
						elems->wmm_param = pos;
						elems->wmm_param_len = elen;
					}
				}
			}
			break;
		case WLAN_EID_RSN:
			elems->rsn = pos;
			elems->rsn_len = elen;
			break;
		case WLAN_EID_ERP_INFO:
			elems->erp_info = pos;
			elems->erp_info_len = elen;
			break;
		case WLAN_EID_EXT_SUPP_RATES:
			elems->ext_supp_rates = pos;
			elems->ext_supp_rates_len = elen;
			break;
		case WLAN_EID_HT_CAPABILITY:
			if (elen >= sizeof(struct ieee80211_ht_cap))
				elems->ht_cap_elem = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_HT_INFORMATION:
			if (elen >= sizeof(struct ieee80211_ht_info))
				elems->ht_info_elem = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_MESH_ID:
			elems->mesh_id = pos;
			elems->mesh_id_len = elen;
			break;
		case WLAN_EID_MESH_CONFIG:
			if (elen >= sizeof(struct ieee80211_meshconf_ie))
				elems->mesh_config = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_PEER_MGMT:
			elems->peering = pos;
			elems->peering_len = elen;
			break;
		case WLAN_EID_PREQ:
			elems->preq = pos;
			elems->preq_len = elen;
			break;
		case WLAN_EID_PREP:
			elems->prep = pos;
			elems->prep_len = elen;
			break;
		case WLAN_EID_PERR:
			elems->perr = pos;
			elems->perr_len = elen;
			break;
		case WLAN_EID_RANN:
			if (elen >= sizeof(struct ieee80211_rann_ie))
				elems->rann = (void *)pos;
			else
				elem_parse_failed = true;
			break;
		case WLAN_EID_CHANNEL_SWITCH:
			elems->ch_switch_elem = pos;
			elems->ch_switch_elem_len = elen;
			break;
		case WLAN_EID_QUIET:
			/* only the first quiet element is recorded */
			if (!elems->quiet_elem) {
				elems->quiet_elem = pos;
				elems->quiet_elem_len = elen;
			}
			elems->num_of_quiet_elem++;
			break;
		case WLAN_EID_COUNTRY:
			elems->country_elem = pos;
			elems->country_elem_len = elen;
			break;
		case WLAN_EID_PWR_CONSTRAINT:
			elems->pwr_constr_elem = pos;
			elems->pwr_constr_elem_len = elen;
			break;
		case WLAN_EID_TIMEOUT_INTERVAL:
			elems->timeout_int = pos;
			elems->timeout_int_len = elen;
			break;
		default:
			break;
		}

		if (elem_parse_failed)
			elems->parse_error = true;
		else
			set_bit(id, seen_elems);

		left -= elen;
		pos += elen;
	}

	/* leftover bytes that do not form a complete element header */
	if (left != 0)
		elems->parse_error = true;

	return crc;
}
/* Convenience wrapper: parse the IE buffer without CRC filtering. */
void ieee802_11_parse_elems(u8 *start, size_t len,
			    struct ieee802_11_elems *elems)
{
	(void) ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
}
/*
 * Program the default WMM (EDCA) TX parameters for every hardware
 * queue of @sdata, per IEEE 802.11-2007 Table 7-37, then update the
 * QoS flag and (optionally) notify the driver of the BSS change.
 */
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
			       bool bss_notify)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_queue_params qparam;
	int queue;
	bool use_11b;
	int aCWmin, aCWmax;

	/* nothing to do if the driver cannot program TX parameters */
	if (!local->ops->conf_tx)
		return;

	memset(&qparam, 0, sizeof(qparam));

	/* 11b-only operation on 2.4 GHz uses the larger CWmin below */
	use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
		  !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);

	for (queue = 0; queue < local->hw.queues; queue++) {
		/* Set defaults according to 802.11-2007 Table 7-37 */
		aCWmax = 1023;
		if (use_11b)
			aCWmin = 31;
		else
			aCWmin = 15;

		/* queue numbering maps 0..3 to AC_VO..AC_BK */
		switch (queue) {
		case 3: /* AC_BK */
			qparam.cw_max = aCWmax;
			qparam.cw_min = aCWmin;
			qparam.txop = 0;
			qparam.aifs = 7;
			break;
		/* the default label deliberately shares the AC_BE body */
		default: /* never happens but let's not leave undefined */
		case 2: /* AC_BE */
			qparam.cw_max = aCWmax;
			qparam.cw_min = aCWmin;
			qparam.txop = 0;
			qparam.aifs = 3;
			break;
		case 1: /* AC_VI */
			qparam.cw_max = aCWmin;
			qparam.cw_min = (aCWmin + 1) / 2 - 1;
			/* txop is in units of 32 microseconds */
			if (use_11b)
				qparam.txop = 6016/32;
			else
				qparam.txop = 3008/32;
			qparam.aifs = 2;
			break;
		case 0: /* AC_VO */
			qparam.cw_max = (aCWmin + 1) / 2 - 1;
			qparam.cw_min = (aCWmin + 1) / 4 - 1;
			if (use_11b)
				qparam.txop = 3264/32;
			else
				qparam.txop = 1504/32;
			qparam.aifs = 2;
			break;
		}

		qparam.uapsd = false;

		/* remember the settings so they can be replayed later */
		sdata->tx_conf[queue] = qparam;
		drv_conf_tx(local, sdata, queue, &qparam);
	}

	/* after reinitialize QoS TX queues setting to default,
	 * disable QoS at all */
	if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
		/* station interfaces start with QoS disabled */
		sdata->vif.bss_conf.qos =
			sdata->vif.type != NL80211_IFTYPE_STATION;
		if (bss_notify)
			ieee80211_bss_info_change_notify(sdata,
							 BSS_CHANGED_QOS);
	}
}
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const size_t supp_rates_len,
const u8 *supp_rates)
{
struct ieee80211_local *local = sdata->local;
int i, have_higher_than_11mbit = 0;
/* cf. IEEE 802.11 9.2.12 */
for (i = 0; i < supp_rates_len; i++)
if ((supp_rates[i] & 0x7f) * 5 > 110)
have_higher_than_11mbit = 1;
if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
have_higher_than_11mbit)
sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
ieee80211_set_wmm_default(sdata, true);
}
u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
enum ieee80211_band band)
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *bitrates;
u32 mandatory_rates;
enum ieee80211_rate_flags mandatory_flag;
int i;
sband = local->hw.wiphy->bands[band];
if (!sband) {
WARN_ON(1);
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
}
if (band == IEEE80211_BAND_2GHZ)
mandatory_flag = IEEE80211_RATE_MANDATORY_B;
else
mandatory_flag = IEEE80211_RATE_MANDATORY_A;
bitrates = sband->bitrates;
mandatory_rates = 0;
for (i = 0; i < sband->n_bitrates; i++)
if (bitrates[i].flags & mandatory_flag)
mandatory_rates |= BIT(i);
return mandatory_rates;
}
/*
 * Build and transmit an Authentication frame to @da/@bssid.  For
 * shared key authentication, frame 3 is WEP-encrypted here using
 * @key/@key_len/@key_idx (the challenge goes in via @extra).
 */
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
			 u16 transaction, u16 auth_alg,
			 u8 *extra, size_t extra_len, const u8 *da,
			 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	int err;

	/* 24-byte mgmt header + 6 bytes of fixed auth fields + extra IEs */
	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
			    sizeof(*mgmt) + 6 + extra_len);
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);

	mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
	memset(mgmt, 0, 24 + 6);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_AUTH);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(mgmt->bssid, bssid, ETH_ALEN);
	mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
	mgmt->u.auth.status_code = cpu_to_le16(0);
	if (extra)
		memcpy(skb_put(skb, extra_len), extra, extra_len);

	/* shared-key auth frame 3 must be protected (WEP) */
	if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
		mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
		WARN_ON(err);
	}

	/* already encrypted above if needed -- don't encrypt again in TX */
	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	ieee80211_tx_skb(sdata, skb);
}
/*
 * Build the IE payload of a probe request into @buffer: supported
 * rates (split into supported/extended), an optional DS Params
 * element for 2.4 GHz, and HT/VHT capabilities, with caller-supplied
 * IEs (@ie/@ie_len) spliced in at the positions required by 802.11
 * element ordering.  Returns the number of bytes written; the caller
 * must size @buffer accordingly.
 */
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
			     const u8 *ie, size_t ie_len,
			     enum ieee80211_band band, u32 rate_mask,
			     u8 channel)
{
	struct ieee80211_supported_band *sband;
	u8 *pos;
	size_t offset = 0, noffset;
	int supp_rates_len, i;
	u8 rates[32];
	int num_rates;
	int ext_rates_len;

	sband = local->hw.wiphy->bands[band];

	pos = buffer;

	/* collect the rates selected by @rate_mask, in 500 kbps units */
	num_rates = 0;
	for (i = 0; i < sband->n_bitrates; i++) {
		if ((BIT(i) & rate_mask) == 0)
			continue; /* skip rate */
		rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
	}

	/* at most 8 rates fit into the Supported Rates element */
	supp_rates_len = min_t(int, num_rates, 8);

	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = supp_rates_len;
	memcpy(pos, rates, supp_rates_len);
	pos += supp_rates_len;

	/* insert "request information" if in custom IEs */
	if (ie && ie_len) {
		static const u8 before_extrates[] = {
			WLAN_EID_SSID,
			WLAN_EID_SUPP_RATES,
			WLAN_EID_REQUEST,
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_extrates,
					     ARRAY_SIZE(before_extrates),
					     offset);
		memcpy(pos, ie + offset, noffset - offset);
		pos += noffset - offset;
		offset = noffset;
	}

	/* any remaining rates go into Extended Supported Rates */
	ext_rates_len = num_rates - supp_rates_len;
	if (ext_rates_len > 0) {
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = ext_rates_len;
		memcpy(pos, rates + supp_rates_len, ext_rates_len);
		pos += ext_rates_len;
	}

	/* channel == 0 means "omit DS Params" (directed probes) */
	if (channel && sband->band == IEEE80211_BAND_2GHZ) {
		*pos++ = WLAN_EID_DS_PARAMS;
		*pos++ = 1;
		*pos++ = channel;
	}

	/* insert custom IEs that go before HT */
	if (ie && ie_len) {
		static const u8 before_ht[] = {
			WLAN_EID_SSID,
			WLAN_EID_SUPP_RATES,
			WLAN_EID_REQUEST,
			WLAN_EID_EXT_SUPP_RATES,
			WLAN_EID_DS_PARAMS,
			WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_ht, ARRAY_SIZE(before_ht),
					     offset);
		memcpy(pos, ie + offset, noffset - offset);
		pos += noffset - offset;
		offset = noffset;
	}

	if (sband->ht_cap.ht_supported)
		pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
						sband->ht_cap.cap);

	/*
	 * If adding more here, adjust code in main.c
	 * that calculates local->scan_ies_len.
	 */

	/* add any remaining custom IEs */
	if (ie && ie_len) {
		noffset = ie_len;
		memcpy(pos, ie + offset, noffset - offset);
		pos += noffset - offset;
	}

	if (sband->vht_cap.vht_supported)
		pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
						 sband->vht_cap.cap);

	return pos - buffer;
}
/*
 * Allocate and build a probe request skb for @sdata.  When @dst is
 * non-NULL it is used as both DA and BSSID (directed probe).
 * Returns NULL on allocation failure; the caller owns the skb.
 */
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
					  u8 *dst, u32 ratemask,
					  const u8 *ssid, size_t ssid_len,
					  const u8 *ie, size_t ie_len,
					  bool directed)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	size_t buf_len;
	u8 *buf;
	u8 chan;

	/* FIXME: come up with a proper value */
	buf = kmalloc(200 + ie_len, GFP_KERNEL);
	if (!buf)
		return NULL;

	/*
	 * Do not send DS Channel parameter for directed probe requests
	 * in order to maximize the chance that we get a response.  Some
	 * badly-behaved APs don't respond when this parameter is included.
	 */
	if (directed)
		chan = 0;
	else
		chan = ieee80211_frequency_to_channel(
			local->hw.conf.channel->center_freq);

	buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
					   local->hw.conf.channel->band,
					   ratemask, chan);

	skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
				     ssid, ssid_len,
				     buf, buf_len);
	if (!skb)
		goto out;

	/* directed probe: address the frame to the given peer/BSS */
	if (dst) {
		mgmt = (struct ieee80211_mgmt *) skb->data;
		memcpy(mgmt->da, dst, ETH_ALEN);
		memcpy(mgmt->bssid, dst, ETH_ALEN);
	}

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
out:
	/* the IE scratch buffer is copied into the skb; free it either way */
	kfree(buf);
	return skb;
}
/* Build and transmit a probe request; optionally forbid CCK rates. */
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
			      const u8 *ssid, size_t ssid_len,
			      const u8 *ie, size_t ie_len,
			      u32 ratemask, bool directed, bool no_cck)
{
	struct sk_buff *skb;

	skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
					ie, ie_len, directed);
	if (!skb)
		return;

	if (no_cck)
		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
	ieee80211_tx_skb(sdata, skb);
}
/*
 * Translate the peer's Supported Rates and Extended Supported Rates
 * elements into a bitmap over our own band's bitrate table.
 */
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
			    struct ieee802_11_elems *elems,
			    enum ieee80211_band band)
{
	struct ieee80211_supported_band *sband;
	u32 rates = 0;
	int i, j;

	sband = local->hw.wiphy->bands[band];
	if (!sband) {
		/* should not happen -- fall back to the current band */
		WARN_ON(1);
		sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
	}

	for (i = 0; i < elems->supp_rates_len +
		    elems->ext_supp_rates_len; i++) {
		u8 rate = 0;
		int bitrate;

		if (i < elems->supp_rates_len)
			rate = elems->supp_rates[i];
		else if (elems->ext_supp_rates)
			rate = elems->ext_supp_rates[i - elems->supp_rates_len];

		/* 500 kbps units; mask off the "basic rate" bit */
		bitrate = 5 * (rate & 0x7f);
		for (j = 0; j < sband->n_bitrates; j++) {
			if (sband->bitrates[j].bitrate == bitrate)
				rates |= BIT(j);
		}
	}

	return rates;
}
/*
 * Bring the hardware down: turn off the radio LED triggers, cancel
 * the pending filter-reconfiguration work, flush the mac80211
 * workqueue and finally ask the driver to stop.
 */
void ieee80211_stop_device(struct ieee80211_local *local)
{
	ieee80211_led_radio(local, false);
	ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);

	cancel_work_sync(&local->reconfig_filter);

	/* let all queued work finish before stopping the driver */
	flush_workqueue(local->workqueue);
	drv_stop(local);
}
/*
 * Reconfigure the hardware after a resume or a hardware restart:
 * restart the driver, re-add interfaces and stations, reprogram
 * thresholds, TX queue parameters, filters, per-BSS configuration
 * and keys, then wake the queues.  Returns 0 on success or the
 * error returned by the driver's start/resume callback.
 */
int ieee80211_reconfig(struct ieee80211_local *local)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	int res, i;

#ifdef CONFIG_PM
	if (local->suspended)
		local->resuming = true;

	if (local->wowlan) {
		local->wowlan = false;
		res = drv_resume(local);
		if (res < 0) {
			local->resuming = false;
			return res;
		}
		/* res == 0: the driver kept all state; just wake up */
		if (res == 0)
			goto wake_up;
		WARN_ON(res > 1);
		/*
		 * res is 1, which means the driver requested
		 * to go through a regular reset on wakeup.
		 */
	}
#endif
	/* everything else happens only if HW was up & running */
	if (!local->open_count)
		goto wake_up;

	/*
	 * Upon resume hardware can sometimes be goofy due to
	 * various platform / driver / bus issues, so restarting
	 * the device may at times not work immediately. Propagate
	 * the error.
	 */
	res = drv_start(local);
	if (res) {
		WARN(local->suspended, "Hardware became unavailable "
		     "upon resume. This could be a software issue "
		     "prior to suspend or a hardware issue.\n");
		return res;
	}

	/* setup fragmentation threshold */
	drv_set_frag_threshold(local, hw->wiphy->frag_threshold);

	/* setup RTS threshold */
	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);

	/* reset coverage class */
	drv_set_coverage_class(local, hw->wiphy->coverage_class);

	ieee80211_led_radio(local, true);
	ieee80211_mod_tpt_led_trig(local,
				   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);

	/* add interfaces */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
		    ieee80211_sdata_running(sdata))
			res = drv_add_interface(local, sdata);
	}

	/* add STAs back */
	mutex_lock(&local->sta_mtx);
	list_for_each_entry(sta, &local->sta_list, list) {
		if (sta->uploaded) {
			enum ieee80211_sta_state state;

			/* replay the state transitions up to the old state */
			for (state = IEEE80211_STA_NOTEXIST;
			     state < sta->sta_state - 1; state++)
				WARN_ON(drv_sta_state(local, sta->sdata, sta,
						      state, state + 1));
		}
	}
	mutex_unlock(&local->sta_mtx);

	/* reconfigure tx conf */
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    !ieee80211_sdata_running(sdata))
			continue;
		/* replay the queue parameters saved by set_wmm_default/cfg */
		for (i = 0; i < hw->queues; i++)
			drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
	}

	/* reconfigure hardware */
	ieee80211_hw_config(local, ~0);

	ieee80211_configure_filter(local);

	/* Finally also reconfigure all the BSS information */
	list_for_each_entry(sdata, &local->interfaces, list) {
		u32 changed;

		if (!ieee80211_sdata_running(sdata))
			continue;

		/* common change flags for all interface types */
		changed = BSS_CHANGED_ERP_CTS_PROT |
			  BSS_CHANGED_ERP_PREAMBLE |
			  BSS_CHANGED_ERP_SLOT |
			  BSS_CHANGED_HT |
			  BSS_CHANGED_BASIC_RATES |
			  BSS_CHANGED_BEACON_INT |
			  BSS_CHANGED_BSSID |
			  BSS_CHANGED_CQM |
			  BSS_CHANGED_QOS |
			  BSS_CHANGED_IDLE;

		switch (sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			changed |= BSS_CHANGED_ASSOC |
				   BSS_CHANGED_ARP_FILTER;
			mutex_lock(&sdata->u.mgd.mtx);
			ieee80211_bss_info_change_notify(sdata, changed);
			mutex_unlock(&sdata->u.mgd.mtx);
			break;
		case NL80211_IFTYPE_ADHOC:
			changed |= BSS_CHANGED_IBSS;
			/* fall through */
		case NL80211_IFTYPE_AP:
			changed |= BSS_CHANGED_SSID;
			if (sdata->vif.type == NL80211_IFTYPE_AP)
				changed |= BSS_CHANGED_AP_PROBE_RESP;
			/* fall through */
		case NL80211_IFTYPE_MESH_POINT:
			changed |= BSS_CHANGED_BEACON |
				   BSS_CHANGED_BEACON_ENABLED;
			ieee80211_bss_info_change_notify(sdata, changed);
			break;
		case NL80211_IFTYPE_WDS:
			break;
		case NL80211_IFTYPE_AP_VLAN:
		case NL80211_IFTYPE_MONITOR:
			/* ignore virtual */
			break;
		case NL80211_IFTYPE_UNSPECIFIED:
		case NUM_NL80211_IFTYPES:
		case NL80211_IFTYPE_P2P_CLIENT:
		case NL80211_IFTYPE_P2P_GO:
			WARN_ON(1);
			break;
		}
	}

	ieee80211_recalc_ps(local, -1);

	/*
	 * The sta might be in psm against the ap (e.g. because
	 * this was the state before a hw restart), so we
	 * explicitly send a null packet in order to make sure
	 * it'll sync against the ap (and get out of psm).
	 */
	if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
		list_for_each_entry(sdata, &local->interfaces, list) {
			if (sdata->vif.type != NL80211_IFTYPE_STATION)
				continue;

			ieee80211_send_nullfunc(local, sdata, 0);
		}
	}

	/*
	 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
	 * sessions can be established after a resume.
	 *
	 * Also tear down aggregation sessions since reconfiguring
	 * them in a hardware restart scenario is not easily done
	 * right now, and the hardware will have lost information
	 * about the sessions, but we and the AP still think they
	 * are active. This is really a workaround though.
	 */
	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
		mutex_lock(&local->sta_mtx);

		list_for_each_entry(sta, &local->sta_list, list) {
			ieee80211_sta_tear_down_BA_sessions(sta, true);
			clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
		}

		mutex_unlock(&local->sta_mtx);
	}

	/* add back keys */
	list_for_each_entry(sdata, &local->interfaces, list)
		if (ieee80211_sdata_running(sdata))
			ieee80211_enable_keys(sdata);

wake_up:
	ieee80211_wake_queues_by_reason(hw,
			IEEE80211_QUEUE_STOP_REASON_SUSPEND);

	/*
	 * If this is for hw restart things are still running.
	 * We may want to change that later, however.
	 */
	if (!local->suspended)
		return 0;

#ifdef CONFIG_PM
	/* first set suspended false, then resuming */
	local->suspended = false;
	mb();
	local->resuming = false;

	list_for_each_entry(sdata, &local->interfaces, list) {
		switch(sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			ieee80211_sta_restart(sdata);
			break;
		case NL80211_IFTYPE_ADHOC:
			ieee80211_ibss_restart(sdata);
			break;
		case NL80211_IFTYPE_MESH_POINT:
			ieee80211_mesh_restart(sdata);
			break;
		default:
			break;
		}
	}

	mod_timer(&local->sta_cleanup, jiffies + 1);

	mutex_lock(&local->sta_mtx);
	list_for_each_entry(sta, &local->sta_list, list)
		mesh_plink_restart(sta);
	mutex_unlock(&local->sta_mtx);
#else
	/* suspended can only be set with CONFIG_PM */
	WARN_ON(1);
#endif
	return 0;
}
/*
 * Called by drivers during resume when the connection could not be
 * kept: flag the station interface for disconnection and taint all
 * of its keys so they are not reused.  Only valid while resuming on
 * a station interface.
 */
void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local;
	struct ieee80211_key *key;

	if (WARN_ON(!vif))
		return;

	sdata = vif_to_sdata(vif);
	local = sdata->local;

	if (WARN_ON(!local->resuming))
		return;

	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
		return;

	sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME;

	/* mark every key tainted so it cannot be used any longer */
	mutex_lock(&local->key_mtx);
	list_for_each_entry(key, &sdata->key_list, list)
		key->flags |= KEY_FLAG_TAINTED;
	mutex_unlock(&local->key_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
/*
 * For an associated managed interface, store the SM PS mode to use
 * towards its AP in *smps_mode (resolving AUTOMATIC according to the
 * powersave setting) and return 1; return 0 when not associated.
 */
static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
			  enum ieee80211_smps_mode *smps_mode)
{
	if (!ifmgd->associated)
		return 0;

	*smps_mode = ifmgd->ap_smps;
	if (*smps_mode == IEEE80211_SMPS_AUTOMATIC)
		*smps_mode = ifmgd->powersave ? IEEE80211_SMPS_DYNAMIC
					      : IEEE80211_SMPS_OFF;

	return 1;
}
/* must hold iflist_mtx */
void ieee80211_recalc_smps(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
	int count = 0;

	lockdep_assert_held(&local->iflist_mtx);

	/*
	 * This function could be improved to handle multiple
	 * interfaces better, but right now it makes any
	 * non-station interfaces force SM PS to be turned
	 * off. If there are multiple station interfaces it
	 * could also use the best possible mode, e.g. if
	 * one is in static and the other in dynamic then
	 * dynamic is ok.
	 */

	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;
		/*
		 * Non-station interface: jump straight to "set",
		 * deliberately bypassing the no-change shortcut below.
		 */
		if (sdata->vif.type != NL80211_IFTYPE_STATION)
			goto set;

		count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
		/* more than one associated station: force SM PS off */
		if (count > 1) {
			smps_mode = IEEE80211_SMPS_OFF;
			break;
		}
	}

	if (smps_mode == local->smps_mode)
		return;

 set:
	local->smps_mode = smps_mode;
	/* changed flag is auto-detected for this */
	ieee80211_hw_config(local, 0);
}
/* Return true if @id occurs in the @n_ids-element array @ids. */
static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
{
	int i = 0;

	while (i < n_ids) {
		if (ids[i] == id)
			return true;
		i++;
	}

	return false;
}
/**
 * ieee80211_ie_split - split an IE buffer according to ordering
 *
 * @ies: the IE buffer
 * @ielen: the length of the IE buffer
 * @ids: an array with element IDs that are allowed before
 *	the split
 * @n_ids: the size of the element ID array
 * @offset: offset where to start splitting in the buffer
 *
 * Starting at @offset, advances over elements whose IDs are listed
 * in @ids and returns the offset of the first element that is not
 * (which may be @ielen if the whole remainder is allowed).
 *
 * The buffer must be well-formed and already ordered correctly;
 * this function performs no validation and no reordering.
 */
size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
			  const u8 *ids, int n_ids, size_t offset)
{
	size_t pos = offset;

	while (pos < ielen) {
		if (!ieee80211_id_in_list(ids, n_ids, ies[pos]))
			break;
		/* skip the 2-byte element header plus its payload */
		pos += 2 + ies[pos + 1];
	}

	return pos;
}
/*
 * Return the offset of the first vendor-specific element at or after
 * @offset (or @ielen if there is none); the buffer must be well-formed.
 */
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
{
	size_t pos;

	for (pos = offset; pos < ielen; pos += 2 + ies[pos + 1]) {
		if (ies[pos] == WLAN_EID_VENDOR_SPECIFIC)
			break;
	}

	return pos;
}
/*
 * Store the RSSI reporting thresholds for a station interface and
 * emit the corresponding tracepoint.  Passing 0/0 disables reporting
 * (see ieee80211_disable_rssi_reports()).
 */
static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
					   int rssi_min_thold,
					   int rssi_max_thold)
{
	trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);

	/* only managed (station) interfaces track RSSI this way */
	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
		return;

	/*
	 * Scale up threshold values before storing it, as the RSSI averaging
	 * algorithm uses a scaled up value as well. Change this scaling
	 * factor if the RSSI averaging algorithm changes.
	 */
	sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
	sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
}
/*
 * Enable RSSI threshold reporting for @vif.  Thresholds should
 * satisfy min < max; violations trigger a warning but the values
 * are applied regardless.
 */
void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
				   int rssi_min_thold,
				   int rssi_max_thold)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);

	/* equal or inverted thresholds are a caller bug */
	WARN_ON(rssi_min_thold == rssi_max_thold ||
		rssi_min_thold > rssi_max_thold);

	_ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
				       rssi_max_thold);
}
EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
/* Disable RSSI reporting for @vif by clearing both thresholds to 0. */
void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);

	_ieee80211_enable_rssi_reports(sdata, 0, 0);
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
/*
 * Append an HT Capabilities element at @pos.  Fields beyond the MCS
 * set (extended caps, TX BF, antenna selection) are left at the
 * zeroes written by the memset and merely skipped over.  Returns the
 * updated write position.
 */
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
			      u16 cap)
{
	__le16 tmp;

	*pos++ = WLAN_EID_HT_CAPABILITY;
	*pos++ = sizeof(struct ieee80211_ht_cap);
	/* zero the whole element body up front */
	memset(pos, 0, sizeof(struct ieee80211_ht_cap));

	/* capability flags */
	tmp = cpu_to_le16(cap);
	memcpy(pos, &tmp, sizeof(u16));
	pos += sizeof(u16);

	/* AMPDU parameters */
	*pos++ = ht_cap->ampdu_factor |
		 (ht_cap->ampdu_density <<
			IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);

	/* MCS set */
	memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
	pos += sizeof(ht_cap->mcs);

	/* extended capabilities */
	pos += sizeof(__le16);

	/* BF capabilities */
	pos += sizeof(__le32);

	/* antenna selection */
	pos += sizeof(u8);

	return pos;
}
/*
 * Append a VHT Capabilities element at @pos: capability flags
 * followed by the supported VHT MCS set.  Returns the updated
 * write position.
 */
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
			       u32 cap)
{
	__le32 tmp;

	*pos++ = WLAN_EID_VHT_CAPABILITY;
	*pos++ = sizeof(struct ieee80211_vht_cap);
	/* zero the whole element body up front */
	memset(pos, 0, sizeof(struct ieee80211_vht_cap));

	/* capability flags */
	tmp = cpu_to_le32(cap);
	memcpy(pos, &tmp, sizeof(u32));
	pos += sizeof(u32);

	/* VHT MCS set */
	memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
	pos += sizeof(vht_cap->vht_mcs);

	return pos;
}
/*
 * Append an HT Information element at @pos, describing @channel with
 * the secondary-channel offset derived from @channel_type.  Returns
 * the updated write position.
 */
u8 *ieee80211_ie_build_ht_info(u8 *pos,
			       struct ieee80211_sta_ht_cap *ht_cap,
			       struct ieee80211_channel *channel,
			       enum nl80211_channel_type channel_type)
{
	struct ieee80211_ht_info *ht_info;

	/* Build HT Information */
	*pos++ = WLAN_EID_HT_INFORMATION;
	*pos++ = sizeof(struct ieee80211_ht_info);
	ht_info = (struct ieee80211_ht_info *)pos;
	ht_info->control_chan =
			ieee80211_frequency_to_channel(channel->center_freq);
	switch (channel_type) {
	case NL80211_CHAN_HT40MINUS:
		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
		break;
	case NL80211_CHAN_HT40PLUS:
		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
		break;
	case NL80211_CHAN_HT20:
	default:
		ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
		break;
	}
	if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
		ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;

	/*
	 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
	 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
	 */
	ht_info->operation_mode = 0x0000;
	ht_info->stbc_param = 0x0000;

	/* It seems that Basic MCS set and Supported MCS set
	   are identical for the first 10 bytes */
	memset(&ht_info->basic_set, 0, 16);
	memcpy(&ht_info->basic_set, &ht_cap->mcs, 10);

	return pos + sizeof(struct ieee80211_ht_info);
}
/*
 * Map the secondary-channel-offset field of an HT Information element
 * to an nl80211 channel type; NULL input or an unknown offset yields
 * NL80211_CHAN_NO_HT.
 */
enum nl80211_channel_type
ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
{
	if (!ht_info)
		return NL80211_CHAN_NO_HT;

	switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
	case IEEE80211_HT_PARAM_CHA_SEC_NONE:
		return NL80211_CHAN_HT20;
	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
		return NL80211_CHAN_HT40PLUS;
	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
		return NL80211_CHAN_HT40MINUS;
	default:
		return NL80211_CHAN_NO_HT;
	}
}
/*
 * Append a Supported Rates element (at most eight rates) for the
 * current band to @skb.  Returns -ENOMEM if the skb lacks tailroom.
 */
int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	u8 i, count, *pos;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
	count = sband->n_bitrates;
	if (count > 8)
		count = 8;

	if (skb_tailroom(skb) < count + 2)
		return -ENOMEM;

	pos = skb_put(skb, count + 2);
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = count;
	/* bitrates are in 100 kbps; the element wants 500 kbps units */
	for (i = 0; i < count; i++)
		*pos++ = (u8) (sband->bitrates[i].bitrate / 5);

	return 0;
}
/*
 * Append an Extended Supported Rates element carrying any rates
 * beyond the first eight of the current band; a no-op when the band
 * has eight rates or fewer.  Returns -ENOMEM on lack of tailroom.
 */
int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	u8 i, exrates, *pos;

	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
	exrates = (sband->n_bitrates > 8) ? sband->n_bitrates - 8 : 0;

	if (skb_tailroom(skb) < exrates + 2)
		return -ENOMEM;

	if (!exrates)
		return 0;

	pos = skb_put(skb, exrates + 2);
	*pos++ = WLAN_EID_EXT_SUPP_RATES;
	*pos++ = exrates;
	/* 500 kbps units, rates 9.. of the band's table */
	for (i = 8; i < sband->n_bitrates; i++)
		*pos++ = (u8) (sband->bitrates[i].bitrate / 5);

	return 0;
}
| gpl-2.0 |
squllcx/Axon7 | drivers/gpio/gpio-wm8994.c | 1385 | 7596 | /*
* gpiolib support for Wolfson WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/gpio.h>
#include <linux/mfd/wm8994/registers.h>
/* Per-device driver state: the parent MFD handle plus our gpio_chip. */
struct wm8994_gpio {
	struct wm8994 *wm8994;
	struct gpio_chip gpio_chip;
};
/* Recover the driver state from an embedded gpio_chip pointer. */
static inline struct wm8994_gpio *to_wm8994_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct wm8994_gpio, gpio_chip);
}
/*
 * Reject pins that cannot be used as GPIOs on the given device
 * variant: on the WM8958, offsets 1-4 and 6 are unavailable.
 */
static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;

	if (wm8994->type == WM8958 &&
	    ((offset >= 1 && offset <= 4) || offset == 6))
		return -EINVAL;

	return 0;
}
/* Switch GPIO @offset to input by setting the GPn_DIR register bit. */
static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;

	return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
			       WM8994_GPN_DIR, WM8994_GPN_DIR);
}
/* Read the level of GPIO @offset; returns 0/1 or a negative errno. */
static int wm8994_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
	int ret;

	ret = wm8994_reg_read(wm8994, WM8994_GPIO_1 + offset);
	if (ret < 0)
		return ret;

	return !!(ret & WM8994_GPN_LVL);
}
/* Configure GPIO @offset as an output driving @value. */
static int wm8994_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;

	/* clearing GPn_DIR selects output; set the level in the same write */
	return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
			       WM8994_GPN_DIR | WM8994_GPN_LVL,
			       value ? WM8994_GPN_LVL : 0);
}
/* Drive output GPIO @offset to @value (level bit only). */
static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;

	wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL,
			value ? WM8994_GPN_LVL : 0);
}
/* Map GPIO @offset to its virtual IRQ via the regmap-irq domain. */
static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;

	return regmap_irq_get_virq(wm8994->irq_data, offset);
}
#ifdef CONFIG_DEBUG_FS
/*
 * Return a human-readable name for a GPIO pin-function value (the
 * GPn_FN register field); used only by the debugfs dump below.
 */
static const char *wm8994_gpio_fn(u16 fn)
{
	switch (fn) {
	case WM8994_GP_FN_PIN_SPECIFIC:
		return "pin-specific";
	case WM8994_GP_FN_GPIO:
		return "GPIO";
	case WM8994_GP_FN_SDOUT:
		return "SDOUT";
	case WM8994_GP_FN_IRQ:
		return "IRQ";
	case WM8994_GP_FN_TEMPERATURE:
		return "Temperature";
	case WM8994_GP_FN_MICBIAS1_DET:
		return "MICBIAS1 detect";
	case WM8994_GP_FN_MICBIAS1_SHORT:
		return "MICBIAS1 short";
	case WM8994_GP_FN_MICBIAS2_DET:
		return "MICBIAS2 detect";
	case WM8994_GP_FN_MICBIAS2_SHORT:
		return "MICBIAS2 short";
	case WM8994_GP_FN_FLL1_LOCK:
		return "FLL1 lock";
	case WM8994_GP_FN_FLL2_LOCK:
		return "FLL2 lock";
	case WM8994_GP_FN_SRC1_LOCK:
		return "SRC1 lock";
	case WM8994_GP_FN_SRC2_LOCK:
		return "SRC2 lock";
	case WM8994_GP_FN_DRC1_ACT:
		return "DRC1 activity";
	case WM8994_GP_FN_DRC2_ACT:
		return "DRC2 activity";
	case WM8994_GP_FN_DRC3_ACT:
		return "DRC3 activity";
	case WM8994_GP_FN_WSEQ_STATUS:
		return "Write sequencer";
	case WM8994_GP_FN_FIFO_ERROR:
		return "FIFO error";
	case WM8994_GP_FN_OPCLK:
		return "OPCLK";
	case WM8994_GP_FN_THW:
		return "Thermal warning";
	case WM8994_GP_FN_DCS_DONE:
		return "DC servo";
	case WM8994_GP_FN_FLL1_OUT:
		return "FLL1 output";
	case WM8994_GP_FN_FLL2_OUT:
		/* fixed: previously a copy/paste of the FLL1 string */
		return "FLL2 output";
	default:
		return "Unknown";
	}
}
/*
 * debugfs dump: print direction, pull, polarity, output configuration
 * and the selected alternate function for every GPIO of the chip.
 */
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
	int i;

	for (i = 0; i < chip->ngpio; i++) {
		int gpio = i + chip->base;
		int reg;
		const char *label;

		/* We report the GPIO even if it's not requested since
		 * we're also reporting things like alternate
		 * functions which apply even when the GPIO is not in
		 * use as a GPIO.
		 */
		label = gpiochip_is_requested(chip, i);
		if (!label)
			label = "Unrequested";

		seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);

		reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
		if (reg < 0) {
			/* log the failure but keep dumping the other pins */
			dev_err(wm8994->dev,
				"GPIO control %d read failed: %d\n",
				gpio, reg);
			seq_printf(s, "\n");
			continue;
		}

		if (reg & WM8994_GPN_DIR)
			seq_printf(s, "in ");
		else
			seq_printf(s, "out ");

		if (reg & WM8994_GPN_PU)
			seq_printf(s, "pull up ");

		if (reg & WM8994_GPN_PD)
			seq_printf(s, "pull down ");

		if (reg & WM8994_GPN_POL)
			seq_printf(s, "inverted ");
		else
			seq_printf(s, "noninverted ");

		if (reg & WM8994_GPN_OP_CFG)
			seq_printf(s, "open drain ");
		else
			seq_printf(s, "CMOS ");

		seq_printf(s, "%s (%x)\n",
			   wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
	}
}
#else
#define wm8994_gpio_dbg_show NULL
#endif
/*
 * Template gpio_chip; wm8994_gpio_probe() copies it into the device's
 * private structure and then fills in the per-instance ngpio, dev and
 * base fields.
 */
static struct gpio_chip template_chip = {
	.label = "wm8994",
	.owner = THIS_MODULE,
	.request = wm8994_gpio_request,
	.direction_input = wm8994_gpio_direction_in,
	.get = wm8994_gpio_get,
	.direction_output = wm8994_gpio_direction_out,
	.set = wm8994_gpio_set,
	.to_irq = wm8994_gpio_to_irq,
	.dbg_show = wm8994_gpio_dbg_show,
	/* accessors may sleep (register I/O goes through the wm8994 core) */
	.can_sleep = true,
};
/*
 * Probe the wm8994-gpio platform device: allocate the driver state,
 * instantiate the template gpio_chip for this device and register it.
 *
 * Returns 0 on success or a negative errno (-ENOMEM on allocation
 * failure, or the error from gpiochip_add()).
 *
 * Fix vs. original: the error path used a pointless "goto err" to a
 * label that only did "return ret" - return directly instead.
 */
static int wm8994_gpio_probe(struct platform_device *pdev)
{
	struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
	struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
	struct wm8994_gpio *wm8994_gpio;
	int ret;

	/* devm allocation: freed automatically on probe failure/removal */
	wm8994_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8994_gpio),
				   GFP_KERNEL);
	if (wm8994_gpio == NULL)
		return -ENOMEM;

	wm8994_gpio->wm8994 = wm8994;
	wm8994_gpio->gpio_chip = template_chip;
	wm8994_gpio->gpio_chip.ngpio = WM8994_GPIO_MAX;
	wm8994_gpio->gpio_chip.dev = &pdev->dev;

	/* use the platform-data GPIO base if one was supplied, otherwise
	 * let gpiolib pick a base dynamically (-1) */
	if (pdata && pdata->gpio_base)
		wm8994_gpio->gpio_chip.base = pdata->gpio_base;
	else
		wm8994_gpio->gpio_chip.base = -1;

	ret = gpiochip_add(&wm8994_gpio->gpio_chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
			ret);
		return ret;
	}

	platform_set_drvdata(pdev, wm8994_gpio);

	return 0;
}
/* Tear down the gpio_chip registered by wm8994_gpio_probe(); the state
 * itself was devm-allocated and is freed by the driver core. */
static int wm8994_gpio_remove(struct platform_device *pdev)
{
	struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);

	gpiochip_remove(&wm8994_gpio->gpio_chip);
	return 0;
}
/* Binds to the "wm8994-gpio" platform device instantiated by the
 * wm8994 MFD core. */
static struct platform_driver wm8994_gpio_driver = {
	.driver.name = "wm8994-gpio",
	.driver.owner = THIS_MODULE,
	.probe = wm8994_gpio_probe,
	.remove = wm8994_gpio_remove,
};
/* Registered at subsys_initcall time (rather than module_init) so the
 * GPIOs are available to dependent drivers early in boot. */
static int __init wm8994_gpio_init(void)
{
	return platform_driver_register(&wm8994_gpio_driver);
}
subsys_initcall(wm8994_gpio_init);
/* Module unload: unregister the platform driver. */
static void __exit wm8994_gpio_exit(void)
{
	platform_driver_unregister(&wm8994_gpio_driver);
}
module_exit(wm8994_gpio_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM8994");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8994-gpio");
| gpl-2.0 |
kenkit/android_kernel_htc_msm7x30 | fs/cachefiles/namei.c | 3177 | 25200 | /* CacheFiles path walking and related routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/slab.h>
#include "internal.h"
#define CACHEFILES_KEYBUF_SIZE 512
/*
 * dump debugging info about an object
 * - @keybuf, if non-NULL, must be at least CACHEFILES_KEYBUF_SIZE bytes
 *   and is used to retrieve and print the netfs cookie key
 */
static noinline
void __cachefiles_printk_object(struct cachefiles_object *object,
				const char *prefix,
				u8 *keybuf)
{
	struct fscache_cookie *cookie;
	unsigned keylen, loop;

	printk(KERN_ERR "%sobject: OBJ%x\n",
	       prefix, object->fscache.debug_id);
	printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
	       prefix, fscache_object_states[object->fscache.state],
	       object->fscache.flags, work_busy(&object->fscache.work),
	       object->fscache.events,
	       object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
	printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
	       prefix, object->fscache.n_ops, object->fscache.n_in_progress,
	       object->fscache.n_exclusive);
	printk(KERN_ERR "%sparent=%p\n",
	       prefix, object->fscache.parent);

	/* the cookie may be detached concurrently - pin it with the
	 * object lock while we dereference it */
	spin_lock(&object->fscache.lock);
	cookie = object->fscache.cookie;
	if (cookie) {
		printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
		       prefix,
		       object->fscache.cookie,
		       object->fscache.cookie->parent,
		       object->fscache.cookie->netfs_data,
		       object->fscache.cookie->flags);
		if (keybuf)
			keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
						      CACHEFILES_KEYBUF_SIZE);
		else
			keylen = 0;
	} else {
		printk(KERN_ERR "%scookie=NULL\n", prefix);
		keylen = 0;
	}
	spin_unlock(&object->fscache.lock);

	/* print the cookie key as hex bytes */
	if (keylen) {
		printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
		for (loop = 0; loop < keylen; loop++)
			printk("%02x", keybuf[loop]);
		printk("'\n");
	}
}
/*
 * Dump debugging info about one or two objects (either pointer may be
 * NULL).  A shared scratch buffer is used for rendering the cookie
 * keys; if its allocation fails the keys are simply omitted.
 */
static noinline void cachefiles_printk_object(struct cachefiles_object *object,
					      struct cachefiles_object *xobject)
{
	u8 *buf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);

	if (object)
		__cachefiles_printk_object(object, "", buf);
	if (xobject)
		__cachefiles_printk_object(xobject, "x", buf);

	kfree(buf);
}
/*
 * mark the owner of a dentry, if there is one, to indicate that that dentry
 * has been preemptively deleted
 * - the caller must hold the i_mutex on the dentry's parent as required to
 *   call vfs_unlink(), vfs_rmdir() or vfs_rename()
 */
static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
					  struct dentry *dentry)
{
	struct cachefiles_object *object;
	struct rb_node *p;

	_enter(",'%*.*s'",
	       dentry->d_name.len, dentry->d_name.len, dentry->d_name.name);

	/* search the active-object tree, which is keyed by dentry address */
	write_lock(&cache->active_lock);

	p = cache->active_nodes.rb_node;
	while (p) {
		object = rb_entry(p, struct cachefiles_object, active_node);
		if (object->dentry > dentry)
			p = p->rb_left;
		else if (object->dentry < dentry)
			p = p->rb_right;
		else
			goto found_dentry;
	}

	/* no object owns this dentry - nothing to mark */
	write_unlock(&cache->active_lock);
	_leave(" [no owner]");
	return;

	/* found the dentry for  */
found_dentry:
	kdebug("preemptive burial: OBJ%x [%s] %p",
	       object->fscache.debug_id,
	       fscache_object_states[object->fscache.state],
	       dentry);

	/* a live (not-yet-dying) object should never be buried out from
	 * under itself - complain loudly if that happens */
	if (object->fscache.state < FSCACHE_OBJECT_DYING) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Can't preemptively bury live object\n");
		cachefiles_printk_object(object, NULL);
	} else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error:"
		       " Object already preemptively buried\n");
	}

	write_unlock(&cache->active_lock);
	_leave(" [owner marked]");
}
/*
 * record the fact that an object is now active
 * - inserts the object into the cache's active-object rbtree (keyed by
 *   dentry address)
 * - if an old object from a previous incarnation still holds the slot,
 *   waits up to 60s for it to go away; returns -ETIMEDOUT (meaning
 *   "requeue me") if it doesn't, 0 on success
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	/* an object may only be made active once */
	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		printk(KERN_ERR "CacheFiles: Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	/* standard rbtree insertion walk */
	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	/* the old object must already be dying, otherwise this is an
	 * unexpected collision */
	if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
		printk(KERN_ERR "\n");
		printk(KERN_ERR "CacheFiles: Error:"
		       " Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
		BUG();
	}
	/* pin the old object so it can't be freed while we wait on it */
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;
		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* give up after 60s rather than blocking indefinitely */
		if (timeout <= 0) {
			printk(KERN_ERR "\n");
			printk(KERN_ERR "CacheFiles: Error: Overlong"
			       " wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	/* the old object is gone - drop our pin and retry the insertion */
	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	/* back out and tell the caller to retry this object later */
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}
/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex (taken by the caller) on all paths
 * - @preemptive: mark the owning object (if any) as preemptively buried
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];	/* "%08x%08x" + NUL */
	int ret;

	_enter(",'%*.*s','%*.*s'",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name,
	       rep->d_name.len, rep->d_name.len, rep->d_name.name);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!S_ISDIR(rep->d_inode->i_mode)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			ret = vfs_unlink(dir->d_inode, rep);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep);
		}

		mutex_unlock(&dir->d_inode->i_mutex);

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	mutex_unlock(&dir->d_inode->i_mutex);

try_again:
	/* first step is to make up a grave dentry in the graveyard:
	 * name = <seconds><counter> in hex */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	/* name collision in the graveyard - retry with a fresh
	 * timestamp/counter name */
	if (grave->d_inode) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		ret = vfs_rename(dir->d_inode, rep,
				 cache->graveyard->d_inode, grave);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
/*
 * delete an object representation from the cache
 * - returns 0 if the object was already buried or successfully removed
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(object->dentry->d_inode);
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	/* lock the parent so the victim can't be renamed/culled under us */
	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		mutex_unlock(&dir->d_inode->i_mutex);
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			/* cachefiles_bury_object() drops the i_mutex */
			ret = cachefiles_bury_object(cache, dir,
						     object->dentry, false);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			mutex_unlock(&dir->d_inode->i_mutex);
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 * - @key is a sequence of NUL-separated path components terminated by a
 *   double NUL
 * - intermediate components and index objects become directories;
 *   terminal non-index objects become regular files
 * - on success the object's dentry is set and the object is marked
 *   active and obtained
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(parent->dentry->d_inode);

	if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
		// TODO: convert file to dir
		_leave("looking up in none directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;		/* this was the last component */

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");

	/* at the terminal component, record whether the object is new */
	if (!key)
		object->new = !next->d_inode;

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (!next->d_inode)
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(dir->d_inode, next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode)) {
			kerror("inode %lu is not a directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (!next->d_inode) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto create_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(next->d_inode);

			_debug("create -> %p{%p{ino=%lu}}",
			       next, next->d_inode, next->d_inode->i_ino);

		} else if (!S_ISDIR(next->d_inode->i_mode) &&
			   !S_ISREG(next->d_inode->i_mode)
			   ) {
			kerror("inode %lu is not a file or directory",
			       next->d_inode->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%*.*s'",
		       next->d_name.len, next->d_name.len, next->d_name.name);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			_debug("redo lookup");
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		touch_atime(cache->mnt, next);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (S_ISREG(object->dentry->d_inode->i_mode)) {
			const struct address_space_operations *aops;

			/* the backing file must support bmap */
			ret = -EPERM;
			aops = object->dentry->d_inode->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
	return 0;

create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	/* the object went bad after activation - withdraw it from the
	 * active tree and wake anyone waiting for the slot */
	_debug("check error %d", ret);
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
	write_unlock(&cache->active_lock);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}
/*
 * get a subdirectory
 * - looks up @dirname in @dir, creating it (mode 0700) if it doesn't
 *   exist, and checks the backing fs supports the operations we need
 * - returns the subdir dentry with a reference held, or an ERR_PTR
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	mutex_lock(&dir->d_inode->i_mutex);

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, subdir->d_inode ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (!subdir->d_inode) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(dir->d_inode, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		ASSERT(subdir->d_inode);

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       subdir->d_inode,
		       subdir->d_inode->i_ino);
	}

	mutex_unlock(&dir->d_inode->i_mutex);

	/* we need to make sure the subdir is a directory */
	ASSERT(subdir->d_inode);

	if (!S_ISDIR(subdir->d_inode->i_mode)) {
		kerror("%s is not a directory", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* the backing filesystem must support the full set of inode
	 * operations the cache relies on */
	ret = -EPERM;
	if (!subdir->d_inode->i_op ||
	    !subdir->d_inode->i_op->setxattr ||
	    !subdir->d_inode->i_op->getxattr ||
	    !subdir->d_inode->i_op->lookup ||
	    !subdir->d_inode->i_op->mkdir ||
	    !subdir->d_inode->i_op->create ||
	    !subdir->d_inode->i_op->rename ||
	    !subdir->d_inode->i_op->rmdir ||
	    !subdir->d_inode->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", subdir->d_inode->i_ino);
	return subdir;

check_error:
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(subdir);
	kerror("mkdir %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(subdir);
	kerror("Lookup %s failed with error %d", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	mutex_unlock(&dir->d_inode->i_mutex);
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 * - otherwise returns an ERR_PTR (-ENOENT/-ESTALE/-EBUSY/-EIO) with the
 *   directory unlocked
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%*.*s/,%s",
	//       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	/* look up the victim */
	mutex_lock_nested(&dir->d_inode->i_mutex, 1);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, victim->d_inode ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (!victim->d_inode) {
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object */
	read_lock(&cache->active_lock);

	/* the active tree is keyed by dentry address */
	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	mutex_unlock(&dir->d_inode->i_mutex);
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	/* anything other than ENOMEM is squashed to EIO for the daemon */
	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		kerror("Internal error: %d", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%*.*s/,%s",
	       dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);

	/* on success this returns the victim with a ref held and the dir
	 * still locked */
	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, victim->d_inode ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, dir, victim, false);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		kerror("Internal error: %d", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *found;

	/* cachefiles_check_active() returns with the dir locked and a
	 * reference on the dentry when the object is not in use */
	found = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(found))
		return PTR_ERR(found);

	/* not in use - just release the lock and the reference again */
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(found);
	return 0;
}
| gpl-2.0 |
ffolkes/android_kernel_samsung_trlte | arch/powerpc/platforms/ps3/repository.c | 4201 | 33511 | /*
* PS3 repository routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/lv1call.h>
#include "platform.h"
/* Vendor-id component added to the first field of a repository node name. */
enum ps3_vendor_id {
	PS3_VENDOR_ID_NONE = 0,
	PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
};
/* Logical partition ids accepted by read_node(). */
enum ps3_lpar_id {
	PS3_LPAR_ID_CURRENT = 0,	/* placeholder: resolved to the running LPAR */
	PS3_LPAR_ID_PME = 1,		/* the PME partition's repository */
};
#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)

/*
 * Print one 64-bit repository name field both as hex and as an
 * 8-character ASCII rendering (non-printable bytes shown as '.').
 * Compiled out unless DEBUG is defined.
 */
static void _dump_field(const char *hdr, u64 n, const char *func, int line)
{
#if defined(DEBUG)
	const char *bytes = (const char *)&n;
	char text[16];
	unsigned int i = 0;

	while (i < 8) {
		text[i] = (bytes[i] >= 32 && bytes[i] <= 126) ? bytes[i] : '.';
		i++;
	}
	text[i] = 0;

	pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, text);
#endif
}
#define dump_node_name(_a, _b, _c, _d, _e) \
	_dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)

/* Dump the four name fields of a repository node. */
static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
	u64 n4, const char *func, int line)
{
	const u64 name[] = { n1, n2, n3, n4 };
	static const char *const hdr[] = { "n1: ", "n2: ", "n3: ", "n4: " };
	unsigned int i;

	pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
	for (i = 0; i < 4; i++)
		_dump_field(hdr[i], name[i], func, line);
}
#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
	_dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)

/* Dump a whole repository node: four name fields plus both values. */
static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
	u64 v1, u64 v2, const char *func, int line)
{
	const u64 name[] = { n1, n2, n3, n4 };
	static const char *const hdr[] = { "n1: ", "n2: ", "n3: ", "n4: " };
	unsigned int i;

	pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
	for (i = 0; i < 4; i++)
		_dump_field(hdr[i], name[i], func, line);
	pr_devel("%s:%d: v1: %016llx\n", func, line, v1);
	pr_devel("%s:%d: v2: %016llx\n", func, line, v2);
}
/**
 * make_first_field - Make the first field of a repository node name.
 * @text: Text portion of the field.
 * @index: Numeric index portion of the field.  Use zero for 'don't care'.
 *
 * This routine sets the vendor id to zero (non-vendor specific).
 * Returns field value.
 */
static u64 make_first_field(const char *text, u64 index)
{
	u64 field;

	/* strncpy zero-pads, so names shorter than 8 chars still give a
	 * fully-defined value; only the upper 32 bits are kept here */
	strncpy((char *)&field, text, 8);
	return PS3_VENDOR_ID_NONE + (field >> 32) + index;
}
/**
 * make_field - Make subsequent fields of a repository node name.
 * @text: Text portion of the field.  Use "" for 'don't care'.
 * @index: Numeric index portion of the field.  Use zero for 'don't care'.
 *
 * Returns field value.
 */
static u64 make_field(const char *text, u64 index)
{
	u64 field;

	/* strncpy zero-pads the remainder of the 8 bytes */
	strncpy((char *)&field, text, 8);
	return field + index;
}
/**
 * read_node - Read a repository node from raw fields.
 * @lpar_id: Logical partition id; PS3_LPAR_ID_CURRENT is resolved to the
 *           id of the partition we're running in.
 * @n1: First field of node name.
 * @n2: Second field of node name.  Use zero for 'don't care'.
 * @n3: Third field of node name.  Use zero for 'don't care'.
 * @n4: Fourth field of node name.  Use zero for 'don't care'.
 * @_v1: First repository value (high word); may be NULL to discard.
 * @_v2: Second repository value (low word); may be NULL to discard.
 *
 * Returns 0 on success, or -ENOENT if the hypervisor call fails.
 */
static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
	u64 *_v1, u64 *_v2)
{
	int result;
	u64 v1;
	u64 v2;

	/* PS3_LPAR_ID_CURRENT is a placeholder: substitute our own id */
	if (lpar_id == PS3_LPAR_ID_CURRENT) {
		u64 id;
		lv1_get_logical_partition_id(&id);
		lpar_id = id;
	}

	result = lv1_read_repository_node(lpar_id, n1, n2, n3, n4, &v1,
		&v2);

	if (result) {
		pr_warn("%s:%d: lv1_read_repository_node failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		dump_node_name(lpar_id, n1, n2, n3, n4);
		return -ENOENT;
	}

	dump_node(lpar_id, n1, n2, n3, n4, v1, v2);

	if (_v1)
		*_v1 = v1;
	if (_v2)
		*_v2 = v2;

	/* warn if the caller is throwing away data the node carried */
	if (v1 && !_v1)
		pr_devel("%s:%d: warning: discarding non-zero v1: %016llx\n",
			__func__, __LINE__, v1);
	if (v2 && !_v2)
		pr_devel("%s:%d: warning: discarding non-zero v2: %016llx\n",
			__func__, __LINE__, v2);

	return 0;
}
/* Read the value of node bus.<bus_index>.<bus_str> into *value. */
int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
	u64 *value)
{
	return read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field(bus_str, 0),
		0, 0,
		value, NULL);
}
/* Read the value of node bus.<bus_index>.id into *bus_id. */
int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id)
{
	return read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("id", 0),
		0, 0,
		bus_id, NULL);
}
/* Read node bus.<bus_index>.type as an enum ps3_bus_type. */
int ps3_repository_read_bus_type(unsigned int bus_index,
	enum ps3_bus_type *bus_type)
{
	int result;
	u64 v1 = 0;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("type", 0),
		0, 0,
		&v1, NULL);
	/* note: *bus_type is written even on failure (v1 stays 0) */
	*bus_type = v1;
	return result;
}
/* Read node bus.<bus_index>.num_dev: the number of devices on the bus. */
int ps3_repository_read_bus_num_dev(unsigned int bus_index,
	unsigned int *num_dev)
{
	int result;
	u64 v1 = 0;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("num_dev", 0),
		0, 0,
		&v1, NULL);
	/* note: *num_dev is written even on failure (v1 stays 0) */
	*num_dev = v1;
	return result;
}
/* Read the value of node bus.<bus_index>.dev.<dev_index>.<dev_str>. */
int ps3_repository_read_dev_str(unsigned int bus_index,
	unsigned int dev_index, const char *dev_str, u64 *value)
{
	return read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field(dev_str, 0),
		0,
		value, NULL);
}
/* Read the value of node bus.<bus_index>.dev.<dev_index>.id into *dev_id. */
int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
	u64 *dev_id)
{
	return read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("id", 0),
		0,
		dev_id, NULL);
}
/* Read node bus.<bus_index>.dev.<dev_index>.type as an enum ps3_dev_type. */
int ps3_repository_read_dev_type(unsigned int bus_index,
	unsigned int dev_index, enum ps3_dev_type *dev_type)
{
	int result;
	u64 v1 = 0;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("type", 0),
		0,
		&v1, NULL);
	/* note: *dev_type is written even on failure (v1 stays 0) */
	*dev_type = v1;
	return result;
}
/* Read node bus.<bus_index>.dev.<dev_index>.intr.<intr_index>:
 * v1 holds the interrupt type, v2 the interrupt id. */
int ps3_repository_read_dev_intr(unsigned int bus_index,
	unsigned int dev_index, unsigned int intr_index,
	enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
{
	int result;
	u64 v1 = 0;
	u64 v2 = 0;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("intr", intr_index),
		0,
		&v1, &v2);
	/* note: outputs are written even on failure (v1/v2 stay 0) */
	*intr_type = v1;
	*interrupt_id = v2;
	return result;
}
/* Read node bus.<bus_index>.dev.<dev_index>.reg.<reg_index>.type. */
int ps3_repository_read_dev_reg_type(unsigned int bus_index,
	unsigned int dev_index, unsigned int reg_index,
	enum ps3_reg_type *reg_type)
{
	int result;
	u64 v1 = 0;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("reg", reg_index),
		make_field("type", 0),
		&v1, NULL);
	/* note: *reg_type is written even on failure (v1 stays 0) */
	*reg_type = v1;
	return result;
}
/* Read the bus address and length of one register resource. */
int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
	unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("reg", reg_index),
		make_field("data", 0),
		bus_addr, len);

	return result;
}
/* Read a full register resource: type first, then address/length. */
int ps3_repository_read_dev_reg(unsigned int bus_index,
	unsigned int dev_index, unsigned int reg_index,
	enum ps3_reg_type *reg_type, u64 *bus_addr, u64 *len)
{
	int result;

	result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
		reg_index, reg_type);
	if (result)
		return result;

	return ps3_repository_read_dev_reg_addr(bus_index, dev_index,
		reg_index, bus_addr, len);
}
/*
 * ps3_repository_find_device - Fill in dev_type and dev_id for the
 * (bus_index, dev_index) pair already set in @repo.
 *
 * Works on a local copy and writes back to *repo only on complete success,
 * so a partial failure leaves the caller's struct unchanged.
 * Returns 0 on success, -ENODEV if dev_index is out of range for the bus,
 * or the error from an underlying repository read.
 */
int ps3_repository_find_device(struct ps3_repository_device *repo)
{
	int result;
	struct ps3_repository_device tmp = *repo;
	unsigned int num_dev;

	/* Sanity limits; indices beyond 10 indicate caller corruption. */
	BUG_ON(repo->bus_index > 10);
	BUG_ON(repo->dev_index > 10);

	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
	if (result) {
		pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
		return result;
	}

	pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
		__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
		num_dev);

	/* An index past the device count means the enumeration is done. */
	if (tmp.dev_index >= num_dev) {
		pr_devel("%s:%d: no device found\n", __func__, __LINE__);
		return -ENODEV;
	}

	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
		&tmp.dev_type);
	if (result) {
		pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__);
		return result;
	}

	result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
		&tmp.dev_id);
	if (result) {
		pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__,
			__LINE__);
		return result;
	}

	pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
		__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);

	/* Commit the fully-populated copy back to the caller. */
	*repo = tmp;
	return 0;
}
/*
 * ps3_repository_find_device_by_id - Locate a device by (bus_id, dev_id)
 * and fill in *repo with its full description.
 *
 * Scans up to 10 buses for a matching bus_id, then scans that bus's
 * devices for a matching dev_id. *repo is only written on full success.
 * Returns 0 on success, -ENODEV if no match, or a read error.
 */
int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
	u64 bus_id, u64 dev_id)
{
	int result = -ENODEV;
	struct ps3_repository_device tmp;
	unsigned int num_dev;

	pr_devel(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
		bus_id, dev_id);

	/* Pass 1: find the bus whose id matches. */
	for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
		result = ps3_repository_read_bus_id(tmp.bus_index,
			&tmp.bus_id);
		if (result) {
			pr_devel("%s:%u read_bus_id(%u) failed\n", __func__,
				__LINE__, tmp.bus_index);
			return result;
		}

		if (tmp.bus_id == bus_id)
			goto found_bus;

		pr_devel("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
			tmp.bus_id);
	}
	pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
	return result;

found_bus:
	result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type);
	if (result) {
		pr_devel("%s:%u read_bus_type(%u) failed\n", __func__,
			__LINE__, tmp.bus_index);
		return result;
	}

	result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
	if (result) {
		pr_devel("%s:%u read_bus_num_dev failed\n", __func__,
			__LINE__);
		return result;
	}

	/* Pass 2: find the device on that bus whose id matches. */
	for (tmp.dev_index = 0; tmp.dev_index < num_dev; tmp.dev_index++) {
		result = ps3_repository_read_dev_id(tmp.bus_index,
			tmp.dev_index,
			&tmp.dev_id);
		if (result) {
			pr_devel("%s:%u read_dev_id(%u:%u) failed\n", __func__,
				__LINE__, tmp.bus_index, tmp.dev_index);
			return result;
		}

		if (tmp.dev_id == dev_id)
			goto found_dev;

		pr_devel("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
			tmp.dev_id);
	}
	pr_devel(" <- %s:%u: dev not found\n", __func__, __LINE__);
	return result;

found_dev:
	result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
		&tmp.dev_type);
	if (result) {
		pr_devel("%s:%u read_dev_type failed\n", __func__, __LINE__);
		return result;
	}

	pr_devel(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
		__func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
		tmp.dev_index, tmp.bus_id, tmp.dev_id);

	/* Commit the fully-populated copy back to the caller. */
	*repo = tmp;
	return 0;
}
/*
 * ps3_repository_find_devices - Invoke @callback for every device on the
 * first bus of the given @bus_type.
 *
 * Iteration stops early with the callback's non-zero return value, or
 * with a read error. -ENODEV from ps3_repository_find_device marks the
 * normal end of the device list and is converted to 0.
 * NOTE(review): only the first matching bus is scanned (find_bus with
 * from == 0) — presumably there is at most one bus per type; confirm.
 */
int ps3_repository_find_devices(enum ps3_bus_type bus_type,
	int (*callback)(const struct ps3_repository_device *repo))
{
	int result = 0;
	struct ps3_repository_device repo;

	pr_devel(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);

	repo.bus_type = bus_type;
	result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
	if (result) {
		pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
		return result;
	}

	result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
	if (result) {
		pr_devel("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
			repo.bus_index);
		return result;
	}

	for (repo.dev_index = 0; ; repo.dev_index++) {
		result = ps3_repository_find_device(&repo);
		if (result == -ENODEV) {
			/* End of device list: not an error. */
			result = 0;
			break;
		} else if (result)
			break;

		result = callback(&repo);
		if (result) {
			pr_devel("%s:%d: abort at callback\n", __func__,
				__LINE__);
			break;
		}
	}

	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index)
{
unsigned int i;
enum ps3_bus_type type;
int error;
for (i = from; i < 10; i++) {
error = ps3_repository_read_bus_type(i, &type);
if (error) {
pr_devel("%s:%d read_bus_type failed\n",
__func__, __LINE__);
*bus_index = UINT_MAX;
return error;
}
if (type == bus_type) {
*bus_index = i;
return 0;
}
}
*bus_index = UINT_MAX;
return -ENODEV;
}
/*
 * ps3_repository_find_interrupt - Search a device's interrupt resources
 * for one of @intr_type, returning its interrupt id.
 *
 * Scans up to 10 resource slots.  On not-found, *interrupt_id is left at
 * UINT_MAX and -ENODEV is returned; a read error is returned as-is.
 */
int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
	enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
{
	int result = 0;
	unsigned int res_index;

	pr_devel("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);

	*interrupt_id = UINT_MAX;

	for (res_index = 0; res_index < 10; res_index++) {
		enum ps3_interrupt_type t;
		unsigned int id;

		result = ps3_repository_read_dev_intr(repo->bus_index,
			repo->dev_index, res_index, &t, &id);
		if (result) {
			pr_devel("%s:%d read_dev_intr failed\n",
				__func__, __LINE__);
			return result;
		}

		if (t == intr_type) {
			*interrupt_id = id;
			break;
		}
	}

	/* Loop ran to completion without a match. */
	if (res_index == 10)
		return -ENODEV;

	pr_devel("%s:%d: found intr_type %u at res_index %u\n",
		__func__, __LINE__, intr_type, res_index);

	return result;
}
/*
 * ps3_repository_find_reg - Search a device's register resources for one
 * of @reg_type, returning its bus address and length.
 *
 * Scans up to 10 resource slots.  On not-found, *bus_addr and *len are
 * left at 0 and -ENODEV is returned; a read error is returned as-is.
 */
int ps3_repository_find_reg(const struct ps3_repository_device *repo,
	enum ps3_reg_type reg_type, u64 *bus_addr, u64 *len)
{
	int result = 0;
	unsigned int res_index;

	pr_devel("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);

	*bus_addr = *len = 0;

	for (res_index = 0; res_index < 10; res_index++) {
		enum ps3_reg_type t;
		u64 a;
		u64 l;

		result = ps3_repository_read_dev_reg(repo->bus_index,
			repo->dev_index, res_index, &t, &a, &l);
		if (result) {
			pr_devel("%s:%d read_dev_reg failed\n",
				__func__, __LINE__);
			return result;
		}

		if (t == reg_type) {
			*bus_addr = a;
			*len = l;
			break;
		}
	}

	/* Loop ran to completion without a match. */
	if (res_index == 10)
		return -ENODEV;

	pr_devel("%s:%d: found reg_type %u at res_index %u\n",
		__func__, __LINE__, reg_type, res_index);

	return result;
}
/* Read a storage device's port number. */
int ps3_repository_read_stor_dev_port(unsigned int bus_index,
	unsigned int dev_index, u64 *port)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("port", 0),
		0, port, NULL);

	return result;
}
/* Read a storage device's block size. */
int ps3_repository_read_stor_dev_blk_size(unsigned int bus_index,
	unsigned int dev_index, u64 *blk_size)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("blk_size", 0),
		0, blk_size, NULL);

	return result;
}
/* Read a storage device's total block count ("n_blocks" node). */
int ps3_repository_read_stor_dev_num_blocks(unsigned int bus_index,
	unsigned int dev_index, u64 *num_blocks)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("n_blocks", 0),
		0, num_blocks, NULL);

	return result;
}
/* Read a storage device's region count ("n_regs" node). */
int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
	unsigned int dev_index, unsigned int *num_regions)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_PME,
		       make_first_field("bus", bus_index),
		       make_field("dev", dev_index),
		       make_field("n_regs", 0),
		       0, &val, NULL);

	*num_regions = val;
	return rc;
}
/* Read the id of one region of a storage device. */
int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
	unsigned int dev_index, unsigned int region_index,
	unsigned int *region_id)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_PME,
		       make_first_field("bus", bus_index),
		       make_field("dev", dev_index),
		       make_field("region", region_index),
		       make_field("id", 0),
		       &val, NULL);

	*region_id = val;
	return rc;
}
/* Read the size of one region of a storage device. */
int ps3_repository_read_stor_dev_region_size(unsigned int bus_index,
	unsigned int dev_index, unsigned int region_index, u64 *region_size)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("region", region_index),
		make_field("size", 0),
		region_size, NULL);

	return result;
}
/* Read the start block of one region of a storage device. */
int ps3_repository_read_stor_dev_region_start(unsigned int bus_index,
	unsigned int dev_index, unsigned int region_index, u64 *region_start)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("bus", bus_index),
		make_field("dev", dev_index),
		make_field("region", region_index),
		make_field("start", 0),
		region_start, NULL);

	return result;
}
/*
 * Read a storage device's summary info: port, block size, block count
 * and region count.  Stops at the first failing read.
 */
int ps3_repository_read_stor_dev_info(unsigned int bus_index,
	unsigned int dev_index, u64 *port, u64 *blk_size,
	u64 *num_blocks, unsigned int *num_regions)
{
	int error;

	error = ps3_repository_read_stor_dev_port(bus_index, dev_index, port);
	if (!error)
		error = ps3_repository_read_stor_dev_blk_size(bus_index,
			dev_index, blk_size);
	if (!error)
		error = ps3_repository_read_stor_dev_num_blocks(bus_index,
			dev_index, num_blocks);
	if (!error)
		error = ps3_repository_read_stor_dev_num_regions(bus_index,
			dev_index, num_regions);

	return error;
}
/*
 * Read one region of a storage device: id, start and size.
 * Stops at the first failing read.
 */
int ps3_repository_read_stor_dev_region(unsigned int bus_index,
	unsigned int dev_index, unsigned int region_index,
	unsigned int *region_id, u64 *region_start, u64 *region_size)
{
	int error;

	error = ps3_repository_read_stor_dev_region_id(bus_index, dev_index,
		region_index, region_id);
	if (!error)
		error = ps3_repository_read_stor_dev_region_start(bus_index,
			dev_index, region_index, region_start);
	if (!error)
		error = ps3_repository_read_stor_dev_region_size(bus_index,
			dev_index, region_index, region_size);

	return error;
}
/**
 * ps3_repository_read_num_pu - Number of logical PU processors for this lpar.
 */
int ps3_repository_read_num_pu(u64 *num_pu)
{
	int result;

	*num_pu = 0;
	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("bi", 0),
		make_field("pun", 0),
		0, 0,
		num_pu, NULL);

	return result;
}
/**
 * ps3_repository_read_pu_id - Read the logical PU id.
 * @pu_index: Zero based index.
 * @pu_id: The logical PU id.
 */
int ps3_repository_read_pu_id(unsigned int pu_index, u64 *pu_id)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("bi", 0),
		make_field("pu", pu_index),
		0, 0,
		pu_id, NULL);

	return result;
}
/* Read the real-mode memory size for the given ppe id. */
int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("bi", 0),
		make_field("pu", 0),
		ppe_id,
		make_field("rm_size", 0),
		rm_size, NULL);

	return result;
}
/* Read the maximum total memory region size for this lpar. */
int ps3_repository_read_region_total(u64 *region_total)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("bi", 0),
		make_field("rgntotal", 0),
		0, 0,
		region_total, NULL);

	return result;
}
/**
 * ps3_repository_read_mm_info - Read mm info for single pu system.
 * @rm_base: Real mode memory base address (always set to 0).
 * @rm_size: Real mode memory size.
 * @region_total: Maximum memory region size.
 */
int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
{
	int result;
	u64 ppe_id;

	lv1_get_logical_ppe_id(&ppe_id);
	*rm_base = 0;

	result = ps3_repository_read_rm_size(ppe_id, rm_size);
	if (result)
		return result;

	return ps3_repository_read_region_total(region_total);
}
/**
 * ps3_repository_read_highmem_region_count - Read the number of highmem regions
 *
 * Bootloaders must arrange the repository nodes such that regions are indexed
 * with a region_index from 0 to region_count-1.
 */
int ps3_repository_read_highmem_region_count(unsigned int *region_count)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("highmem", 0),
		       make_field("region", 0),
		       make_field("count", 0),
		       0,
		       &val, NULL);

	*region_count = val;
	return rc;
}
/* Read the base address of one highmem region. */
int ps3_repository_read_highmem_base(unsigned int region_index,
	u64 *highmem_base)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("base", 0),
		0,
		highmem_base, NULL);

	return result;
}
/* Read the size of one highmem region. */
int ps3_repository_read_highmem_size(unsigned int region_index,
	u64 *highmem_size)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("size", 0),
		0,
		highmem_size, NULL);

	return result;
}
/**
 * ps3_repository_read_highmem_info - Read high memory region info
 * @region_index: Region index, {0,..,region_count-1}.
 * @highmem_base: High memory base address.
 * @highmem_size: High memory size.
 *
 * Bootloaders that preallocate highmem regions must place the
 * region info into the repository at these well known nodes.
 */
int ps3_repository_read_highmem_info(unsigned int region_index,
	u64 *highmem_base, u64 *highmem_size)
{
	int result;

	*highmem_base = 0;

	result = ps3_repository_read_highmem_base(region_index, highmem_base);
	if (result)
		return result;

	return ps3_repository_read_highmem_size(region_index, highmem_size);
}
/**
 * ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
 * @num_spu_reserved: Number of physical spus.
 */
int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("spun", 0),
		       0, 0,
		       &val, NULL);

	*num_spu_reserved = val;
	return rc;
}
/**
 * ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
 * @num_resource_id: Number of spu resource ids.
 */
int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("spursvn", 0),
		       0, 0,
		       &val, NULL);

	*num_resource_id = val;
	return rc;
}
/**
 * ps3_repository_read_spu_resource_id - spu resource reservation id value.
 * @res_index: Resource reservation index.
 * @resource_type: Resource reservation type.
 * @resource_id: Resource reservation id.
 */
int ps3_repository_read_spu_resource_id(unsigned int res_index,
	enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
{
	u64 type_val = 0;
	u64 id_val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("spursv", 0),
		       res_index,
		       0,
		       &type_val, &id_val);

	*resource_type = type_val;
	*resource_id = id_val;
	return rc;
}
/* Read the lpar address of the boot data area. */
static int ps3_repository_read_boot_dat_address(u64 *address)
{
	int result;

	result = read_node(PS3_LPAR_ID_CURRENT,
		make_first_field("bi", 0),
		make_field("boot_dat", 0),
		make_field("address", 0),
		0,
		address, NULL);

	return result;
}
/* Read the size of the boot data area. */
int ps3_repository_read_boot_dat_size(unsigned int *size)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("boot_dat", 0),
		       make_field("size", 0),
		       0,
		       &val, NULL);

	*size = val;
	return rc;
}
/* Read the virtual uart port number of the AV settings service. */
int ps3_repository_read_vuart_av_port(unsigned int *port)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("vir_uart", 0),
		       make_field("port", 0),
		       make_field("avset", 0),
		       &val, NULL);

	*port = val;
	return rc;
}
/* Read the virtual uart port number of the system manager service. */
int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_CURRENT,
		       make_first_field("bi", 0),
		       make_field("vir_uart", 0),
		       make_field("port", 0),
		       make_field("sysmgr", 0),
		       &val, NULL);

	*port = val;
	return rc;
}
/**
 * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
 * @lpar_addr: lpar address of cell_ext_os_area
 * @size: size of cell_ext_os_area
 */
int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
{
	int result;

	*size = 0;

	result = ps3_repository_read_boot_dat_address(lpar_addr);
	if (result)
		return result;

	return ps3_repository_read_boot_dat_size(size);
}
/**
 * ps3_repository_read_num_be - Number of physical BE processors in the system.
 */
int ps3_repository_read_num_be(unsigned int *num_be)
{
	u64 val = 0;
	int rc;

	rc = read_node(PS3_LPAR_ID_PME,
		       make_first_field("ben", 0),
		       0,
		       0,
		       0,
		       &val, NULL);

	*num_be = val;
	return rc;
}
/**
 * ps3_repository_read_be_node_id - Read the physical BE processor node id.
 * @be_index: Zero based index.
 * @node_id: The BE processor node id.
 */
int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("be", be_index),
		0,
		0,
		0,
		node_id, NULL);

	return result;
}
/**
 * ps3_repository_read_be_id - Read the physical BE processor id.
 * @node_id: The BE processor node id.
 * @be_id: The BE processor id.
 */
int ps3_repository_read_be_id(u64 node_id, u64 *be_id)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("be", 0),
		node_id,
		0,
		0,
		be_id, NULL);

	return result;
}
/* Read the timebase ("clock") frequency of a BE node. */
int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
{
	int result;

	result = read_node(PS3_LPAR_ID_PME,
		make_first_field("be", 0),
		node_id,
		make_field("clock", 0),
		0,
		tb_freq, NULL);

	return result;
}
/* Read the timebase frequency of the BE processor at @be_index. */
int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
{
	int result;
	u64 node_id;

	*tb_freq = 0;

	result = ps3_repository_read_be_node_id(be_index, &node_id);
	if (result)
		return result;

	return ps3_repository_read_tb_freq(node_id, tb_freq);
}
/* Read the logical performance monitor privileges (lpar id + rights). */
int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
	u64 *rights)
{
	int result;
	u64 node_id;

	*lpar = 0;
	*rights = 0;

	result = ps3_repository_read_be_node_id(be_index, &node_id);
	if (result)
		return result;

	return read_node(PS3_LPAR_ID_PME,
		make_first_field("be", 0),
		node_id,
		make_field("lpm", 0),
		make_field("priv", 0),
		lpar, rights);
}
#if defined(CONFIG_PS3_REPOSITORY_WRITE)
/* Create a new repository node; -ENOENT on hypervisor failure. */
static int create_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
	int status;

	dump_node(0, n1, n2, n3, n4, v1, v2);

	status = lv1_create_repository_node(n1, n2, n3, n4, v1, v2);
	if (!status)
		return 0;

	pr_devel("%s:%d: lv1_create_repository_node failed: %s\n",
		__func__, __LINE__, ps3_result(status));
	return -ENOENT;
}
/* Delete a repository node; -ENOENT on hypervisor failure. */
static int delete_node(u64 n1, u64 n2, u64 n3, u64 n4)
{
	int status;

	dump_node(0, n1, n2, n3, n4, 0, 0);

	status = lv1_delete_repository_node(n1, n2, n3, n4);
	if (!status)
		return 0;

	pr_devel("%s:%d: lv1_delete_repository_node failed: %s\n",
		__func__, __LINE__, ps3_result(status));
	return -ENOENT;
}
/* Create a node, or overwrite its values if it already exists. */
static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
	int status;

	/* Fast path: the node does not exist yet. */
	if (!create_node(n1, n2, n3, n4, v1, v2))
		return 0;

	/* Create failed (likely because the node exists): write instead. */
	status = lv1_write_repository_node(n1, n2, n3, n4, v1, v2);
	if (!status)
		return 0;

	pr_devel("%s:%d: lv1_write_repository_node failed: %s\n",
		__func__, __LINE__, ps3_result(status));
	return -ENOENT;
}
int ps3_repository_write_highmem_region_count(unsigned int region_count)
{
int result;
u64 v1 = (u64)region_count;
result = write_node(
make_first_field("highmem", 0),
make_field("region", 0),
make_field("count", 0),
0,
v1, 0);
return result;
}
/* Write the base address node of one highmem region. */
int ps3_repository_write_highmem_base(unsigned int region_index,
	u64 highmem_base)
{
	int result;

	result = write_node(
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("base", 0),
		0,
		highmem_base, 0);

	return result;
}
/* Write the size node of one highmem region. */
int ps3_repository_write_highmem_size(unsigned int region_index,
	u64 highmem_size)
{
	int result;

	result = write_node(
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("size", 0),
		0,
		highmem_size, 0);

	return result;
}
/* Write both base and size nodes of one highmem region. */
int ps3_repository_write_highmem_info(unsigned int region_index,
	u64 highmem_base, u64 highmem_size)
{
	int result;

	result = ps3_repository_write_highmem_base(region_index, highmem_base);
	if (result)
		return result;

	return ps3_repository_write_highmem_size(region_index, highmem_size);
}
/* Delete the base address node of one highmem region. */
static int ps3_repository_delete_highmem_base(unsigned int region_index)
{
	int result;

	result = delete_node(
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("base", 0),
		0);

	return result;
}
/* Delete the size node of one highmem region. */
static int ps3_repository_delete_highmem_size(unsigned int region_index)
{
	int result;

	result = delete_node(
		make_first_field("highmem", 0),
		make_field("region", region_index),
		make_field("size", 0),
		0);

	return result;
}
/*
 * ps3_repository_delete_highmem_info - Delete both nodes of a highmem region.
 *
 * Both deletes are attempted unconditionally so a failure of the first
 * does not leave the second node behind.  Returns 0 only if both succeed,
 * -1 otherwise.
 */
int ps3_repository_delete_highmem_info(unsigned int region_index)
{
	int result;

	result = ps3_repository_delete_highmem_base(region_index);
	result += ps3_repository_delete_highmem_size(region_index);

	return result ? -1 : 0;
}
#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
#if defined(DEBUG)
/*
 * ps3_repository_dump_resource_info - Log all interrupt and register
 * resources of a device (DEBUG builds only).
 *
 * Fix: the address-of operators in the read_dev_reg call had been
 * corrupted into a "(R)" mojibake character ("&reg_" rendered as an HTML
 * entity); restored to "&reg_type" so the file compiles again.
 */
int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
{
	int result = 0;
	unsigned int res_index;

	pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
		repo->bus_index, repo->dev_index);

	for (res_index = 0; res_index < 10; res_index++) {
		enum ps3_interrupt_type intr_type;
		unsigned int interrupt_id;

		result = ps3_repository_read_dev_intr(repo->bus_index,
			repo->dev_index, res_index, &intr_type, &interrupt_id);

		if (result) {
			if (result != LV1_NO_ENTRY)
				pr_devel("%s:%d ps3_repository_read_dev_intr"
					" (%u:%u) failed\n", __func__, __LINE__,
					repo->bus_index, repo->dev_index);
			break;
		}

		pr_devel("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
			__func__, __LINE__, repo->bus_index, repo->dev_index,
			intr_type, interrupt_id);
	}

	for (res_index = 0; res_index < 10; res_index++) {
		enum ps3_reg_type reg_type;
		u64 bus_addr;
		u64 len;

		result = ps3_repository_read_dev_reg(repo->bus_index,
			repo->dev_index, res_index, &reg_type, &bus_addr, &len);

		if (result) {
			if (result != LV1_NO_ENTRY)
				pr_devel("%s:%d ps3_repository_read_dev_reg"
					" (%u:%u) failed\n", __func__, __LINE__,
					repo->bus_index, repo->dev_index);
			break;
		}

		pr_devel("%s:%d (%u:%u) reg_type %u, bus_addr %llxh, len %llxh\n",
			__func__, __LINE__, repo->bus_index, repo->dev_index,
			reg_type, bus_addr, len);
	}

	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/*
 * dump_stor_dev_info - Log a storage device's summary info and all of
 * its regions (DEBUG builds only).
 *
 * Fix: the address-of operators in the read_stor_dev_region call had
 * been corrupted into "(R)" mojibake characters ("&region_" rendered as
 * an HTML entity); restored to "&region_id" etc. so the file compiles.
 */
static int dump_stor_dev_info(struct ps3_repository_device *repo)
{
	int result = 0;
	unsigned int num_regions, region_index;
	u64 port, blk_size, num_blocks;

	pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
		repo->bus_index, repo->dev_index);

	result = ps3_repository_read_stor_dev_info(repo->bus_index,
		repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
	if (result) {
		pr_devel("%s:%d ps3_repository_read_stor_dev_info"
			" (%u:%u) failed\n", __func__, __LINE__,
			repo->bus_index, repo->dev_index);
		goto out;
	}

	pr_devel("%s:%d (%u:%u): port %llu, blk_size %llu, num_blocks "
		 "%llu, num_regions %u\n",
		 __func__, __LINE__, repo->bus_index, repo->dev_index,
		 port, blk_size, num_blocks, num_regions);

	for (region_index = 0; region_index < num_regions; region_index++) {
		unsigned int region_id;
		u64 region_start, region_size;

		result = ps3_repository_read_stor_dev_region(repo->bus_index,
			repo->dev_index, region_index, &region_id,
			&region_start, &region_size);
		if (result) {
			pr_devel("%s:%d ps3_repository_read_stor_dev_region"
				" (%u:%u) failed\n", __func__, __LINE__,
				repo->bus_index, repo->dev_index);
			break;
		}

		pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
			__func__, __LINE__, repo->bus_index, repo->dev_index,
			region_id, (unsigned long)region_start,
			(unsigned long)region_size);
	}

out:
	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/*
 * dump_device_info - Log type, id and resources of every device on the
 * bus described by @repo (DEBUG builds only).
 *
 * A failing dev_type read aborts the loop; a failing dev_id read only
 * skips that device.  Storage devices additionally get their region
 * info dumped.
 */
static int dump_device_info(struct ps3_repository_device *repo,
	unsigned int num_dev)
{
	int result = 0;

	pr_devel(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);

	for (repo->dev_index = 0; repo->dev_index < num_dev;
		repo->dev_index++) {

		result = ps3_repository_read_dev_type(repo->bus_index,
			repo->dev_index, &repo->dev_type);
		if (result) {
			pr_devel("%s:%d ps3_repository_read_dev_type"
				" (%u:%u) failed\n", __func__, __LINE__,
				repo->bus_index, repo->dev_index);
			break;
		}

		result = ps3_repository_read_dev_id(repo->bus_index,
			repo->dev_index, &repo->dev_id);
		if (result) {
			pr_devel("%s:%d ps3_repository_read_dev_id"
				" (%u:%u) failed\n", __func__, __LINE__,
				repo->bus_index, repo->dev_index);
			continue;
		}

		pr_devel("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__,
			__LINE__, repo->bus_index, repo->dev_index,
			repo->dev_type, (unsigned long)repo->dev_id);

		ps3_repository_dump_resource_info(repo);

		if (repo->bus_type == PS3_BUS_TYPE_STORAGE)
			dump_stor_dev_info(repo);
	}

	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
/*
 * ps3_repository_dump_bus_info - Log every bus (and its devices) found
 * in the repository (DEBUG builds only).
 *
 * A failing bus_type read ends the scan (assumed end of bus list);
 * failures reading a bus's id or device count only skip that bus.
 */
int ps3_repository_dump_bus_info(void)
{
	int result = 0;
	struct ps3_repository_device repo;

	pr_devel(" -> %s:%d\n", __func__, __LINE__);

	memset(&repo, 0, sizeof(repo));

	for (repo.bus_index = 0; repo.bus_index < 10; repo.bus_index++) {
		unsigned int num_dev;

		result = ps3_repository_read_bus_type(repo.bus_index,
			&repo.bus_type);
		if (result) {
			pr_devel("%s:%d read_bus_type(%u) failed\n",
				__func__, __LINE__, repo.bus_index);
			break;
		}

		result = ps3_repository_read_bus_id(repo.bus_index,
			&repo.bus_id);
		if (result) {
			pr_devel("%s:%d read_bus_id(%u) failed\n",
				__func__, __LINE__, repo.bus_index);
			continue;
		}

		/* Informational only; the two are expected to match. */
		if (repo.bus_index != repo.bus_id)
			pr_devel("%s:%d bus_index != bus_id\n",
				__func__, __LINE__);

		result = ps3_repository_read_bus_num_dev(repo.bus_index,
			&num_dev);
		if (result) {
			pr_devel("%s:%d read_bus_num_dev(%u) failed\n",
				__func__, __LINE__, repo.bus_index);
			continue;
		}

		pr_devel("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
			__func__, __LINE__, repo.bus_index, repo.bus_type,
			(unsigned long)repo.bus_id, num_dev);

		dump_device_info(&repo, num_dev);
	}

	pr_devel(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
#endif /* defined(DEBUG) */
| gpl-2.0 |
mseskir/android_kernel_vestel_g55 | arch/sh/kernel/traps.c | 4457 | 2262 | #include <linux/bug.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/kdebug.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/unwinder.h>
#include <asm/traps.h>
#ifdef CONFIG_GENERIC_BUG
/*
 * handle_BUG - Process a BUG()/WARN() trap at regs->pc.
 *
 * Warnings advance the pc past the trap instruction and resume; anything
 * else (including an unrecognized trap address) is fatal via die().
 *
 * Fix: find_bug() can return NULL when the address is not in any bug
 * table (e.g. a stale module entry); previously bug->flags was
 * dereferenced unconditionally.  Treat that case as an invalid BUG.
 */
static void handle_BUG(struct pt_regs *regs)
{
	const struct bug_entry *bug;
	unsigned long bugaddr = regs->pc;
	enum bug_trap_type tt;

	if (!is_valid_bugaddr(bugaddr))
		goto invalid;

	bug = find_bug(bugaddr);
	if (!bug)
		goto invalid;

	/* Switch unwinders when unwind_stack() is called */
	if (bug->flags & BUGFLAG_UNWINDER)
		unwinder_faulted = 1;

	tt = report_bug(bugaddr, regs);
	if (tt == BUG_TRAP_TYPE_WARN) {
		regs->pc += instruction_size(bugaddr);
		return;
	}

invalid:
	die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
/*
 * A valid BUG address is a kernel-space address whose instruction can be
 * read safely and is the trapa BUG opcode.
 */
int is_valid_bugaddr(unsigned long addr)
{
	insn_size_t opcode;

	if (addr < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((insn_size_t *)addr, opcode))
		return 0;

	return opcode == TRAPA_BUG_OPCODE;
}
#endif
/*
* Generic trap handler.
*/
/*
 * Generic debug trap handler: rewinds the pc to the trapping
 * instruction, offers the event to the die-notifier chain, and sends
 * SIGTRAP to the current task if nobody claimed it.
 */
BUILD_TRAP_HANDLER(debug)
{
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));

	if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	force_sig(SIGTRAP, current);
}
/*
* Special handler for BUG() traps.
*/
/*
 * Special handler for BUG() traps: rewinds the pc, offers the event to
 * the die-notifier chain, then (with CONFIG_GENERIC_BUG) dispatches to
 * handle_BUG() if the pc points at a BUG trapa opcode in kernel text.
 * Falls back to SIGTRAP otherwise.
 */
BUILD_TRAP_HANDLER(bug)
{
	TRAP_HANDLER_DECL;

	/* Rewind */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));

	if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
		       SIGTRAP) == NOTIFY_STOP)
		return;

#ifdef CONFIG_GENERIC_BUG
	if (__kernel_text_address(instruction_pointer(regs))) {
		insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
		if (insn == TRAPA_BUG_OPCODE)
			handle_BUG(regs);
		/* In kernel text but not a BUG opcode: silently resume. */
		return;
	}
#endif

	force_sig(SIGTRAP, current);
}
/*
 * NMI handler: counts the event per-cpu and routes it through the
 * die-notifier chain; a NOTIFY_BAD verdict is fatal.
 */
BUILD_TRAP_HANDLER(nmi)
{
	unsigned int cpu = smp_processor_id();
	TRAP_HANDLER_DECL;

	nmi_enter();
	nmi_count(cpu)++;

	switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
	case NOTIFY_OK:
	case NOTIFY_STOP:
		break;
	case NOTIFY_BAD:
		die("Fatal Non-Maskable Interrupt", regs, SIGINT);
		/* NOTE(review): no break here — presumably die() does not
		 * return, making the fallthrough unreachable; confirm. */
	default:
		printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
		break;
	}

	nmi_exit();
}
| gpl-2.0 |
Renzo-Olivares/BAMF_android_kernel_htc_msm8660 | arch/x86/kernel/init_task.c | 8809 | 1278 | #include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
/* Static signal state shared by the INIT_TASK() initializer below. */
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
| gpl-2.0 |
Dee-UK/RK3188_KK_4.4.02_Beta | drivers/video/fbcvt.c | 11369 | 9506 | /*
* linux/drivers/video/fbcvt.c - VESA(TM) Coordinated Video Timings
*
* Copyright (C) 2005 Antonino Daplas <adaplas@pol.net>
*
* Based from the VESA(TM) Coordinated Video Timing Generator by
* Graham Loveridge April 9, 2003 available at
* http://www.elo.utfsm.cl/~elo212/docs/CVTd6r1.xls
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/fb.h>
#include <linux/slab.h>
/* Constants from the VESA CVT spec (times in microseconds, sizes in pixels/lines). */
#define FB_CVT_CELLSIZE 8
#define FB_CVT_GTF_C 40
#define FB_CVT_GTF_J 20
#define FB_CVT_GTF_K 128
#define FB_CVT_GTF_M 600
#define FB_CVT_MIN_VSYNC_BP 550
#define FB_CVT_MIN_VPORCH 3
#define FB_CVT_MIN_BPORCH 6

/* Reduced-blanking variants. */
#define FB_CVT_RB_MIN_VBLANK 460
#define FB_CVT_RB_HBLANK 160
#define FB_CVT_RB_V_FPORCH 3

/* Flag bits for fb_cvt_data.flags. */
#define FB_CVT_FLAG_REDUCED_BLANK 1
#define FB_CVT_FLAG_MARGINS 2
#define FB_CVT_FLAG_INTERLACED 4
/* Working state for one CVT timing computation. */
struct fb_cvt_data {
	u32 xres;
	u32 yres;
	u32 refresh;
	u32 f_refresh;		/* field refresh rate */
	u32 pixclock;
	u32 hperiod;		/* horizontal period, scaled by 1000 */
	u32 hblank;
	u32 hfreq;
	u32 htotal;
	u32 vtotal;
	u32 vsync;
	u32 hsync;
	u32 h_front_porch;
	u32 h_back_porch;
	u32 v_front_porch;
	u32 v_back_porch;
	u32 h_margin;
	u32 v_margin;
	u32 interlace;
	u32 aspect_ratio;	/* index into fb_cvt_vbi_tab */
	u32 active_pixels;
	u32 flags;		/* FB_CVT_FLAG_* */
	u32 status;		/* nonzero: not a CVT-standard mode */
};
/* Vertical sync width (lines) per aspect-ratio index. */
static const unsigned char fb_cvt_vbi_tab[] = {
	4, /* 4:3 */
	5, /* 16:9 */
	6, /* 16:10 */
	7, /* 5:4 */
	7, /* 15:9 */
	8, /* reserved */
	9, /* reserved */
	10 /* custom */
};
/*
 * Estimate the horizontal period.  Returns hperiod * 1000.
 * Integer division order matters here; do not refactor the arithmetic.
 */
static u32 fb_cvt_hperiod(struct fb_cvt_data *cvt)
{
	/* 1e9 / refresh = field period in the same scaled units */
	u32 num = 1000000000/cvt->f_refresh;
	u32 den;

	if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
		num -= FB_CVT_RB_MIN_VBLANK * 1000;
		den = 2 * (cvt->yres/cvt->interlace + 2 * cvt->v_margin);
	} else {
		num -= FB_CVT_MIN_VSYNC_BP * 1000;
		den = 2 * (cvt->yres/cvt->interlace + cvt->v_margin * 2
			+ FB_CVT_MIN_VPORCH + cvt->interlace/2);
	}

	return 2 * (num/den);
}
/* Compute the GTF ideal duty cycle.  Returns duty cycle * 1000. */
static u32 fb_cvt_ideal_duty_cycle(struct fb_cvt_data *cvt)
{
	u32 c_prime = (FB_CVT_GTF_C - FB_CVT_GTF_J) * FB_CVT_GTF_K +
		256 * FB_CVT_GTF_J;
	u32 m_prime = FB_CVT_GTF_K * FB_CVT_GTF_M;

	return (1000 * c_prime - ((m_prime * cvt->hperiod)/1000))/256;
}
/*
 * Compute the horizontal blanking interval in pixels, rounded down to a
 * multiple of twice the character cell size.
 */
static u32 fb_cvt_hblank(struct fb_cvt_data *cvt)
{
	u32 hblank = 0;

	if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
		hblank = FB_CVT_RB_HBLANK;
	else {
		u32 ideal_duty_cycle = fb_cvt_ideal_duty_cycle(cvt);
		u32 active_pixels = cvt->active_pixels;

		/* Duty cycle is clamped to a 20% minimum per the CVT spec. */
		if (ideal_duty_cycle < 20000)
			hblank = (active_pixels * 20000)/
				(100000 - 20000);
		else {
			hblank = (active_pixels * ideal_duty_cycle)/
				(100000 - ideal_duty_cycle);
		}
	}

	/* Round down to 2 * cell size. */
	hblank &= ~((2 * FB_CVT_CELLSIZE) - 1);

	return hblank;
}
/*
 * Compute the hsync width: fixed 32 pixels for reduced blanking, 8% of
 * htotal otherwise; rounded down to the character cell size.
 */
static u32 fb_cvt_hsync(struct fb_cvt_data *cvt)
{
	u32 hsync;

	if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
		hsync = 32;
	else
		hsync = (FB_CVT_CELLSIZE * cvt->htotal)/100;

	return hsync & ~(FB_CVT_CELLSIZE - 1);
}
/*
 * Compute the number of vertical blanking lines, clamped from below by
 * the mode's minimum (front porch + vsync + back porch).
 */
static u32 fb_cvt_vbi_lines(struct fb_cvt_data *cvt)
{
	u32 vbi_lines, min_vbi_lines;

	if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
		vbi_lines = (1000 * FB_CVT_RB_MIN_VBLANK)/cvt->hperiod + 1;
		min_vbi_lines = FB_CVT_RB_V_FPORCH + cvt->vsync +
			FB_CVT_MIN_BPORCH;
	} else {
		vbi_lines = (FB_CVT_MIN_VSYNC_BP * 1000)/cvt->hperiod + 1 +
			FB_CVT_MIN_VPORCH;
		min_vbi_lines = cvt->vsync + FB_CVT_MIN_BPORCH +
			FB_CVT_MIN_VPORCH;
	}

	return (vbi_lines < min_vbi_lines) ? min_vbi_lines : vbi_lines;
}
static u32 fb_cvt_vtotal(struct fb_cvt_data *cvt)
{
	/* active lines per field plus margins, the interlace half line,
	 * and the vertical blanking interval */
	u32 total = cvt->yres / cvt->interlace + 2 * cvt->v_margin +
		    cvt->interlace / 2 + fb_cvt_vbi_lines(cvt);

	/* interlaced modes (interlace == 2) force the total odd */
	return total | (cvt->interlace / 2);
}
static u32 fb_cvt_pixclock(struct fb_cvt_data *cvt)
{
	u32 clock;

	if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK)
		clock = (cvt->f_refresh * cvt->vtotal * cvt->htotal) / 1000;
	else
		clock = (cvt->htotal * 1000000) / cvt->hperiod;

	/* quantize down to a multiple of 250, then scale by 1000 */
	return (clock - clock % 250) * 1000;
}
static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt)
{
	/* CVT-defined aspect ratios; the index doubles as the return code */
	static const u32 ratio[][2] = {
		{ 4, 3 }, { 16, 9 }, { 16, 10 }, { 5, 4 }, { 15, 9 },
	};
	u32 i;

	for (i = 0; i < sizeof(ratio) / sizeof(ratio[0]); i++) {
		u32 scaled = cvt->yres * ratio[i][0];

		/* the ratio must divide yres exactly and reproduce xres */
		if (!(scaled % ratio[i][1]) &&
		    cvt->xres == scaled / ratio[i][1])
			return i;
	}

	printk(KERN_INFO "fbcvt: Aspect ratio not CVT "
	       "standard\n");
	cvt->status = 1;

	return 7;
}
/*
 * Log the CVT name of the computed mode (e.g. "fbcvt: 1280x1024@60:
 * CVT Name - 1.24M4"), or a "Not a CVT standard" note when the mode
 * fell outside the spec.
 *
 * Hardening: snprintf() returns the length that *would* have been
 * written, not the number of bytes actually stored.  The previous code
 * subtracted that raw value from @cnt, so a truncated write could drive
 * the int negative and the next call would receive it as a huge size_t.
 * Clamp each advance to the space actually remaining.
 */
static void fb_cvt_print_name(struct fb_cvt_data *cvt)
{
	u32 pixcount, pixcount_mod;
	int cnt = 255, offset = 0, read = 0;
	u8 *buf = kzalloc(256, GFP_KERNEL);

	if (!buf)
		return;

	/* active pixel count per frame, split into whole/fractional Mpix */
	pixcount = (cvt->xres * (cvt->yres/cvt->interlace))/1000000;
	pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
	pixcount_mod /= 1000;

	read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
			cvt->xres, cvt->yres, cvt->refresh);
	if (read > cnt)
		read = cnt;
	offset += read;
	cnt -= read;

	if (cvt->status)
		snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
			 "Pixel Image\n", pixcount, pixcount_mod);
	else {
		if (pixcount) {
			read = snprintf(buf+offset, cnt, "%d", pixcount);
			if (read > cnt)
				read = cnt;
			cnt -= read;
			offset += read;
		}

		read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
		if (read > cnt)
			read = cnt;
		cnt -= read;
		offset += read;

		/* aspect-ratio suffix, per the codes from
		 * fb_cvt_aspect_ratio() */
		if (cvt->aspect_ratio == 0)
			read = snprintf(buf+offset, cnt, "3");
		else if (cvt->aspect_ratio == 3)
			read = snprintf(buf+offset, cnt, "4");
		else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
			read = snprintf(buf+offset, cnt, "9");
		else if (cvt->aspect_ratio == 2)
			read = snprintf(buf+offset, cnt, "A");
		else
			read = 0;
		if (read > cnt)
			read = cnt;
		cnt -= read;
		offset += read;

		if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
			read = snprintf(buf+offset, cnt, "-R");
			if (read > cnt)
				read = cnt;
			cnt -= read;
			offset += read;
		}
	}

	printk(KERN_INFO "%s\n", buf);
	kfree(buf);
}
static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
				   struct fb_videomode *mode)
{
	/* copy the computed CVT timings into the caller's videomode */
	mode->refresh      = cvt->f_refresh;
	mode->pixclock     = KHZ2PICOS(cvt->pixclock / 1000);
	mode->left_margin  = cvt->h_back_porch;
	mode->right_margin = cvt->h_front_porch;
	mode->hsync_len    = cvt->hsync;
	mode->upper_margin = cvt->v_back_porch;
	mode->lower_margin = cvt->v_front_porch;
	mode->vsync_len    = cvt->vsync;

	/* sync polarity: hsync active-high for reduced blanking,
	 * vsync active-high otherwise */
	mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
	mode->sync |= (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) ?
		      FB_SYNC_HOR_HIGH_ACT : FB_SYNC_VERT_HIGH_ACT;
}
/*
* fb_find_mode_cvt - calculate mode using VESA(TM) CVT
* @mode: pointer to fb_videomode; xres, yres, refresh and vmode must be
* pre-filled with the desired values
* @margins: add margin to calculation (1.8% of xres and yres)
* @rb: compute with reduced blanking (for flatpanels)
*
* RETURNS:
* 0 for success
* @mode is filled with computed values. If interlaced, the refresh field
* will be filled with the field rate (2x the frame rate)
*
* DESCRIPTION:
* Computes video timings using VESA(TM) Coordinated Video Timings
*/
int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb)
{
	struct fb_cvt_data cvt;

	memset(&cvt, 0, sizeof(cvt));

	if (margins)
		cvt.flags |= FB_CVT_FLAG_MARGINS;
	if (rb)
		cvt.flags |= FB_CVT_FLAG_REDUCED_BLANK;
	if (mode->vmode & FB_VMODE_INTERLACED)
		cvt.flags |= FB_CVT_FLAG_INTERLACED;

	cvt.xres = mode->xres;
	cvt.yres = mode->yres;
	cvt.refresh = mode->refresh;
	cvt.f_refresh = cvt.refresh;
	cvt.interlace = 1;

	if (!cvt.xres || !cvt.yres || !cvt.refresh) {
		printk(KERN_INFO "fbcvt: Invalid input parameters\n");
		return 1;
	}

	/* non-standard refresh rates are still computed, only flagged */
	if (!(cvt.refresh == 50 || cvt.refresh == 60 || cvt.refresh == 70 ||
	      cvt.refresh == 85)) {
		printk(KERN_INFO "fbcvt: Refresh rate not CVT standard\n");
		cvt.status = 1;
	}

	/* horizontal resolution rounds down to the character cell */
	cvt.xres &= ~(FB_CVT_CELLSIZE - 1);

	if (cvt.flags & FB_CVT_FLAG_INTERLACED) {
		cvt.interlace = 2;
		cvt.f_refresh *= 2;	/* field rate = 2 * frame rate */
	}

	if ((cvt.flags & FB_CVT_FLAG_REDUCED_BLANK) && cvt.refresh != 60) {
		printk(KERN_INFO "fbcvt: 60Hz refresh rate "
		       "advised for reduced blanking\n");
		cvt.status = 1;
	}

	if (cvt.flags & FB_CVT_FLAG_MARGINS) {
		/* margins are 1.8% of the active area */
		cvt.h_margin = (cvt.xres * 18)/1000;
		cvt.h_margin &= ~(FB_CVT_CELLSIZE - 1);
		cvt.v_margin = ((cvt.yres/cvt.interlace)* 18)/1000;
	}

	/*
	 * Derive the timings in dependency order: hperiod feeds the
	 * blanking estimates, vsync feeds vtotal, hblank feeds htotal.
	 */
	cvt.aspect_ratio = fb_cvt_aspect_ratio(&cvt);
	cvt.active_pixels = cvt.xres + 2 * cvt.h_margin;
	cvt.hperiod = fb_cvt_hperiod(&cvt);
	cvt.vsync = fb_cvt_vbi_tab[cvt.aspect_ratio];
	cvt.vtotal = fb_cvt_vtotal(&cvt);
	cvt.hblank = fb_cvt_hblank(&cvt);
	cvt.htotal = cvt.active_pixels + cvt.hblank;
	cvt.hsync = fb_cvt_hsync(&cvt);
	cvt.pixclock = fb_cvt_pixclock(&cvt);
	cvt.hfreq = cvt.pixclock / cvt.htotal;
	cvt.h_back_porch = cvt.hblank/2 + cvt.h_margin;
	cvt.h_front_porch = cvt.hblank - cvt.hsync - cvt.h_back_porch +
			    2 * cvt.h_margin;
	cvt.v_back_porch = 3 + cvt.v_margin;
	cvt.v_front_porch = cvt.vtotal - cvt.yres/cvt.interlace -
			    cvt.v_back_porch - cvt.vsync;

	fb_cvt_print_name(&cvt);
	fb_cvt_convert_to_mode(&cvt, mode);

	return 0;
}
| gpl-2.0 |
lollipop-og/android_kernel_geehrc | arch/arm/kernel/isa.c | 12137 | 1662 | /*
* linux/arch/arm/kernel/isa.c
*
* Copyright (C) 1999 Phil Blundell
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* ISA shared memory and I/O port support, and is required to support
* iopl, inb, outb and friends in userspace via glibc emulation.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/io.h>
/*
 * ISA window parameters published read-only via sysctl as
 * /proc/sys/bus/isa/{membase,portbase,portshift}; filled in once by
 * register_isa_ports() below.
 */
static unsigned int isa_membase, isa_portbase, isa_portshift;

/* Leaf entries: three read-only (0444) integers plus a terminating
 * empty entry. */
static ctl_table ctl_isa_vars[4] = {
{
.procname = "membase",
.data = &isa_membase,
.maxlen = sizeof(isa_membase),
.mode = 0444,
.proc_handler = proc_dointvec,
}, {
.procname = "portbase",
.data = &isa_portbase,
.maxlen = sizeof(isa_portbase),
.mode = 0444,
.proc_handler = proc_dointvec,
}, {
.procname = "portshift",
.data = &isa_portshift,
.maxlen = sizeof(isa_portshift),
.mode = 0444,
.proc_handler = proc_dointvec,
}, {}
};
/* Handle from register_sysctl_table(); kept for the system's lifetime,
 * never unregistered. */
static struct ctl_table_header *isa_sysctl_header;

/* "isa" directory (mode 0555) holding the leaf entries above. */
static ctl_table ctl_isa[2] = {
{
.procname = "isa",
.mode = 0555,
.child = ctl_isa_vars,
}, {}
};

/* Top-level "bus" directory, i.e. /proc/sys/bus. */
static ctl_table ctl_bus[2] = {
{
.procname = "bus",
.mode = 0555,
.child = ctl_isa,
}, {}
};
/*
 * Record this platform's ISA memory/port mapping and publish it under
 * /proc/sys/bus/isa so userspace (e.g. glibc inb/outb emulation) can
 * discover it.
 *
 * NOTE(review): the register_sysctl_table() return value is not checked;
 * on failure isa_sysctl_header stays NULL and the sysctl entries are
 * silently absent — presumably acceptable at __init time, but confirm.
 */
void __init
register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
{
isa_membase = membase;
isa_portbase = portbase;
isa_portshift = portshift;
isa_sysctl_header = register_sysctl_table(ctl_bus);
}
| gpl-2.0 |
Pafcholini/emotion_beta_511_no_updates | drivers/video/console/font_sun12x22.c | 14697 | 185842 | #include <linux/font.h>
#define FONTDATAMAX 11264
static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
/* 0 0x00 '^@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 1 0x01 '^A' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x30, 0x60, /* 001100000110 */
0x65, 0x30, /* 011001010011 */
0x6d, 0xb0, /* 011011011011 */
0x60, 0x30, /* 011000000011 */
0x62, 0x30, /* 011000100011 */
0x62, 0x30, /* 011000100011 */
0x60, 0x30, /* 011000000011 */
0x6f, 0xb0, /* 011011111011 */
0x67, 0x30, /* 011001110011 */
0x30, 0x60, /* 001100000110 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 2 0x02 '^B' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x3f, 0xe0, /* 001111111110 */
0x7a, 0xf0, /* 011110101111 */
0x72, 0x70, /* 011100100111 */
0x7f, 0xf0, /* 011111111111 */
0x7d, 0xf0, /* 011111011111 */
0x7d, 0xf0, /* 011111011111 */
0x7f, 0xf0, /* 011111111111 */
0x70, 0x70, /* 011100000111 */
0x78, 0xf0, /* 011110001111 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 3 0x03 '^C' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x3f, 0xc0, /* 001111111100 */
0x7f, 0xe0, /* 011111111110 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 4 0x04 '^D' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x1f, 0xc0, /* 000111111100 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xc0, /* 000111111100 */
0x1f, 0xc0, /* 000111111100 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x07, 0x00, /* 000001110000 */
0x02, 0x00, /* 000000100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 5 0x05 '^E' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x07, 0x00, /* 000001110000 */
0x02, 0x00, /* 000000100000 */
0x18, 0xc0, /* 000110001100 */
0x3d, 0xe0, /* 001111011110 */
0x3d, 0xe0, /* 001111011110 */
0x1a, 0xc0, /* 000110101100 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 6 0x06 '^F' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x36, 0xc0, /* 001101101100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 7 0x07 '^G' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 8 0x08 '^H' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xf9, 0xf0, /* 111110011111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xe0, 0x70, /* 111000000111 */
0xe0, 0x70, /* 111000000111 */
0xc0, 0x30, /* 110000000011 */
0xc0, 0x30, /* 110000000011 */
0xe0, 0x70, /* 111000000111 */
0xe0, 0x70, /* 111000000111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xf9, 0xf0, /* 111110011111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 9 0x09 '^I' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 10 0x0a '^J' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xf9, 0xf0, /* 111110011111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xe6, 0x70, /* 111001100111 */
0xe6, 0x70, /* 111001100111 */
0xcf, 0x30, /* 110011110011 */
0xcf, 0x30, /* 110011110011 */
0xe6, 0x70, /* 111001100111 */
0xe6, 0x70, /* 111001100111 */
0xf0, 0xf0, /* 111100001111 */
0xf0, 0xf0, /* 111100001111 */
0xf9, 0xf0, /* 111110011111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 11 0x0b '^K' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xe0, /* 000011111110 */
0x0f, 0xe0, /* 000011111110 */
0x01, 0xe0, /* 000000011110 */
0x03, 0x60, /* 000000110110 */
0x06, 0x60, /* 000001100110 */
0x1e, 0x00, /* 000111100000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x61, 0x80, /* 011000011000 */
0x61, 0x80, /* 011000011000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 12 0x0c '^L' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 13 0x0d '^M' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xe0, /* 000011111110 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x60, /* 000011000110 */
0x0f, 0xe0, /* 000011111110 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x3c, 0x00, /* 001111000000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 14 0x0e '^N' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x1f, 0xe0, /* 000111111110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x18, 0x60, /* 000110000110 */
0x19, 0xe0, /* 000110011110 */
0x1b, 0xe0, /* 000110111110 */
0x1b, 0xc0, /* 000110111100 */
0x79, 0x80, /* 011110011000 */
0xf8, 0x00, /* 111110000000 */
0xf0, 0x00, /* 111100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 15 0x0f '^O' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0xc0, /* 000110001100 */
0x0d, 0x80, /* 000011011000 */
0x6d, 0xb0, /* 011011011011 */
0x3d, 0xe0, /* 001111011110 */
0x00, 0x00, /* 000000000000 */
0x3d, 0xe0, /* 001111011110 */
0x6d, 0xb0, /* 011011011011 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 16 0x10 '^P' */
0x00, 0x00, /* 000000000000 */
0x00, 0x20, /* 000000000010 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x01, 0xe0, /* 000000011110 */
0x03, 0xe0, /* 000000111110 */
0x07, 0xe0, /* 000001111110 */
0x0f, 0xe0, /* 000011111110 */
0x1f, 0xe0, /* 000111111110 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xe0, /* 011111111110 */
0x3f, 0xe0, /* 001111111110 */
0x1f, 0xe0, /* 000111111110 */
0x0f, 0xe0, /* 000011111110 */
0x07, 0xe0, /* 000001111110 */
0x03, 0xe0, /* 000000111110 */
0x01, 0xe0, /* 000000011110 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x20, /* 000000000010 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 17 0x11 '^Q' */
0x00, 0x00, /* 000000000000 */
0x40, 0x00, /* 010000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x00, /* 011100000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x7e, 0x00, /* 011111100000 */
0x7f, 0x00, /* 011111110000 */
0x7f, 0x80, /* 011111111000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0x80, /* 011111111000 */
0x7f, 0x00, /* 011111110000 */
0x7e, 0x00, /* 011111100000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x60, 0x00, /* 011000000000 */
0x40, 0x00, /* 010000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 18 0x12 '^R' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 19 0x13 '^S' */
0x00, 0x00, /* 000000000000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 20 0x14 '^T' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xf0, /* 000111111111 */
0x3c, 0xc0, /* 001111001100 */
0x7c, 0xc0, /* 011111001100 */
0x7c, 0xc0, /* 011111001100 */
0x7c, 0xc0, /* 011111001100 */
0x3c, 0xc0, /* 001111001100 */
0x1c, 0xc0, /* 000111001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x1c, 0xe0, /* 000111001110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 21 0x15 '^U' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x1f, 0x00, /* 000111110000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 22 0x16 '^V' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 23 0x17 '^W' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 24 0x18 '^X' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 25 0x19 '^Y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 26 0x1a '^Z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x08, 0x00, /* 000010000000 */
0x18, 0x00, /* 000110000000 */
0x38, 0x00, /* 001110000000 */
0x7f, 0xe0, /* 011111111110 */
0xff, 0xe0, /* 111111111110 */
0x7f, 0xe0, /* 011111111110 */
0x38, 0x00, /* 001110000000 */
0x18, 0x00, /* 000110000000 */
0x08, 0x00, /* 000010000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 27 0x1b '^[' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x00, /* 000000010000 */
0x01, 0x80, /* 000000011000 */
0x01, 0xc0, /* 000000011100 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xf0, /* 011111111111 */
0x7f, 0xe0, /* 011111111110 */
0x01, 0xc0, /* 000000011100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x00, /* 000000010000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 28 0x1c '^\' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x3f, 0xe0, /* 001111111110 */
0x3f, 0xe0, /* 001111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 29 0x1d '^]' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x09, 0x00, /* 000010010000 */
0x19, 0x80, /* 000110011000 */
0x39, 0xc0, /* 001110011100 */
0x7f, 0xe0, /* 011111111110 */
0xff, 0xf0, /* 111111111111 */
0x7f, 0xe0, /* 011111111110 */
0x39, 0xc0, /* 001110011100 */
0x19, 0x80, /* 000110011000 */
0x09, 0x00, /* 000010010000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 30 0x1e '^^' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x3f, 0x80, /* 001111111000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 31 0x1f '^_' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 32 0x20 ' ' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 33 0x21 '!' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 34 0x22 '"' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 35 0x23 '#' */
0x00, 0x00, /* 000000000000 */
0x03, 0x30, /* 000000110011 */
0x03, 0x30, /* 000000110011 */
0x03, 0x30, /* 000000110011 */
0x06, 0x60, /* 000001100110 */
0x1f, 0xf0, /* 000111111111 */
0x1f, 0xf0, /* 000111111111 */
0x0c, 0xc0, /* 000011001100 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x7f, 0xc0, /* 011111111100 */
0x7f, 0xc0, /* 011111111100 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 36 0x24 '$' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x3f, 0xc0, /* 001111111100 */
0x66, 0xe0, /* 011001101110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x00, /* 011001100000 */
0x3e, 0x00, /* 001111100000 */
0x1f, 0x80, /* 000111111000 */
0x07, 0xc0, /* 000001111100 */
0x06, 0x60, /* 000001100110 */
0x06, 0x60, /* 000001100110 */
0x66, 0x60, /* 011001100110 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 37 0x25 '%' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x38, 0xc0, /* 001110001100 */
0x4c, 0xc0, /* 010011001100 */
0x45, 0x80, /* 010001011000 */
0x65, 0x80, /* 011001011000 */
0x3b, 0x00, /* 001110110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xc0, /* 000011011100 */
0x1a, 0x60, /* 000110100110 */
0x1a, 0x20, /* 000110100010 */
0x33, 0x20, /* 001100110010 */
0x31, 0xc0, /* 001100011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 38 0x26 '&' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x0f, 0x80, /* 000011111000 */
0x1e, 0x00, /* 000111100000 */
0x3e, 0x00, /* 001111100000 */
0x77, 0x00, /* 011101110000 */
0x63, 0x60, /* 011000110110 */
0x61, 0xe0, /* 011000011110 */
0x61, 0xc0, /* 011000011100 */
0x61, 0x80, /* 011000011000 */
0x3f, 0xe0, /* 001111111110 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 39 0x27 ''' */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 40 0x28 '(' */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 41 0x29 ')' */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 42 0x2a '*' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x66, 0x60, /* 011001100110 */
0x76, 0xe0, /* 011101101110 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x76, 0xe0, /* 011101101110 */
0x66, 0x60, /* 011001100110 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 43 0x2b '+' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 44 0x2c ',' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
/* 45 0x2d '-' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 46 0x2e '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 47 0x2f '/' */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 48 0x30 '0' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0x80, /* 000100011000 */
0x10, 0xc0, /* 000100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x80, /* 001100001000 */
0x18, 0x80, /* 000110001000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 49 0x31 '1' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x06, 0x00, /* 000001100000 */
0x0e, 0x00, /* 000011100000 */
0x1e, 0x00, /* 000111100000 */
0x36, 0x00, /* 001101100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 50 0x32 '2' */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x3f, 0x80, /* 001111111000 */
0x61, 0xc0, /* 011000011100 */
0x40, 0xc0, /* 010000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 51 0x33 '3' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x1f, 0xc0, /* 000111111100 */
0x20, 0xe0, /* 001000001110 */
0x40, 0x60, /* 010000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x07, 0xc0, /* 000001111100 */
0x0f, 0xc0, /* 000011111100 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0x40, /* 011000000100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 52 0x34 '4' */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x80, /* 000000111000 */
0x03, 0x80, /* 000000111000 */
0x05, 0x80, /* 000001011000 */
0x05, 0x80, /* 000001011000 */
0x09, 0x80, /* 000010011000 */
0x09, 0x80, /* 000010011000 */
0x11, 0x80, /* 000100011000 */
0x11, 0x80, /* 000100011000 */
0x21, 0x80, /* 001000011000 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xe0, /* 011111111110 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 53 0x35 '5' */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x00, /* 000100000000 */
0x10, 0x00, /* 000100000000 */
0x20, 0x00, /* 001000000000 */
0x3f, 0x80, /* 001111111000 */
0x31, 0xc0, /* 001100011100 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 54 0x36 '6' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x67, 0x80, /* 011001111000 */
0x6f, 0xc0, /* 011011111100 */
0x70, 0xe0, /* 011100001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x3f, 0x80, /* 001111111000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 55 0x37 '7' */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x3f, 0xe0, /* 001111111110 */
0x60, 0x40, /* 011000000100 */
0x00, 0x40, /* 000000000100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x80, /* 000000001000 */
0x00, 0x80, /* 000000001000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x00, /* 000000010000 */
0x01, 0x00, /* 000000010000 */
0x03, 0x00, /* 000000110000 */
0x02, 0x00, /* 000000100000 */
0x02, 0x00, /* 000000100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 56 0x38 '8' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0x80, /* 000100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x11, 0x80, /* 000100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 57 0x39 '9' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0xe0, /* 011100001110 */
0x3f, 0x60, /* 001111110110 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x07, 0x00, /* 000001110000 */
0x3c, 0x00, /* 001111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 58 0x3a ':' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 59 0x3b ';' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x10, 0x00, /* 000100000000 */
0x00, 0x00, /* 000000000000 */
/* 60 0x3c '<' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x01, 0xc0, /* 000000011100 */
0x07, 0x00, /* 000001110000 */
0x1c, 0x00, /* 000111000000 */
0x70, 0x00, /* 011100000000 */
0x70, 0x00, /* 011100000000 */
0x1c, 0x00, /* 000111000000 */
0x07, 0x00, /* 000001110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 61 0x3d '=' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 62 0x3e '>' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x38, 0x00, /* 001110000000 */
0x0e, 0x00, /* 000011100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0xe0, /* 000000001110 */
0x00, 0xe0, /* 000000001110 */
0x03, 0x80, /* 000000111000 */
0x0e, 0x00, /* 000011100000 */
0x38, 0x00, /* 001110000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 63 0x3f '?' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x39, 0xc0, /* 001110011100 */
0x20, 0xc0, /* 001000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 64 0x40 '@' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x3f, 0xc0, /* 001111111100 */
0x30, 0x60, /* 001100000110 */
0x60, 0x60, /* 011000000110 */
0x67, 0x20, /* 011001110010 */
0x6f, 0xa0, /* 011011111010 */
0x6c, 0xa0, /* 011011001010 */
0x6c, 0xa0, /* 011011001010 */
0x67, 0xe0, /* 011001111110 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x3f, 0xe0, /* 001111111110 */
0x0f, 0xe0, /* 000011111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 65 0x41 'A' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x09, 0x00, /* 000010010000 */
0x11, 0x80, /* 000100011000 */
0x11, 0x80, /* 000100011000 */
0x10, 0x80, /* 000100001000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x40, /* 001000000100 */
0x40, 0x60, /* 010000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 66 0x42 'B' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x60, 0x80, /* 011000001000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x61, 0x80, /* 011000011000 */
0x7f, 0x80, /* 011111111000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0xc0, /* 011000001100 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 67 0x43 'C' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x20, 0x00, /* 001000000000 */
0x30, 0x20, /* 001100000010 */
0x18, 0x40, /* 000110000100 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 68 0x44 'D' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x40, /* 011000000100 */
0x61, 0x80, /* 011000011000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 69 0x45 'E' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 70 0x46 'F' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xc0, /* 011111111100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 71 0x47 'G' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x61, 0xf0, /* 011000011111 */
0x60, 0x60, /* 011000000110 */
0x20, 0x60, /* 001000000110 */
0x30, 0x60, /* 001100000110 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 72 0x48 'H' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0xf0, 0xf0, /* 111100001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 73 0x49 'I' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 74 0x4a 'J' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x38, 0x00, /* 001110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
/* 75 0x4b 'K' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xe0, /* 111100001110 */
0x61, 0x80, /* 011000011000 */
0x63, 0x00, /* 011000110000 */
0x66, 0x00, /* 011001100000 */
0x6c, 0x00, /* 011011000000 */
0x78, 0x00, /* 011110000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xe0, /* 011000001110 */
0xf0, 0x70, /* 111100000111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 76 0x4c 'L' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 77 0x4d 'M' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xe0, 0x70, /* 111000000111 */
0x60, 0xe0, /* 011000001110 */
0x70, 0xe0, /* 011100001110 */
0x70, 0xe0, /* 011100001110 */
0x70, 0xe0, /* 011100001110 */
0x59, 0x60, /* 010110010110 */
0x59, 0x60, /* 010110010110 */
0x59, 0x60, /* 010110010110 */
0x4d, 0x60, /* 010011010110 */
0x4e, 0x60, /* 010011100110 */
0x4e, 0x60, /* 010011100110 */
0x44, 0x60, /* 010001000110 */
0x44, 0x60, /* 010001000110 */
0xe4, 0xf0, /* 111001001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 78 0x4e 'N' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xc0, 0x70, /* 110000000111 */
0x60, 0x20, /* 011000000010 */
0x70, 0x20, /* 011100000010 */
0x78, 0x20, /* 011110000010 */
0x58, 0x20, /* 010110000010 */
0x4c, 0x20, /* 010011000010 */
0x46, 0x20, /* 010001100010 */
0x47, 0x20, /* 010001110010 */
0x43, 0x20, /* 010000110010 */
0x41, 0xa0, /* 010000011010 */
0x40, 0xe0, /* 010000001110 */
0x40, 0xe0, /* 010000001110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0x30, /* 111000000011 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 79 0x4f 'O' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x20, 0x40, /* 001000000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 80 0x50 'P' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0x80, /* 011111111000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0xc0, /* 001100001100 */
0x37, 0x80, /* 001101111000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 81 0x51 'Q' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0x40, /* 001100000100 */
0x38, 0x40, /* 001110000100 */
0x1f, 0x80, /* 000111111000 */
0x0e, 0x00, /* 000011100000 */
0x1f, 0x00, /* 000111110000 */
0x23, 0x90, /* 001000111001 */
0x01, 0xe0, /* 000000011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 82 0x52 'R' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x00, /* 111111110000 */
0x61, 0x80, /* 011000011000 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0x80, /* 011000001000 */
0x7f, 0x00, /* 011111110000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0x61, 0xc0, /* 011000011100 */
0x60, 0xe0, /* 011000001110 */
0xf0, 0x70, /* 111100000111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 83 0x53 'S' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xe0, /* 000111111110 */
0x30, 0x60, /* 001100000110 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x00, /* 011100000000 */
0x3c, 0x00, /* 001111000000 */
0x1e, 0x00, /* 000111100000 */
0x07, 0x80, /* 000001111000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0xe0, /* 000000001110 */
0x40, 0x60, /* 010000000110 */
0x40, 0x60, /* 010000000110 */
0x60, 0xc0, /* 011000001100 */
0x7f, 0x80, /* 011111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 84 0x54 'T' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x46, 0x20, /* 010001100010 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 85 0x55 'U' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x40, /* 011100000100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 86 0x56 'V' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xe0, 0xe0, /* 111000001110 */
0x60, 0x40, /* 011000000100 */
0x30, 0x80, /* 001100001000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x80, /* 001100001000 */
0x19, 0x00, /* 000110010000 */
0x19, 0x00, /* 000110010000 */
0x19, 0x00, /* 000110010000 */
0x0a, 0x00, /* 000010100000 */
0x0e, 0x00, /* 000011100000 */
0x0e, 0x00, /* 000011100000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 87 0x57 'W' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0xf0, /* 111111101111 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x76, 0x20, /* 011101100010 */
0x77, 0x40, /* 011101110100 */
0x33, 0x40, /* 001100110100 */
0x37, 0x40, /* 001101110100 */
0x3b, 0xc0, /* 001110111100 */
0x3b, 0x80, /* 001110111000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 88 0x58 'X' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x38, 0x80, /* 001110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x11, 0x80, /* 000100011000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 89 0x59 'Y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 90 0x5a 'Z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xe0, /* 001111111110 */
0x20, 0xc0, /* 001000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x20, /* 000110000010 */
0x3f, 0xe0, /* 001111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 91 0x5b '[' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0f, 0x80, /* 000011111000 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 92 0x5c '\' */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 93 0x5d ']' */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x1f, 0x00, /* 000111110000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 94 0x5e '^' */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1b, 0x00, /* 000110110000 */
0x31, 0x80, /* 001100011000 */
0x60, 0xc0, /* 011000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 95 0x5f '_' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 96 0x60 '`' */
0x00, 0x00, /* 000000000000 */
0x01, 0x00, /* 000000010000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0x80, /* 000001111000 */
0x07, 0x80, /* 000001111000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 97 0x61 'a' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 98 0x62 'b' */
0x00, 0x00, /* 000000000000 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0xe0, 0x00, /* 111000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x67, 0x80, /* 011001111000 */
0x6f, 0xc0, /* 011011111100 */
0x70, 0xe0, /* 011100001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x60, /* 011100000110 */
0x78, 0xc0, /* 011110001100 */
0x4f, 0x80, /* 010011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 99 0x63 'c' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0xc0, /* 001100011100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x40, /* 011100000100 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 100 0x64 'd' */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x00, 0xe0, /* 000000001110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x0f, 0x60, /* 000011110110 */
0x31, 0xe0, /* 001100011110 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0xe0, /* 011100001110 */
0x39, 0x60, /* 001110010110 */
0x1e, 0x70, /* 000111100111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 101 0x65 'e' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 102 0x66 'f' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x04, 0xc0, /* 000001001100 */
0x04, 0xc0, /* 000001001100 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x3f, 0x80, /* 001111111000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x1e, 0x00, /* 000111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 103 0x67 'g' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x20, /* 000111110010 */
0x31, 0xe0, /* 001100011110 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x31, 0x80, /* 001100011000 */
0x3f, 0x00, /* 001111110000 */
0x60, 0x00, /* 011000000000 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0xe0, /* 001111111110 */
0x20, 0x60, /* 001000000110 */
0x40, 0x20, /* 010000000010 */
0x40, 0x20, /* 010000000010 */
0x7f, 0xc0, /* 011111111100 */
0x3f, 0x80, /* 001111111000 */
0x00, 0x00, /* 000000000000 */
/* 104 0x68 'h' */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x70, 0x00, /* 011100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x37, 0x80, /* 001101111000 */
0x39, 0xc0, /* 001110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 105 0x69 'i' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 106 0x6a 'j' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0xc0, /* 000000111100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x20, 0xc0, /* 001000001100 */
0x30, 0xc0, /* 001100001100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
/* 107 0x6b 'k' */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0xe0, 0x00, /* 111000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x61, 0xc0, /* 011000011100 */
0x63, 0x00, /* 011000110000 */
0x66, 0x00, /* 011001100000 */
0x7c, 0x00, /* 011111000000 */
0x78, 0x00, /* 011110000000 */
0x7c, 0x00, /* 011111000000 */
0x6e, 0x00, /* 011011100000 */
0x67, 0x00, /* 011001110000 */
0x63, 0x80, /* 011000111000 */
0xf1, 0xe0, /* 111100011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 108 0x6c 'l' */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 109 0x6d 'm' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xdd, 0xc0, /* 110111011100 */
0x6e, 0xe0, /* 011011101110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0xef, 0x70, /* 111011110111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 110 0x6e 'n' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x27, 0x80, /* 001001111000 */
0x79, 0xc0, /* 011110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 111 0x6f 'o' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 112 0x70 'p' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xef, 0x80, /* 111011111000 */
0x71, 0xc0, /* 011100011100 */
0x60, 0xe0, /* 011000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x40, /* 011000000100 */
0x70, 0x80, /* 011100001000 */
0x7f, 0x00, /* 011111110000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0xf0, 0x00, /* 111100000000 */
0x00, 0x00, /* 000000000000 */
/* 113 0x71 'q' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x20, /* 000011110010 */
0x11, 0xe0, /* 000100011110 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x60, /* 011100000110 */
0x38, 0xe0, /* 001110001110 */
0x1f, 0xe0, /* 000111111110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0x60, /* 000000000110 */
0x00, 0xf0, /* 000000001111 */
0x00, 0x00, /* 000000000000 */
/* 114 0x72 'r' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x73, 0x80, /* 011100111000 */
0x34, 0xc0, /* 001101001100 */
0x38, 0xc0, /* 001110001100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x78, 0x00, /* 011110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 115 0x73 's' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0xc0, /* 000111111100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x40, /* 001100000100 */
0x38, 0x00, /* 001110000000 */
0x1e, 0x00, /* 000111100000 */
0x07, 0x80, /* 000001111000 */
0x01, 0xc0, /* 000000011100 */
0x20, 0xc0, /* 001000001100 */
0x30, 0xc0, /* 001100001100 */
0x3f, 0x80, /* 001111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 116 0x74 't' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x7f, 0xc0, /* 011111111100 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x20, /* 000011000010 */
0x0e, 0x40, /* 000011100100 */
0x07, 0x80, /* 000001111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 117 0x75 'u' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 118 0x76 'v' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0x70, /* 111100000111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 119 0x77 'w' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x70, /* 111111110111 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x66, 0x20, /* 011001100010 */
0x37, 0x40, /* 001101110100 */
0x3b, 0x40, /* 001110110100 */
0x3b, 0x40, /* 001110110100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 120 0x78 'x' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf8, 0xf0, /* 111110001111 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1d, 0x00, /* 000111010000 */
0x0e, 0x00, /* 000011100000 */
0x07, 0x00, /* 000001110000 */
0x0b, 0x80, /* 000010111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0xf1, 0xf0, /* 111100011111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 121 0x79 'y' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x08, 0x00, /* 000010000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x00, 0x00, /* 000000000000 */
/* 122 0x7a 'z' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0xe0, /* 011000001110 */
0x41, 0xc0, /* 010000011100 */
0x03, 0x80, /* 000000111000 */
0x07, 0x00, /* 000001110000 */
0x0e, 0x00, /* 000011100000 */
0x1c, 0x00, /* 000111000000 */
0x38, 0x20, /* 001110000010 */
0x70, 0x60, /* 011100000110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 123 0x7b '{' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x38, 0x00, /* 001110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 124 0x7c '|' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
/* 125 0x7d '}' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0xc0, /* 000000011100 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1c, 0x00, /* 000111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 126 0x7e '~' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1c, 0x20, /* 000111000010 */
0x3e, 0x60, /* 001111100110 */
0x67, 0xc0, /* 011001111100 */
0x43, 0x80, /* 010000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 127 0x7f '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
/* 128 0x80 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xc0, /* 000011111100 */
0x10, 0x60, /* 000100000110 */
0x20, 0x20, /* 001000000010 */
0x20, 0x00, /* 001000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x20, 0x00, /* 001000000000 */
0x30, 0x20, /* 001100000010 */
0x18, 0x40, /* 000110000100 */
0x0f, 0x80, /* 000011111000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 129 0x81 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 130 0x82 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 131 0x83 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 132 0x84 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 133 0x85 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 134 0x86 '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x07, 0x00, /* 000001110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 135 0x87 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0xc0, /* 001100011100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x70, 0x40, /* 011100000100 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0x80, /* 000000011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 136 0x88 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 137 0x89 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 138 0x8a '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x00, /* 011000000000 */
0x60, 0x00, /* 011000000000 */
0x30, 0x00, /* 001100000000 */
0x18, 0x60, /* 000110000110 */
0x0f, 0x80, /* 000011111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 139 0x8b '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 140 0x8c '.' */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x0e, 0x00, /* 000011100000 */
0x1b, 0x00, /* 000110110000 */
0x31, 0x80, /* 001100011000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 141 0x8d '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 142 0x8e '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x04, 0x00, /* 000001000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x19, 0x80, /* 000110011000 */
0x11, 0x80, /* 000100011000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x60, /* 011000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 143 0x8f '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x04, 0x00, /* 000001000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x19, 0x80, /* 000110011000 */
0x11, 0x80, /* 000100011000 */
0x3f, 0xc0, /* 001111111100 */
0x20, 0xc0, /* 001000001100 */
0x60, 0x60, /* 011000000110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0xf0, /* 111000001111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 144 0x90 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x08, 0x00, /* 000010000000 */
0x7f, 0xe0, /* 011111111110 */
0x30, 0x20, /* 001100000010 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x80, /* 001100001000 */
0x3f, 0x80, /* 001111111000 */
0x30, 0x80, /* 001100001000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x20, /* 001100000010 */
0x30, 0x20, /* 001100000010 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 145 0x91 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3d, 0xe0, /* 001111011110 */
0x66, 0x30, /* 011001100011 */
0x46, 0x30, /* 010001100011 */
0x06, 0x30, /* 000001100011 */
0x3f, 0xf0, /* 001111111111 */
0x66, 0x00, /* 011001100000 */
0xc6, 0x00, /* 110001100000 */
0xc6, 0x00, /* 110001100000 */
0xe7, 0x30, /* 111001110011 */
0x7d, 0xe0, /* 011111011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 146 0x92 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0xf0, /* 000000111111 */
0x07, 0x10, /* 000001110001 */
0x07, 0x10, /* 000001110001 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x00, /* 000010110000 */
0x0b, 0x20, /* 000010110010 */
0x13, 0xe0, /* 000100111110 */
0x13, 0x20, /* 000100110010 */
0x3f, 0x00, /* 001111110000 */
0x23, 0x00, /* 001000110000 */
0x23, 0x00, /* 001000110000 */
0x43, 0x10, /* 010000110001 */
0x43, 0x10, /* 010000110001 */
0xe7, 0xf0, /* 111001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 147 0x93 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 148 0x94 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 149 0x95 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 150 0x96 '.' */
0x00, 0x00, /* 000000000000 */
0x02, 0x00, /* 000000100000 */
0x07, 0x00, /* 000001110000 */
0x0d, 0x80, /* 000011011000 */
0x18, 0xc0, /* 000110001100 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 151 0x97 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 152 0x98 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xf0, 0xf0, /* 111100001111 */
0x60, 0x20, /* 011000000010 */
0x30, 0x40, /* 001100000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x18, 0x80, /* 000110001000 */
0x0d, 0x00, /* 000011010000 */
0x0d, 0x00, /* 000011010000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x04, 0x00, /* 000001000000 */
0x0c, 0x00, /* 000011000000 */
0x08, 0x00, /* 000010000000 */
0x78, 0x00, /* 011110000000 */
0x70, 0x00, /* 011100000000 */
0x00, 0x00, /* 000000000000 */
/* 153 0x99 '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xc0, /* 001000001100 */
0x20, 0x60, /* 001000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x20, 0x40, /* 001000000100 */
0x30, 0x40, /* 001100000100 */
0x18, 0x80, /* 000110001000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 154 0x9a '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0xe0, 0x30, /* 111000000011 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x60, 0x20, /* 011000000010 */
0x70, 0x40, /* 011100000100 */
0x3f, 0xc0, /* 001111111100 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 155 0x9b '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x36, 0xc0, /* 001101101100 */
0x26, 0xc0, /* 001001101100 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x66, 0x00, /* 011001100000 */
0x76, 0x40, /* 011101100100 */
0x36, 0xc0, /* 001101101100 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 156 0x9c '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x1c, 0xc0, /* 000111001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x7e, 0x00, /* 011111100000 */
0x7e, 0x00, /* 011111100000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x3e, 0x20, /* 001111100010 */
0x7f, 0xe0, /* 011111111110 */
0x61, 0xc0, /* 011000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 157 0x9d '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 158 0x9e '.' */
0x00, 0x00, /* 000000000000 */
0x7f, 0x80, /* 011111111000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0xc0, /* 001100001100 */
0x37, 0x80, /* 001101111000 */
0x30, 0x00, /* 001100000000 */
0x33, 0x00, /* 001100110000 */
0x37, 0x80, /* 001101111000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x00, /* 001100110000 */
0x33, 0x30, /* 001100110011 */
0x31, 0xe0, /* 001100011110 */
0x78, 0xc0, /* 011110001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 159 0x9f '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x01, 0xe0, /* 000000011110 */
0x03, 0x30, /* 000000110011 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x3f, 0xe0, /* 001111111110 */
0x7f, 0xc0, /* 011111111100 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xcc, 0x00, /* 110011000000 */
0x78, 0x00, /* 011110000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
/* 160 0xa0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x18, 0xc0, /* 000110001100 */
0x10, 0xc0, /* 000100001100 */
0x03, 0xc0, /* 000000111100 */
0x1c, 0xc0, /* 000111001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0xe0, /* 000111101110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 161 0xa1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x1e, 0x00, /* 000111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x1f, 0x80, /* 000111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 162 0xa2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 163 0xa3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x01, 0x80, /* 000000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x79, 0xe0, /* 011110011110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x1e, 0x60, /* 000111100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 164 0xa4 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x40, /* 000111000100 */
0x3f, 0xc0, /* 001111111100 */
0x23, 0x80, /* 001000111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x27, 0x80, /* 001001111000 */
0x79, 0xc0, /* 011110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 165 0xa5 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x40, /* 000111000100 */
0x3f, 0xc0, /* 001111111100 */
0x23, 0x80, /* 001000111000 */
0xc0, 0x70, /* 110000000111 */
0x60, 0x20, /* 011000000010 */
0x70, 0x20, /* 011100000010 */
0x78, 0x20, /* 011110000010 */
0x5c, 0x20, /* 010111000010 */
0x4e, 0x20, /* 010011100010 */
0x47, 0x20, /* 010001110010 */
0x43, 0xa0, /* 010000111010 */
0x41, 0xe0, /* 010000011110 */
0x40, 0xe0, /* 010000001110 */
0x40, 0x60, /* 010000000110 */
0xe0, 0x30, /* 111000000011 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 166 0xa6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x01, 0x80, /* 000000011000 */
0x07, 0x80, /* 000001111000 */
0x19, 0x80, /* 000110011000 */
0x31, 0x80, /* 001100011000 */
0x31, 0x80, /* 001100011000 */
0x33, 0x80, /* 001100111000 */
0x1d, 0xc0, /* 000111011100 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 167 0xa7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0x00, /* 000001110000 */
0x19, 0x80, /* 000110011000 */
0x10, 0xc0, /* 000100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0x80, /* 001100001000 */
0x19, 0x80, /* 000110011000 */
0x0e, 0x00, /* 000011100000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 168 0xa8 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x40, /* 001100000100 */
0x39, 0xc0, /* 001110011100 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 169 0xa9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 170 0xaa '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 171 0xab '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x10, 0x00, /* 000100000000 */
0x10, 0x40, /* 000100000100 */
0x10, 0x80, /* 000100001000 */
0x11, 0x00, /* 000100010000 */
0x3a, 0x00, /* 001110100000 */
0x05, 0xc0, /* 000001011100 */
0x0a, 0x20, /* 000010100010 */
0x10, 0x20, /* 000100000010 */
0x20, 0xc0, /* 001000001100 */
0x41, 0x00, /* 010000010000 */
0x02, 0x00, /* 000000100000 */
0x03, 0xe0, /* 000000111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 172 0xac '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x10, 0x00, /* 000100000000 */
0x30, 0x00, /* 001100000000 */
0x10, 0x00, /* 000100000000 */
0x10, 0x40, /* 000100000100 */
0x10, 0x80, /* 000100001000 */
0x11, 0x00, /* 000100010000 */
0x3a, 0x40, /* 001110100100 */
0x04, 0xc0, /* 000001001100 */
0x09, 0x40, /* 000010010100 */
0x12, 0x40, /* 000100100100 */
0x24, 0x40, /* 001001000100 */
0x47, 0xe0, /* 010001111110 */
0x00, 0x40, /* 000000000100 */
0x00, 0x40, /* 000000000100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 173 0xad '.' */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 174 0xae '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x60, /* 000001100110 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x33, 0x00, /* 001100110000 */
0x19, 0x80, /* 000110011000 */
0x0c, 0xc0, /* 000011001100 */
0x06, 0x60, /* 000001100110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 175 0xaf '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x66, 0x00, /* 011001100000 */
0x33, 0x00, /* 001100110000 */
0x19, 0x80, /* 000110011000 */
0x0c, 0xc0, /* 000011001100 */
0x06, 0x60, /* 000001100110 */
0x0c, 0xc0, /* 000011001100 */
0x19, 0x80, /* 000110011000 */
0x33, 0x00, /* 001100110000 */
0x66, 0x00, /* 011001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 176 0xb0 '.' */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
0x61, 0x80, /* 011000011000 */
0x20, 0x80, /* 001000001000 */
0x0c, 0x30, /* 000011000011 */
0x08, 0x20, /* 000010000010 */
/* 177 0xb1 '.' */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
0x88, 0x80, /* 100010001000 */
0x22, 0x20, /* 001000100010 */
0x77, 0x70, /* 011101110111 */
0x22, 0x20, /* 001000100010 */
0x88, 0x80, /* 100010001000 */
0xdd, 0xd0, /* 110111011101 */
/* 178 0xb2 '.' */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
0x9e, 0x70, /* 100111100111 */
0xdf, 0x70, /* 110111110111 */
0xf3, 0xc0, /* 111100111100 */
0xf7, 0xd0, /* 111101111101 */
/* 179 0xb3 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 180 0xb4 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 181 0xb5 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 182 0xb6 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 183 0xb7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 184 0xb8 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 185 0xb9 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x01, 0x80, /* 000000011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 186 0xba '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 187 0xbb '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x01, 0x80, /* 000000011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 188 0xbc '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0x80, /* 111111011000 */
0xfd, 0x80, /* 111111011000 */
0x01, 0x80, /* 000000011000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 189 0xbd '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0x80, /* 111111111000 */
0xff, 0x80, /* 111111111000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 190 0xbe '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 191 0xbf '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 192 0xc0 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 193 0xc1 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 194 0xc2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 195 0xc3 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 196 0xc4 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 197 0xc5 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 198 0xc6 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 199 0xc7 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 200 0xc8 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0c, 0x00, /* 000011000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 201 0xc9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 202 0xca '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 203 0xcb '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 204 0xcc '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0c, 0x00, /* 000011000000 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0xf0, /* 000011011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 205 0xcd '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 206 0xce '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x00, 0x00, /* 000000000000 */
0xfd, 0xf0, /* 111111011111 */
0xfd, 0xf0, /* 111111011111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 207 0xcf '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 208 0xd0 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 209 0xd1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 210 0xd2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 211 0xd3 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 212 0xd4 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 213 0xd5 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 214 0xd6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0xf0, /* 000011111111 */
0x0f, 0xf0, /* 000011111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 215 0xd7 '.' */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
0x0d, 0x80, /* 000011011000 */
/* 216 0xd8 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 217 0xd9 '.' */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xfe, 0x00, /* 111111100000 */
0xfe, 0x00, /* 111111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 218 0xda '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xf0, /* 000001111111 */
0x07, 0xf0, /* 000001111111 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
/* 219 0xdb '.' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 220 0xdc '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
/* 221 0xdd '.' */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
0xfc, 0x00, /* 111111000000 */
/* 222 0xde '.' */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
0x03, 0xf0, /* 000000111111 */
/* 223 0xdf '.' */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0xff, 0xf0, /* 111111111111 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 224 0xe0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x60, /* 000011110110 */
0x13, 0xe0, /* 000100111110 */
0x21, 0xc0, /* 001000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x70, 0x80, /* 011100001000 */
0x39, 0xc0, /* 001110011100 */
0x1f, 0x60, /* 000111110110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 225 0xe1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x31, 0x80, /* 001100011000 */
0x37, 0x80, /* 001101111000 */
0x31, 0x80, /* 001100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x31, 0x80, /* 001100011000 */
0x77, 0x00, /* 011101110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 226 0xe2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xe0, /* 001111111110 */
0x3f, 0xe0, /* 001111111110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 227 0xe3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 228 0xe4 '.' */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x30, 0x60, /* 001100000110 */
0x30, 0x00, /* 001100000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x60, /* 001100000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 229 0xe5 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x07, 0xe0, /* 000001111110 */
0x0f, 0xe0, /* 000011111110 */
0x13, 0x80, /* 000100111000 */
0x21, 0xc0, /* 001000011100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x60, 0xc0, /* 011000001100 */
0x70, 0x80, /* 011100001000 */
0x39, 0x00, /* 001110010000 */
0x1e, 0x00, /* 000111100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 230 0xe6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x39, 0xc0, /* 001110011100 */
0x36, 0xe0, /* 001101101110 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 231 0xe7 '.' */
0x00, 0x00, /* 000000000000 */
0x19, 0x80, /* 000110011000 */
0x3f, 0xc0, /* 001111111100 */
0x66, 0x60, /* 011001100110 */
0x66, 0x60, /* 011001100110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 232 0xe8 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 233 0xe9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x1f, 0x80, /* 000111111000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 234 0xea '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1f, 0x00, /* 000111110000 */
0x31, 0x80, /* 001100011000 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0xd9, 0xb0, /* 110110011011 */
0x79, 0xe0, /* 011110011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 235 0xeb '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0x80, /* 000001111000 */
0x0c, 0xc0, /* 000011001100 */
0x18, 0x60, /* 000110000110 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x0f, 0x80, /* 000011111000 */
0x11, 0xc0, /* 000100011100 */
0x20, 0xe0, /* 001000001110 */
0x60, 0x60, /* 011000000110 */
0x60, 0x60, /* 011000000110 */
0x70, 0x40, /* 011100000100 */
0x38, 0x80, /* 001110001000 */
0x1f, 0x00, /* 000111110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 236 0xec '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x39, 0xc0, /* 001110011100 */
0x6f, 0x60, /* 011011110110 */
0x66, 0x60, /* 011001100110 */
0xc6, 0x30, /* 110001100011 */
0xc6, 0x30, /* 110001100011 */
0x66, 0x60, /* 011001100110 */
0x6f, 0x60, /* 011011110110 */
0x39, 0xc0, /* 001110011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 237 0xed '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0xc0, /* 000000001100 */
0x00, 0xc0, /* 000000001100 */
0x01, 0x80, /* 000000011000 */
0x01, 0x80, /* 000000011000 */
0x3b, 0xc0, /* 001110111100 */
0x6f, 0x60, /* 011011110110 */
0x66, 0x60, /* 011001100110 */
0xc6, 0x30, /* 110001100011 */
0xc6, 0x30, /* 110001100011 */
0x66, 0x60, /* 011001100110 */
0x6f, 0x60, /* 011011110110 */
0x3d, 0xc0, /* 001111011100 */
0x18, 0x00, /* 000110000000 */
0x18, 0x00, /* 000110000000 */
0x30, 0x00, /* 001100000000 */
0x30, 0x00, /* 001100000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 238 0xee '.' */
0x00, 0x00, /* 000000000000 */
0x01, 0xc0, /* 000000011100 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x00, /* 000110000000 */
0x1f, 0xc0, /* 000111111100 */
0x18, 0x00, /* 000110000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x03, 0x00, /* 000000110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 239 0xef '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x39, 0xc0, /* 001110011100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x30, 0xc0, /* 001100001100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 240 0xf0 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 241 0xf1 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 242 0xf2 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x60, 0x00, /* 011000000000 */
0x38, 0x00, /* 001110000000 */
0x0e, 0x00, /* 000011100000 */
0x03, 0x80, /* 000000111000 */
0x00, 0xe0, /* 000000001110 */
0x00, 0xe0, /* 000000001110 */
0x03, 0x80, /* 000000111000 */
0x0e, 0x00, /* 000011100000 */
0x38, 0x00, /* 001110000000 */
0x60, 0x00, /* 011000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 243 0xf3 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x60, /* 000000000110 */
0x01, 0xc0, /* 000000011100 */
0x07, 0x00, /* 000001110000 */
0x1c, 0x00, /* 000111000000 */
0x70, 0x00, /* 011100000000 */
0x70, 0x00, /* 011100000000 */
0x1c, 0x00, /* 000111000000 */
0x07, 0x00, /* 000001110000 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x60, /* 000000000110 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 244 0xf4 '.' */
0x00, 0x00, /* 000000000000 */
0x03, 0x80, /* 000000111000 */
0x07, 0xc0, /* 000001111100 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x60, /* 000011000110 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
0x0c, 0x00, /* 000011000000 */
/* 245 0xf5 '.' */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x3e, 0x00, /* 001111100000 */
0x63, 0x00, /* 011000110000 */
0x63, 0x00, /* 011000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
0x03, 0x00, /* 000000110000 */
/* 246 0xf6 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x7f, 0xe0, /* 011111111110 */
0x7f, 0xe0, /* 011111111110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 247 0xf7 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x38, 0x00, /* 001110000000 */
0x6c, 0x00, /* 011011000000 */
0x06, 0x30, /* 000001100011 */
0x03, 0x60, /* 000000110110 */
0x39, 0xc0, /* 001110011100 */
0x6c, 0x00, /* 011011000000 */
0x06, 0x30, /* 000001100011 */
0x03, 0x60, /* 000000110110 */
0x01, 0xc0, /* 000000011100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 248 0xf8 '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x19, 0x80, /* 000110011000 */
0x0f, 0x00, /* 000011110000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 249 0xf9 '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x1c, 0x00, /* 000111000000 */
0x3e, 0x00, /* 001111100000 */
0x3e, 0x00, /* 001111100000 */
0x3e, 0x00, /* 001111100000 */
0x1c, 0x00, /* 000111000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 250 0xfa '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x18, 0x00, /* 000110000000 */
0x3c, 0x00, /* 001111000000 */
0x3c, 0x00, /* 001111000000 */
0x18, 0x00, /* 000110000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 251 0xfb '.' */
0x00, 0x00, /* 000000000000 */
0x07, 0xe0, /* 000001111110 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0x06, 0x00, /* 000001100000 */
0xc6, 0x00, /* 110001100000 */
0x66, 0x00, /* 011001100000 */
0x36, 0x00, /* 001101100000 */
0x1e, 0x00, /* 000111100000 */
0x0e, 0x00, /* 000011100000 */
0x06, 0x00, /* 000001100000 */
0x02, 0x00, /* 000000100000 */
0x00, 0x00, /* 000000000000 */
/* 252 0xfc '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x13, 0x80, /* 000100111000 */
0x3d, 0xc0, /* 001111011100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x18, 0xc0, /* 000110001100 */
0x3d, 0xe0, /* 001111011110 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 253 0xfd '.' */
0x00, 0x00, /* 000000000000 */
0x0f, 0x00, /* 000011110000 */
0x1f, 0x80, /* 000111111000 */
0x31, 0x80, /* 001100011000 */
0x21, 0x80, /* 001000011000 */
0x03, 0x00, /* 000000110000 */
0x06, 0x00, /* 000001100000 */
0x0c, 0x00, /* 000011000000 */
0x18, 0x40, /* 000110000100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 254 0xfe '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x3f, 0xc0, /* 001111111100 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
/* 255 0xff '.' */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
0x00, 0x00, /* 000000000000 */
};
/*
 * Descriptor for the Sun 12x22 console font.  The glyph bitmaps live in
 * fontdata_sun12x22 above: each glyph row is stored as 2 bytes, of which
 * the top 12 bits are significant (glyphs indexed up to 0xff here).
 */
const struct font_desc font_sun_12x22 = {
.idx = SUN12x22_IDX,
.name = "SUN12x22",
.width = 12,
.height = 22,
.data = fontdata_sun12x22,
#ifdef __sparc__
/* Preferred font on sparc (native Sun console font). */
.pref = 5,
#else
/* Not auto-selected on other architectures. */
.pref = -1,
#endif
};
| gpl-2.0 |
colede/valgrind-freebsd | none/tests/amd64/bug137714-amd64.c | 106 | 2421 |
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
typedef unsigned char UChar;
typedef unsigned int UInt;

/* Deterministic pseudo-random generator: the linear congruential
 * recurrence from "Numerical Recipes in C", 2nd Edition.  The state is
 * kept in a function-local static seeded to zero, so every run of the
 * test emits the identical sequence (required because the test output
 * is diffed against a reference log).  Returns the top 15 bits of the
 * 32-bit state. */
static UInt randomUInt ( void )
{
   static UInt state = 0;
   state = (UInt)(1664525UL * state + 1013904223UL);
   return state >> 17;
}
/*
 * Exercise the MMX MASKMOVQ byte-masked store.
 *
 * Loads 8 bytes from regL into %mm1 and 8 bytes from regR into %mm2,
 * points %rdi at a freshly malloc'd 8-byte buffer, executes
 * "maskmovq %mm1,%mm2" (which stores bytes to [rdi] under control of a
 * per-byte mask; which operand supplies the data vs. the mask follows
 * the hardware operand convention -- see the Intel SDM), then prints
 * the resulting buffer as hex so the output can be diffed against a
 * reference run.
 */
void maskmovq_mmx ( UChar* regL, UChar* regR )
{
int i;
UChar* dst = malloc(8);
assert(dst);
/* Pre-fill dst with a recognisable pattern so masked-out (unwritten)
   bytes remain visible in the printed result. */
for (i = 0; i < 8; i++)
dst[i] = 17 * (i+1);
__asm__ __volatile__(
/* "emms" resets the MMX/x87 state before using MMX registers. */
"emms\n\t"
"movq (%0), %%mm1\n\t"
"movq (%1), %%mm2\n\t"
"movq %2, %%rdi\n\t"
"maskmovq %%mm1,%%mm2"
: /*out*/
: /*in*/ "r"(regL), "r"(regR), "r"(&dst[0])
: /*trash*/ "rdi", "memory", "cc"
);
/* Dump the (partially) overwritten buffer; the leaked/retained pattern
   bytes show exactly which lanes the mask suppressed. */
for (i = 0; i < 8; i++)
printf("%02x", dst[i]);
free(dst);
}
/*
 * Exercise the SSE2 MASKMOVDQU byte-masked store, deliberately using a
 * high register (%xmm12) so the REX-encoded form is covered.
 *
 * Loads 16 bytes from regL into %xmm1 and 16 bytes from regR into
 * %xmm12, points %rdi at a malloc'd 16-byte buffer pre-filled with
 * 0..15, executes "maskmovdqu %xmm12,%xmm1" (byte-masked store to
 * [rdi]; operand data/mask roles per the Intel SDM), fences, and prints
 * the buffer as hex for comparison against a reference run.
 */
void maskmovdqu_sse ( UChar* regL, UChar* regR )
{
int i;
UChar* dst = malloc(16);
assert(dst);
/* Recognisable pattern so masked-out bytes stay visible in the dump. */
for (i = 0; i < 16; i++)
dst[i] = i;
__asm__ __volatile__(
"movups (%0), %%xmm1\n\t"
"movups (%1), %%xmm12\n\t"
"movq %2, %%rdi\n\t"
"maskmovdqu %%xmm12,%%xmm1\n\t"
/* MASKMOVDQU uses a non-temporal hint; sfence orders the store. */
"sfence"
: /*out*/
: /*in*/ "r"(regL), "r"(regR), "r"(dst)
: /*trash*/ "rdi", "memory", "cc"
);
for (i = 0; i < 16; i++)
printf("%02x", dst[i]);
free(dst);
}
/*
 * Regression test for Valgrind bug 137714: drive the MMX MASKMOVQ and
 * SSE2 MASKMOVDQU masked-store instructions with ten rounds of
 * pseudo-random operands each, printing the operands and the stored
 * result as hex.  The output is compared bit-for-bit against a
 * reference log, so everything printed must be deterministic
 * (randomUInt is a fixed-seed LCG).
 *
 * Fix vs. original: regL/regR were malloc'd but never freed in either
 * scope; they are now released before each scope exits.
 */
int main ( int argc, char** argv )
{
   int i, j;

   /* mmx test: 8-byte operand pairs */
   {
      UChar* regL = malloc(8);
      UChar* regR = malloc(8);
      assert(regL);
      assert(regR);
      for (i = 0; i < 10; i++) {
         for (j = 0; j < 8; j++) {
            regL[j] = (UChar)randomUInt();
            printf("%02x", regL[j]);
         }
         printf(" ");
         for (j = 0; j < 8; j++) {
            regR[j] = (UChar)randomUInt();
            printf("%02x", regR[j]);
         }
         printf(" ");
         maskmovq_mmx( regR, regL );
         printf("\n");
      }
      free(regL);
      free(regR);
   }

   /* sse test: 16-byte operand pairs */
   {
      UChar* regL = malloc(16);
      UChar* regR = malloc(16);
      assert(regL);
      assert(regR);
      for (i = 0; i < 10; i++) {
         for (j = 0; j < 16; j++) {
            regL[j] = (UChar)randomUInt();
            printf("%02x", regL[j]);
         }
         printf(" ");
         for (j = 0; j < 16; j++) {
            regR[j] = (UChar)randomUInt();
            printf("%02x", regR[j]);
         }
         printf(" ");
         maskmovdqu_sse( regR, regL );
         printf("\n");
      }
      free(regL);
      free(regR);
   }

   return 0;
}
| gpl-2.0 |
acuoci/OpenFOAMTrainingCombustion | Libraries/eigen-3.3.3/test/diagonalmatrices.cpp | 106 | 5840 | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
using namespace std;
/*
 * Core DiagonalMatrix test for a given dense MatrixType.  Checks:
 *  - construction/assignment of dense matrices and DiagonalMatrix
 *    objects from vector.asDiagonal(),
 *  - coefficient-level correctness of diagonal*dense and dense*diagonal
 *    products (including sum expressions as operands),
 *  - products against topRows/head block expressions,
 *  - in-place diagonal products on a block of a larger matrix,
 *  - scalar multiples of a DiagonalMatrix,
 *  - +=, -= and = of a diagonal expression into a dense matrix.
 *
 * Fix vs. original: the locals ldm2/rdm2 were constructed but never
 * referenced; they have been removed.  All other statements are
 * unchanged.
 */
template<typename MatrixType> void diagonalmatrices(const MatrixType& m)
{
typedef typename MatrixType::Index Index;
typedef typename MatrixType::Scalar Scalar;
enum { Rows = MatrixType::RowsAtCompileTime, Cols = MatrixType::ColsAtCompileTime };
typedef Matrix<Scalar, Rows, 1> VectorType;
typedef Matrix<Scalar, 1, Cols> RowVectorType;
typedef Matrix<Scalar, Rows, Rows> SquareMatrixType;
typedef Matrix<Scalar, Dynamic, Dynamic> DynMatrixType;
typedef DiagonalMatrix<Scalar, Rows> LeftDiagonalMatrix;
typedef DiagonalMatrix<Scalar, Cols> RightDiagonalMatrix;
/* Big enough to embed an m-sized block at a random offset below. */
typedef Matrix<Scalar, Rows==Dynamic?Dynamic:2*Rows, Cols==Dynamic?Dynamic:2*Cols> BigMatrix;
Index rows = m.rows();
Index cols = m.cols();
MatrixType m1 = MatrixType::Random(rows, cols),
m2 = MatrixType::Random(rows, cols);
VectorType v1 = VectorType::Random(rows),
v2 = VectorType::Random(rows);
RowVectorType rv1 = RowVectorType::Random(cols),
rv2 = RowVectorType::Random(cols);
LeftDiagonalMatrix ldm1(v1);
RightDiagonalMatrix rdm1(rv1);
Scalar s1 = internal::random<Scalar>();
/* dense <- asDiagonal() construction and assignment */
SquareMatrixType sq_m1 (v1.asDiagonal());
VERIFY_IS_APPROX(sq_m1, v1.asDiagonal().toDenseMatrix());
sq_m1 = v1.asDiagonal();
VERIFY_IS_APPROX(sq_m1, v1.asDiagonal().toDenseMatrix());
SquareMatrixType sq_m2 = v1.asDiagonal();
VERIFY_IS_APPROX(sq_m1, sq_m2);
/* DiagonalMatrix construction/assignment from asDiagonal() */
ldm1 = v1.asDiagonal();
LeftDiagonalMatrix ldm3(v1);
VERIFY_IS_APPROX(ldm1.diagonal(), ldm3.diagonal());
LeftDiagonalMatrix ldm4 = v1.asDiagonal();
VERIFY_IS_APPROX(ldm1.diagonal(), ldm4.diagonal());
/* writing a diagonal into a block / transposed dense destination */
sq_m1.block(0,0,rows,rows) = ldm1;
VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix());
sq_m1.transpose() = ldm1;
VERIFY_IS_APPROX(sq_m1, ldm1.toDenseMatrix());
/* coefficient-level product checks at one random coefficient (i,j) */
Index i = internal::random<Index>(0, rows-1);
Index j = internal::random<Index>(0, cols-1);
VERIFY_IS_APPROX( ((ldm1 * m1)(i,j)) , ldm1.diagonal()(i) * m1(i,j) );
VERIFY_IS_APPROX( ((ldm1 * (m1+m2))(i,j)) , ldm1.diagonal()(i) * (m1+m2)(i,j) );
VERIFY_IS_APPROX( ((m1 * rdm1)(i,j)) , rdm1.diagonal()(j) * m1(i,j) );
VERIFY_IS_APPROX( ((v1.asDiagonal() * m1)(i,j)) , v1(i) * m1(i,j) );
VERIFY_IS_APPROX( ((m1 * rv1.asDiagonal())(i,j)) , rv1(j) * m1(i,j) );
VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * m1)(i,j)) , (v1+v2)(i) * m1(i,j) );
VERIFY_IS_APPROX( (((v1+v2).asDiagonal() * (m1+m2))(i,j)) , (v1+v2)(i) * (m1+m2)(i,j) );
VERIFY_IS_APPROX( ((m1 * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * m1(i,j) );
VERIFY_IS_APPROX( (((m1+m2) * (rv1+rv2).asDiagonal())(i,j)) , (rv1+rv2)(j) * (m1+m2)(i,j) );
/* products against block expressions vs. an evaluated copy */
if(rows>1)
{
DynMatrixType tmp = m1.topRows(rows/2), res;
VERIFY_IS_APPROX( (res = m1.topRows(rows/2) * rv1.asDiagonal()), tmp * rv1.asDiagonal() );
VERIFY_IS_APPROX( (res = v1.head(rows/2).asDiagonal()*m1.topRows(rows/2)), v1.head(rows/2).asDiagonal()*tmp );
}
/* in-place diagonal products on a block of a larger matrix */
BigMatrix big;
big.setZero(2*rows, 2*cols);
big.block(i,j,rows,cols) = m1;
big.block(i,j,rows,cols) = v1.asDiagonal() * big.block(i,j,rows,cols);
VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , v1.asDiagonal() * m1 );
big.block(i,j,rows,cols) = m1;
big.block(i,j,rows,cols) = big.block(i,j,rows,cols) * rv1.asDiagonal();
VERIFY_IS_APPROX((big.block(i,j,rows,cols)) , m1 * rv1.asDiagonal() );
// scalar multiple
VERIFY_IS_APPROX(LeftDiagonalMatrix(ldm1*s1).diagonal(), ldm1.diagonal() * s1);
VERIFY_IS_APPROX(LeftDiagonalMatrix(s1*ldm1).diagonal(), s1 * ldm1.diagonal());
VERIFY_IS_APPROX(m1 * (rdm1 * s1), (m1 * rdm1) * s1);
VERIFY_IS_APPROX(m1 * (s1 * rdm1), (m1 * rdm1) * s1);
// Diagonal to dense
sq_m1.setRandom();
sq_m2 = sq_m1;
VERIFY_IS_APPROX( (sq_m1 += (s1*v1).asDiagonal()), sq_m2 += (s1*v1).asDiagonal().toDenseMatrix() );
VERIFY_IS_APPROX( (sq_m1 -= (s1*v1).asDiagonal()), sq_m2 -= (s1*v1).asDiagonal().toDenseMatrix() );
VERIFY_IS_APPROX( (sq_m1 = (s1*v1).asDiagonal()), (s1*v1).asDiagonal().toDenseMatrix() );
}
template<int>
void bug987()
{
Matrix3Xd points = Matrix3Xd::Random(3, 3);
Vector2d diag = Vector2d::Random();
Matrix2Xd tmp1 = points.topRows<2>(), res1, res2;
VERIFY_IS_APPROX( res1 = diag.asDiagonal() * points.topRows<2>(), res2 = diag.asDiagonal() * tmp1 );
Matrix2d tmp2 = points.topLeftCorner<2,2>();
VERIFY_IS_APPROX(( res1 = points.topLeftCorner<2,2>()*diag.asDiagonal()) , res2 = tmp2*diag.asDiagonal() );
}
void test_diagonalmatrices()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( diagonalmatrices(Matrix<float, 1, 1>()) );
CALL_SUBTEST_2( diagonalmatrices(Matrix3f()) );
CALL_SUBTEST_3( diagonalmatrices(Matrix<double,3,3,RowMajor>()) );
CALL_SUBTEST_4( diagonalmatrices(Matrix4d()) );
CALL_SUBTEST_5( diagonalmatrices(Matrix<float,4,4,RowMajor>()) );
CALL_SUBTEST_6( diagonalmatrices(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_7( diagonalmatrices(MatrixXi(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_8( diagonalmatrices(Matrix<double,Dynamic,Dynamic,RowMajor>(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
CALL_SUBTEST_9( diagonalmatrices(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
CALL_SUBTEST_10( bug987<0>() );
}
| gpl-2.0 |
jidongxiao/hyperpsonline | qemu-2.2.0/disas/i386.c | 106 | 167185 | /* opcodes/i386-dis.c r1.126 */
/* Print i386 instructions for GDB, the GNU debugger.
Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>. */
/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu)
July 1988
modified by John Hassey (hassey@dg-rtp.dg.com)
x86-64 support added by Jan Hubicka (jh@suse.cz)
VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */
/* The main tables describing the instructions is essentially a copy
of the "Opcode Map" chapter (Appendix A) of the Intel 80386
Programmers Manual. Usually, there is a capital letter, followed
by a small letter. The capital letter tell the addressing mode,
and the small letter tells about the operand size. Refer to
the Intel manual for details. */
#include <stdlib.h>
#include "disas/bfd.h"
/* include/opcode/i386.h r1.78 */
/* opcode/i386.h -- Intel 80386 opcode macros
Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
Free Software Foundation, Inc.
This file is part of GAS, the GNU Assembler, and GDB, the GNU Debugger.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>. */
/* The SystemV/386 SVR3.2 assembler, and probably all AT&T derived
ix86 Unix assemblers, generate floating point instructions with
reversed source and destination registers in certain cases.
Unfortunately, gcc and possibly many other programs use this
reversed syntax, so we're stuck with it.
eg. `fsub %st(3),%st' results in st = st - st(3) as expected, but
`fsub %st,%st(3)' results in st(3) = st - st(3), rather than
the expected st(3) = st(3) - st
This happens with all the non-commutative arithmetic floating point
operations with two register operands, where the source register is
%st, and destination register is %st(i).
The affected opcode map is dceX, dcfX, deeX, defX. */
#ifndef SYSV386_COMPAT
/* Set non-zero for broken, compatible instructions. Set to zero for
non-broken opcodes at your peril. gcc generates SystemV/386
compatible instructions. */
#define SYSV386_COMPAT 1
#endif
#ifndef OLDGCC_COMPAT
/* Set non-zero to cater for old (<= 2.8.1) versions of gcc that could
generate nonsense fsubp, fsubrp, fdivp and fdivrp with operands
reversed. */
#define OLDGCC_COMPAT SYSV386_COMPAT
#endif
#define MOV_AX_DISP32 0xa0
#define POP_SEG_SHORT 0x07
#define JUMP_PC_RELATIVE 0xeb
#define INT_OPCODE 0xcd
#define INT3_OPCODE 0xcc
/* The opcode for the fwait instruction, which disassembler treats as a
prefix when it can. */
#define FWAIT_OPCODE 0x9b
#define ADDR_PREFIX_OPCODE 0x67
#define DATA_PREFIX_OPCODE 0x66
#define LOCK_PREFIX_OPCODE 0xf0
#define CS_PREFIX_OPCODE 0x2e
#define DS_PREFIX_OPCODE 0x3e
#define ES_PREFIX_OPCODE 0x26
#define FS_PREFIX_OPCODE 0x64
#define GS_PREFIX_OPCODE 0x65
#define SS_PREFIX_OPCODE 0x36
#define REPNE_PREFIX_OPCODE 0xf2
#define REPE_PREFIX_OPCODE 0xf3
#define TWO_BYTE_OPCODE_ESCAPE 0x0f
#define NOP_OPCODE (char) 0x90
/* register numbers */
#define EBP_REG_NUM 5
#define ESP_REG_NUM 4
/* modrm_byte.regmem for twobyte escape */
#define ESCAPE_TO_TWO_BYTE_ADDRESSING ESP_REG_NUM
/* index_base_byte.index for no index register addressing */
#define NO_INDEX_REGISTER ESP_REG_NUM
/* index_base_byte.base for no base register addressing */
#define NO_BASE_REGISTER EBP_REG_NUM
#define NO_BASE_REGISTER_16 6
/* modrm.mode = REGMEM_FIELD_HAS_REG when a register is in there */
#define REGMEM_FIELD_HAS_REG 0x3/* always = 0x3 */
#define REGMEM_FIELD_HAS_MEM (~REGMEM_FIELD_HAS_REG)
/* x86-64 extension prefix. */
#define REX_OPCODE 0x40
/* Indicates 64 bit operand size. */
#define REX_W 8
/* High extension to reg field of modrm byte. */
#define REX_R 4
/* High extension to SIB index field. */
#define REX_X 2
/* High extension to base field of modrm or SIB, or reg field of opcode. */
#define REX_B 1
/* max operands per insn */
#define MAX_OPERANDS 4
/* max immediates per insn (lcall, ljmp, insertq, extrq) */
#define MAX_IMMEDIATE_OPERANDS 2
/* max memory refs per insn (string ops) */
#define MAX_MEMORY_OPERANDS 2
/* max size of insn mnemonics. */
#define MAX_MNEM_SIZE 16
/* max size of register name in insn mnemonics. */
#define MAX_REG_NAME_SIZE 8
/* opcodes/i386-dis.c r1.126 */
#include "qemu-common.h"
#include <setjmp.h>
static int fetch_data2(struct disassemble_info *, bfd_byte *);
static int fetch_data(struct disassemble_info *, bfd_byte *);
static void ckprefix (void);
static const char *prefix_name (int, int);
static int print_insn (bfd_vma, disassemble_info *);
static void dofloat (int);
static void OP_ST (int, int);
static void OP_STi (int, int);
static int putop (const char *, int);
static void oappend (const char *);
static void append_seg (void);
static void OP_indirE (int, int);
static void print_operand_value (char *buf, size_t bufsize, int hex, bfd_vma disp);
static void print_displacement (char *, bfd_vma);
static void OP_E (int, int);
static void OP_G (int, int);
static void OP_vvvv (int, int);
static bfd_vma get64 (void);
static bfd_signed_vma get32 (void);
static bfd_signed_vma get32s (void);
static int get16 (void);
static void set_op (bfd_vma, int);
static void OP_REG (int, int);
static void OP_IMREG (int, int);
static void OP_I (int, int);
static void OP_I64 (int, int);
static void OP_sI (int, int);
static void OP_J (int, int);
static void OP_SEG (int, int);
static void OP_DIR (int, int);
static void OP_OFF (int, int);
static void OP_OFF64 (int, int);
static void ptr_reg (int, int);
static void OP_ESreg (int, int);
static void OP_DSreg (int, int);
static void OP_C (int, int);
static void OP_D (int, int);
static void OP_T (int, int);
static void OP_R (int, int);
static void OP_MMX (int, int);
static void OP_XMM (int, int);
static void OP_EM (int, int);
static void OP_EX (int, int);
static void OP_EMC (int,int);
static void OP_MXC (int,int);
static void OP_MS (int, int);
static void OP_XS (int, int);
static void OP_M (int, int);
static void OP_VMX (int, int);
static void OP_0fae (int, int);
static void OP_0f07 (int, int);
static void NOP_Fixup1 (int, int);
static void NOP_Fixup2 (int, int);
static void OP_3DNowSuffix (int, int);
static void OP_SIMD_Suffix (int, int);
static void SIMD_Fixup (int, int);
static void PNI_Fixup (int, int);
static void SVME_Fixup (int, int);
static void INVLPG_Fixup (int, int);
static void BadOp (void);
static void VMX_Fixup (int, int);
static void REP_Fixup (int, int);
static void CMPXCHG8B_Fixup (int, int);
static void XMM_Fixup (int, int);
static void CRC32_Fixup (int, int);
/* Per-call disassembler state: a small lookahead buffer of opcode bytes
   plus a sigsetjmp target used to abort decoding on a failed read. */
struct dis_private {
/* Points to first byte not fetched. */
bfd_byte *max_fetched;
bfd_byte the_buffer[MAX_MNEM_SIZE]; /* raw instruction bytes fetched so far */
bfd_vma insn_start; /* target address corresponding to the_buffer[0] */
int orig_sizeflag; /* NOTE(review): presumably the sizeflag before per-insn
                      prefix handling; its use is outside this chunk — confirm */
sigjmp_buf bailout; /* fetch_data2() siglongjmps here on a read error */
};
/* Processor addressing mode the byte stream is decoded under. */
enum address_mode
{
mode_16bit,
mode_32bit,
mode_64bit
};
static enum address_mode address_mode;
/* Flags for the prefixes for the current instruction. See below. */
static int prefixes;
/* REX prefix the current instruction. See below. */
static int rex;
/* Bits of REX we've already used. */
static int rex_used;
/* Mark parts used in the REX prefix. When we are testing for
empty prefix (for 8bit register REX extension), just mask it
out. Otherwise test for REX bit is excuse for existence of REX
only in case value is nonzero. */
#define USED_REX(value) \
{ \
if (value) \
{ \
if ((rex & value)) \
rex_used |= (value) | REX_OPCODE; \
} \
else \
rex_used |= REX_OPCODE; \
}
/* Flags for prefixes which we somehow handled when printing the
current instruction. */
static int used_prefixes;
/* The VEX.vvvv register, unencoded. */
static int vex_reg;
/* Flags stored in PREFIXES. */
#define PREFIX_REPZ 1
#define PREFIX_REPNZ 2
#define PREFIX_LOCK 4
#define PREFIX_CS 8
#define PREFIX_SS 0x10
#define PREFIX_DS 0x20
#define PREFIX_ES 0x40
#define PREFIX_FS 0x80
#define PREFIX_GS 0x100
#define PREFIX_DATA 0x200
#define PREFIX_ADDR 0x400
#define PREFIX_FWAIT 0x800
#define PREFIX_VEX_0F 0x1000
#define PREFIX_VEX_0F38 0x2000
#define PREFIX_VEX_0F3A 0x4000
/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive)
to ADDR (exclusive) are valid. Returns 1 for success, longjmps
on error. */
/* Fetch target bytes so that everything from PRIV->THE_BUFFER up to ADDR
   (exclusive) is valid.  Returns 1 on success.  On failure, reports the
   error via info->memory_error_func only when not even one byte has been
   fetched (otherwise print_insn_i386 can still print something sensible),
   then siglongjmps out through priv->bailout.  */
static int
fetch_data2(struct disassemble_info *info, bfd_byte *addr)
{
    struct dis_private *priv = (struct dis_private *) info->private_data;
    /* Target address of the first byte we have not fetched yet.  */
    bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer);
    int status = -1;

    /* Never read past the end of the fixed-size lookahead buffer.  */
    if (addr <= priv->the_buffer + MAX_MNEM_SIZE) {
        status = (*info->read_memory_func)(start,
                                           priv->max_fetched,
                                           addr - priv->max_fetched,
                                           info);
    }

    if (status != 0) {
        /* Report only if nothing at all was read; this is the one place
           where STATUS is known.  */
        if (priv->max_fetched == priv->the_buffer) {
            (*info->memory_error_func)(status, start, info);
        }
        siglongjmp(priv->bailout, 1);
    }

    priv->max_fetched = addr;
    return 1;
}
/* Ensure bytes up to ADDR (exclusive) are available, hitting the target
   via fetch_data2() only when the cached range does not already cover
   them.  Returns 1 on success (fetch_data2 longjmps on failure).  */
static int
fetch_data(struct disassemble_info *info, bfd_byte *addr)
{
    struct dis_private *priv = (struct dis_private *) info->private_data;

    if (addr > priv->max_fetched) {
        return fetch_data2(info, addr);
    }
    return 1;
}
#define XX { NULL, 0 }
#define Bv { OP_vvvv, v_mode }
#define Eb { OP_E, b_mode }
#define Ev { OP_E, v_mode }
#define Ed { OP_E, d_mode }
#define Edq { OP_E, dq_mode }
#define Edqw { OP_E, dqw_mode }
#define Edqb { OP_E, dqb_mode }
#define Edqd { OP_E, dqd_mode }
#define indirEv { OP_indirE, stack_v_mode }
#define indirEp { OP_indirE, f_mode }
#define stackEv { OP_E, stack_v_mode }
#define Em { OP_E, m_mode }
#define Ew { OP_E, w_mode }
#define M { OP_M, 0 } /* lea, lgdt, etc. */
#define Ma { OP_M, v_mode }
#define Mp { OP_M, f_mode } /* 32 or 48 bit memory operand for LDS, LES etc */
#define Mq { OP_M, q_mode }
#define Gb { OP_G, b_mode }
#define Gv { OP_G, v_mode }
#define Gd { OP_G, d_mode }
#define Gdq { OP_G, dq_mode }
#define Gm { OP_G, m_mode }
#define Gw { OP_G, w_mode }
#define Rd { OP_R, d_mode }
#define Rm { OP_R, m_mode }
#define Ib { OP_I, b_mode }
#define sIb { OP_sI, b_mode } /* sign-extended byte */
#define Iv { OP_I, v_mode }
#define Iq { OP_I, q_mode }
#define Iv64 { OP_I64, v_mode }
#define Iw { OP_I, w_mode }
#define I1 { OP_I, const_1_mode }
#define Jb { OP_J, b_mode }
#define Jv { OP_J, v_mode }
#define Cm { OP_C, m_mode }
#define Dm { OP_D, m_mode }
#define Td { OP_T, d_mode }
#define RMeAX { OP_REG, eAX_reg }
#define RMeBX { OP_REG, eBX_reg }
#define RMeCX { OP_REG, eCX_reg }
#define RMeDX { OP_REG, eDX_reg }
#define RMeSP { OP_REG, eSP_reg }
#define RMeBP { OP_REG, eBP_reg }
#define RMeSI { OP_REG, eSI_reg }
#define RMeDI { OP_REG, eDI_reg }
#define RMrAX { OP_REG, rAX_reg }
#define RMrBX { OP_REG, rBX_reg }
#define RMrCX { OP_REG, rCX_reg }
#define RMrDX { OP_REG, rDX_reg }
#define RMrSP { OP_REG, rSP_reg }
#define RMrBP { OP_REG, rBP_reg }
#define RMrSI { OP_REG, rSI_reg }
#define RMrDI { OP_REG, rDI_reg }
/* Drop the accidental duplicate: this macro was defined twice on
   consecutive lines with identical bodies (legal in C, but redundant). */
#define RMAL { OP_REG, al_reg }
#define RMCL { OP_REG, cl_reg }
#define RMDL { OP_REG, dl_reg }
#define RMBL { OP_REG, bl_reg }
#define RMAH { OP_REG, ah_reg }
#define RMCH { OP_REG, ch_reg }
#define RMDH { OP_REG, dh_reg }
#define RMBH { OP_REG, bh_reg }
#define RMAX { OP_REG, ax_reg }
#define RMDX { OP_REG, dx_reg }
#define eAX { OP_IMREG, eAX_reg }
#define eBX { OP_IMREG, eBX_reg }
#define eCX { OP_IMREG, eCX_reg }
#define eDX { OP_IMREG, eDX_reg }
#define eSP { OP_IMREG, eSP_reg }
#define eBP { OP_IMREG, eBP_reg }
#define eSI { OP_IMREG, eSI_reg }
#define eDI { OP_IMREG, eDI_reg }
#define AL { OP_IMREG, al_reg }
#define CL { OP_IMREG, cl_reg }
#define DL { OP_IMREG, dl_reg }
#define BL { OP_IMREG, bl_reg }
#define AH { OP_IMREG, ah_reg }
#define CH { OP_IMREG, ch_reg }
#define DH { OP_IMREG, dh_reg }
#define BH { OP_IMREG, bh_reg }
#define AX { OP_IMREG, ax_reg }
#define DX { OP_IMREG, dx_reg }
#define zAX { OP_IMREG, z_mode_ax_reg }
#define indirDX { OP_IMREG, indir_dx_reg }
#define Sw { OP_SEG, w_mode }
#define Sv { OP_SEG, v_mode }
#define Ap { OP_DIR, 0 }
#define Ob { OP_OFF64, b_mode }
#define Ov { OP_OFF64, v_mode }
#define Xb { OP_DSreg, eSI_reg }
#define Xv { OP_DSreg, eSI_reg }
#define Xz { OP_DSreg, eSI_reg }
#define Yb { OP_ESreg, eDI_reg }
#define Yv { OP_ESreg, eDI_reg }
#define DSBX { OP_DSreg, eBX_reg }
#define es { OP_REG, es_reg }
#define ss { OP_REG, ss_reg }
#define cs { OP_REG, cs_reg }
#define ds { OP_REG, ds_reg }
#define fs { OP_REG, fs_reg }
#define gs { OP_REG, gs_reg }
#define MX { OP_MMX, 0 }
#define XM { OP_XMM, 0 }
#define EM { OP_EM, v_mode }
#define EMd { OP_EM, d_mode }
#define EMq { OP_EM, q_mode }
#define EXd { OP_EX, d_mode }
#define EXq { OP_EX, q_mode }
#define EXx { OP_EX, x_mode }
#define MS { OP_MS, v_mode }
#define XS { OP_XS, v_mode }
#define EMC { OP_EMC, v_mode }
#define MXC { OP_MXC, 0 }
#define VM { OP_VMX, q_mode }
#define OPSUF { OP_3DNowSuffix, 0 }
#define OPSIMD { OP_SIMD_Suffix, 0 }
#define XMM0 { XMM_Fixup, 0 }
/* Used to handle the "rep" prefix for string instructions. */
#define Xbr { REP_Fixup, eSI_reg }
#define Xvr { REP_Fixup, eSI_reg }
#define Ybr { REP_Fixup, eDI_reg }
#define Yvr { REP_Fixup, eDI_reg }
#define Yzr { REP_Fixup, eDI_reg }
#define indirDXr { REP_Fixup, indir_dx_reg }
#define ALr { REP_Fixup, al_reg }
#define eAXr { REP_Fixup, eAX_reg }
#define cond_jump_flag { NULL, cond_jump_mode }
#define loop_jcxz_flag { NULL, loop_jcxz_mode }
/* bits in sizeflag */
#define SUFFIX_ALWAYS 4
#define AFLAG 2
#define DFLAG 1
#define b_mode 1 /* byte operand */
#define v_mode 2 /* operand size depends on prefixes */
#define w_mode 3 /* word operand */
#define d_mode 4 /* double word operand */
#define q_mode 5 /* quad word operand */
#define t_mode 6 /* ten-byte operand */
#define x_mode 7 /* 16-byte XMM operand */
#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. */
#define cond_jump_mode 9
#define loop_jcxz_mode 10
#define dq_mode 11 /* operand size depends on REX prefixes. */
#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */
#define f_mode 13 /* 4- or 6-byte pointer operand */
#define const_1_mode 14
#define stack_v_mode 15 /* v_mode for stack-related opcodes. */
#define z_mode 16 /* non-quad operand size depends on prefixes */
#define o_mode 17 /* 16-byte operand */
#define dqb_mode 18 /* registers like dq_mode, memory like b_mode. */
#define dqd_mode 19 /* registers like dq_mode, memory like d_mode. */
#define es_reg 100
#define cs_reg 101
#define ss_reg 102
#define ds_reg 103
#define fs_reg 104
#define gs_reg 105
#define eAX_reg 108
#define eCX_reg 109
#define eDX_reg 110
#define eBX_reg 111
#define eSP_reg 112
#define eBP_reg 113
#define eSI_reg 114
#define eDI_reg 115
#define al_reg 116
#define cl_reg 117
#define dl_reg 118
#define bl_reg 119
#define ah_reg 120
#define ch_reg 121
#define dh_reg 122
#define bh_reg 123
#define ax_reg 124
#define cx_reg 125
#define dx_reg 126
#define bx_reg 127
#define sp_reg 128
#define bp_reg 129
#define si_reg 130
#define di_reg 131
#define rAX_reg 132
#define rCX_reg 133
#define rDX_reg 134
#define rBX_reg 135
#define rSP_reg 136
#define rBP_reg 137
#define rSI_reg 138
#define rDI_reg 139
#define z_mode_ax_reg 149
#define indir_dx_reg 150
#define FLOATCODE 1
#define USE_GROUPS 2
#define USE_PREFIX_USER_TABLE 3
#define X86_64_SPECIAL 4
#define IS_3BYTE_OPCODE 5
#define FLOAT NULL, { { NULL, FLOATCODE } }
#define GRP1a NULL, { { NULL, USE_GROUPS }, { NULL, 0 } }
#define GRP1b NULL, { { NULL, USE_GROUPS }, { NULL, 1 } }
#define GRP1S NULL, { { NULL, USE_GROUPS }, { NULL, 2 } }
#define GRP1Ss NULL, { { NULL, USE_GROUPS }, { NULL, 3 } }
#define GRP2b NULL, { { NULL, USE_GROUPS }, { NULL, 4 } }
#define GRP2S NULL, { { NULL, USE_GROUPS }, { NULL, 5 } }
#define GRP2b_one NULL, { { NULL, USE_GROUPS }, { NULL, 6 } }
#define GRP2S_one NULL, { { NULL, USE_GROUPS }, { NULL, 7 } }
#define GRP2b_cl NULL, { { NULL, USE_GROUPS }, { NULL, 8 } }
#define GRP2S_cl NULL, { { NULL, USE_GROUPS }, { NULL, 9 } }
#define GRP3b NULL, { { NULL, USE_GROUPS }, { NULL, 10 } }
#define GRP3S NULL, { { NULL, USE_GROUPS }, { NULL, 11 } }
#define GRP4 NULL, { { NULL, USE_GROUPS }, { NULL, 12 } }
#define GRP5 NULL, { { NULL, USE_GROUPS }, { NULL, 13 } }
#define GRP6 NULL, { { NULL, USE_GROUPS }, { NULL, 14 } }
#define GRP7 NULL, { { NULL, USE_GROUPS }, { NULL, 15 } }
#define GRP8 NULL, { { NULL, USE_GROUPS }, { NULL, 16 } }
#define GRP9 NULL, { { NULL, USE_GROUPS }, { NULL, 17 } }
#define GRP11_C6 NULL, { { NULL, USE_GROUPS }, { NULL, 18 } }
#define GRP11_C7 NULL, { { NULL, USE_GROUPS }, { NULL, 19 } }
#define GRP12 NULL, { { NULL, USE_GROUPS }, { NULL, 20 } }
#define GRP13 NULL, { { NULL, USE_GROUPS }, { NULL, 21 } }
#define GRP14 NULL, { { NULL, USE_GROUPS }, { NULL, 22 } }
#define GRP15 NULL, { { NULL, USE_GROUPS }, { NULL, 23 } }
#define GRP16 NULL, { { NULL, USE_GROUPS }, { NULL, 24 } }
#define GRPAMD NULL, { { NULL, USE_GROUPS }, { NULL, 25 } }
#define GRPPADLCK1 NULL, { { NULL, USE_GROUPS }, { NULL, 26 } }
#define GRPPADLCK2 NULL, { { NULL, USE_GROUPS }, { NULL, 27 } }
#define PREGRP0 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 0 } }
#define PREGRP1 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 1 } }
#define PREGRP2 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 2 } }
#define PREGRP3 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 3 } }
#define PREGRP4 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 4 } }
#define PREGRP5 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 5 } }
#define PREGRP6 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 6 } }
#define PREGRP7 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 7 } }
#define PREGRP8 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 8 } }
#define PREGRP9 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 9 } }
#define PREGRP10 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 10 } }
#define PREGRP11 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 11 } }
#define PREGRP12 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 12 } }
#define PREGRP13 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 13 } }
#define PREGRP14 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 14 } }
#define PREGRP15 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 15 } }
#define PREGRP16 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 16 } }
#define PREGRP17 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 17 } }
#define PREGRP18 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 18 } }
#define PREGRP19 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 19 } }
#define PREGRP20 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 20 } }
#define PREGRP21 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 21 } }
#define PREGRP22 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 22 } }
#define PREGRP23 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 23 } }
#define PREGRP24 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 24 } }
#define PREGRP25 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 25 } }
#define PREGRP26 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 26 } }
#define PREGRP27 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 27 } }
#define PREGRP28 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 28 } }
#define PREGRP29 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 29 } }
#define PREGRP30 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 30 } }
#define PREGRP31 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 31 } }
#define PREGRP32 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 32 } }
#define PREGRP33 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 33 } }
#define PREGRP34 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 34 } }
#define PREGRP35 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 35 } }
#define PREGRP36 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 36 } }
#define PREGRP37 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 37 } }
#define PREGRP38 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 38 } }
#define PREGRP39 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 39 } }
#define PREGRP40 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 40 } }
#define PREGRP41 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 41 } }
#define PREGRP42 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 42 } }
#define PREGRP43 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 43 } }
#define PREGRP44 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 44 } }
#define PREGRP45 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 45 } }
#define PREGRP46 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 46 } }
#define PREGRP47 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 47 } }
#define PREGRP48 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 48 } }
#define PREGRP49 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 49 } }
#define PREGRP50 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 50 } }
#define PREGRP51 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 51 } }
#define PREGRP52 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 52 } }
#define PREGRP53 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 53 } }
#define PREGRP54 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 54 } }
#define PREGRP55 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 55 } }
#define PREGRP56 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 56 } }
#define PREGRP57 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 57 } }
#define PREGRP58 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 58 } }
#define PREGRP59 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 59 } }
#define PREGRP60 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 60 } }
#define PREGRP61 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 61 } }
#define PREGRP62 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 62 } }
#define PREGRP63 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 63 } }
#define PREGRP64 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 64 } }
#define PREGRP65 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 65 } }
#define PREGRP66 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 66 } }
#define PREGRP67 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 67 } }
#define PREGRP68 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 68 } }
#define PREGRP69 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 69 } }
#define PREGRP70 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 70 } }
#define PREGRP71 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 71 } }
#define PREGRP72 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 72 } }
#define PREGRP73 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 73 } }
#define PREGRP74 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 74 } }
#define PREGRP75 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 75 } }
#define PREGRP76 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 76 } }
#define PREGRP77 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 77 } }
#define PREGRP78 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 78 } }
#define PREGRP79 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 79 } }
#define PREGRP80 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 80 } }
#define PREGRP81 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 81 } }
#define PREGRP82 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 82 } }
#define PREGRP83 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 83 } }
#define PREGRP84 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 84 } }
#define PREGRP85 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 85 } }
#define PREGRP86 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 86 } }
#define PREGRP87 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 87 } }
#define PREGRP88 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 88 } }
#define PREGRP89 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 89 } }
#define PREGRP90 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 90 } }
#define PREGRP91 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 91 } }
#define PREGRP92 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 92 } }
#define PREGRP93 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 93 } }
#define PREGRP94 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 94 } }
#define PREGRP95 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 95 } }
#define PREGRP96 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 96 } }
#define PREGRP97 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 97 } }
#define PREGRP98 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 98 } }
#define PREGRP99 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 99 } }
#define PREGRP100 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 100 } }
#define PREGRP101 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 101 } }
#define PREGRP102 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 102 } }
#define PREGRP103 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 103 } }
#define PREGRP104 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 104 } }
#define PREGRP105 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 105 } }
#define PREGRP106 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 106 } }
#define X86_64_0 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 0 } }
#define X86_64_1 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 1 } }
#define X86_64_2 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 2 } }
#define X86_64_3 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 3 } }
#define THREE_BYTE_0 NULL, { { NULL, IS_3BYTE_OPCODE }, { NULL, 0 } }
#define THREE_BYTE_1 NULL, { { NULL, IS_3BYTE_OPCODE }, { NULL, 1 } }
typedef void (*op_rtn) (int bytemode, int sizeflag);
struct dis386 {
const char *name;
struct
{
op_rtn rtn;
int bytemode;
} op[MAX_OPERANDS];
};
/* Upper case letters in the instruction names here are macros.
'A' => print 'b' if no register operands or suffix_always is true
'B' => print 'b' if suffix_always is true
'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand
. size prefix
'D' => print 'w' if no register operands or 'w', 'l' or 'q', if
. suffix_always is true
'E' => print 'e' if 32-bit form of jcxz
'F' => print 'w' or 'l' depending on address size prefix (loop insns)
'G' => print 'w' or 'l' depending on operand size prefix (i/o insns)
'H' => print ",pt" or ",pn" branch hint
'I' => honor following macro letter even in Intel mode (implemented only
. for some of the macro letters)
'J' => print 'l'
'K' => print 'd' or 'q' if rex prefix is present.
'L' => print 'l' if suffix_always is true
'N' => print 'n' if instruction has no wait "prefix"
'O' => print 'd' or 'o' (or 'q' in Intel mode)
'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix,
. or suffix_always is true. print 'q' if rex prefix is present.
'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always
. is true
'R' => print 'w', 'l' or 'q' ('d' for 'l' and 'e' in Intel mode)
'S' => print 'w', 'l' or 'q' if suffix_always is true
'T' => print 'q' in 64bit mode and behave as 'P' otherwise
'U' => print 'q' in 64bit mode and behave as 'Q' otherwise
'V' => print 'q' in 64bit mode and behave as 'S' otherwise
'W' => print 'b', 'w' or 'l' ('d' in Intel mode)
'X' => print 's', 'd' depending on data16 prefix (for XMM)
'Y' => 'q' if instruction has an REX 64bit overwrite prefix
'Z' => print 'q' in 64bit mode and behave as 'L' otherwise
Many of the above letters print nothing in Intel mode. See "putop"
for the details.
Braces '{' and '}', and vertical bars '|', indicate alternative
mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel
modes. In cases where there are only two alternatives, the X86_64
instruction is reserved, and "(bad)" is printed.
*/
/* One-byte opcode dispatch table, indexed directly by the first opcode
   byte (0x00-0xff).  Upper-case letters inside the mnemonic strings are
   the suffix macros documented in the comment above and are expanded by
   putop().  GRPxx entries dispatch further on the ModRM reg field via
   grps[]; PREGRPxx entries select on the 66/F3/F2 prefix via
   prefix_user_table[]; X86_64_x entries pick a 32- vs 64-bit-mode form.
   Prefix bytes and the 0x0f escape are "(bad)" here because they are
   consumed before this table is consulted.  */
static const struct dis386 dis386[] = {
/* 00 */
{ "addB", { Eb, Gb } },
{ "addS", { Ev, Gv } },
{ "addB", { Gb, Eb } },
{ "addS", { Gv, Ev } },
{ "addB", { AL, Ib } },
{ "addS", { eAX, Iv } },
{ "push{T|}", { es } },
{ "pop{T|}", { es } },
/* 08 */
{ "orB", { Eb, Gb } },
{ "orS", { Ev, Gv } },
{ "orB", { Gb, Eb } },
{ "orS", { Gv, Ev } },
{ "orB", { AL, Ib } },
{ "orS", { eAX, Iv } },
{ "push{T|}", { cs } },
{ "(bad)", { XX } }, /* 0x0f extended opcode escape */
/* 10 */
{ "adcB", { Eb, Gb } },
{ "adcS", { Ev, Gv } },
{ "adcB", { Gb, Eb } },
{ "adcS", { Gv, Ev } },
{ "adcB", { AL, Ib } },
{ "adcS", { eAX, Iv } },
{ "push{T|}", { ss } },
{ "pop{T|}", { ss } },
/* 18 */
{ "sbbB", { Eb, Gb } },
{ "sbbS", { Ev, Gv } },
{ "sbbB", { Gb, Eb } },
{ "sbbS", { Gv, Ev } },
{ "sbbB", { AL, Ib } },
{ "sbbS", { eAX, Iv } },
{ "push{T|}", { ds } },
{ "pop{T|}", { ds } },
/* 20 */
{ "andB", { Eb, Gb } },
{ "andS", { Ev, Gv } },
{ "andB", { Gb, Eb } },
{ "andS", { Gv, Ev } },
{ "andB", { AL, Ib } },
{ "andS", { eAX, Iv } },
{ "(bad)", { XX } }, /* SEG ES prefix */
{ "daa{|}", { XX } },
/* 28 */
{ "subB", { Eb, Gb } },
{ "subS", { Ev, Gv } },
{ "subB", { Gb, Eb } },
{ "subS", { Gv, Ev } },
{ "subB", { AL, Ib } },
{ "subS", { eAX, Iv } },
{ "(bad)", { XX } }, /* SEG CS prefix */
{ "das{|}", { XX } },
/* 30 */
{ "xorB", { Eb, Gb } },
{ "xorS", { Ev, Gv } },
{ "xorB", { Gb, Eb } },
{ "xorS", { Gv, Ev } },
{ "xorB", { AL, Ib } },
{ "xorS", { eAX, Iv } },
{ "(bad)", { XX } }, /* SEG SS prefix */
{ "aaa{|}", { XX } },
/* 38 */
{ "cmpB", { Eb, Gb } },
{ "cmpS", { Ev, Gv } },
{ "cmpB", { Gb, Eb } },
{ "cmpS", { Gv, Ev } },
{ "cmpB", { AL, Ib } },
{ "cmpS", { eAX, Iv } },
{ "(bad)", { XX } }, /* SEG DS prefix */
{ "aas{|}", { XX } },
/* 40 */
/* 0x40-0x4f: inc/dec in 32-bit mode; these bytes are REX prefixes in
   64-bit mode, hence the {S|} (no suffix printed for the 64-bit form).  */
{ "inc{S|}", { RMeAX } },
{ "inc{S|}", { RMeCX } },
{ "inc{S|}", { RMeDX } },
{ "inc{S|}", { RMeBX } },
{ "inc{S|}", { RMeSP } },
{ "inc{S|}", { RMeBP } },
{ "inc{S|}", { RMeSI } },
{ "inc{S|}", { RMeDI } },
/* 48 */
{ "dec{S|}", { RMeAX } },
{ "dec{S|}", { RMeCX } },
{ "dec{S|}", { RMeDX } },
{ "dec{S|}", { RMeBX } },
{ "dec{S|}", { RMeSP } },
{ "dec{S|}", { RMeBP } },
{ "dec{S|}", { RMeSI } },
{ "dec{S|}", { RMeDI } },
/* 50 */
{ "pushV", { RMrAX } },
{ "pushV", { RMrCX } },
{ "pushV", { RMrDX } },
{ "pushV", { RMrBX } },
{ "pushV", { RMrSP } },
{ "pushV", { RMrBP } },
{ "pushV", { RMrSI } },
{ "pushV", { RMrDI } },
/* 58 */
{ "popV", { RMrAX } },
{ "popV", { RMrCX } },
{ "popV", { RMrDX } },
{ "popV", { RMrBX } },
{ "popV", { RMrSP } },
{ "popV", { RMrBP } },
{ "popV", { RMrSI } },
{ "popV", { RMrDI } },
/* 60 */
{ X86_64_0 },
{ X86_64_1 },
{ X86_64_2 },
{ X86_64_3 },
{ "(bad)", { XX } }, /* seg fs */
{ "(bad)", { XX } }, /* seg gs */
{ "(bad)", { XX } }, /* op size prefix */
{ "(bad)", { XX } }, /* adr size prefix */
/* 68 */
{ "pushT", { Iq } },
{ "imulS", { Gv, Ev, Iv } },
{ "pushT", { sIb } },
{ "imulS", { Gv, Ev, sIb } },
{ "ins{b||b|}", { Ybr, indirDX } },
{ "ins{R||G|}", { Yzr, indirDX } },
{ "outs{b||b|}", { indirDXr, Xb } },
{ "outs{R||G|}", { indirDXr, Xz } },
/* 70 */
{ "joH", { Jb, XX, cond_jump_flag } },
{ "jnoH", { Jb, XX, cond_jump_flag } },
{ "jbH", { Jb, XX, cond_jump_flag } },
{ "jaeH", { Jb, XX, cond_jump_flag } },
{ "jeH", { Jb, XX, cond_jump_flag } },
{ "jneH", { Jb, XX, cond_jump_flag } },
{ "jbeH", { Jb, XX, cond_jump_flag } },
{ "jaH", { Jb, XX, cond_jump_flag } },
/* 78 */
{ "jsH", { Jb, XX, cond_jump_flag } },
{ "jnsH", { Jb, XX, cond_jump_flag } },
{ "jpH", { Jb, XX, cond_jump_flag } },
{ "jnpH", { Jb, XX, cond_jump_flag } },
{ "jlH", { Jb, XX, cond_jump_flag } },
{ "jgeH", { Jb, XX, cond_jump_flag } },
{ "jleH", { Jb, XX, cond_jump_flag } },
{ "jgH", { Jb, XX, cond_jump_flag } },
/* 80 */
{ GRP1b },
{ GRP1S },
{ "(bad)", { XX } },
{ GRP1Ss },
{ "testB", { Eb, Gb } },
{ "testS", { Ev, Gv } },
{ "xchgB", { Eb, Gb } },
{ "xchgS", { Ev, Gv } },
/* 88 */
{ "movB", { Eb, Gb } },
{ "movS", { Ev, Gv } },
{ "movB", { Gb, Eb } },
{ "movS", { Gv, Ev } },
{ "movD", { Sv, Sw } },
{ "leaS", { Gv, M } },
{ "movD", { Sw, Sv } },
{ GRP1a },
/* 90 */
{ PREGRP38 },
{ "xchgS", { RMeCX, eAX } },
{ "xchgS", { RMeDX, eAX } },
{ "xchgS", { RMeBX, eAX } },
{ "xchgS", { RMeSP, eAX } },
{ "xchgS", { RMeBP, eAX } },
{ "xchgS", { RMeSI, eAX } },
{ "xchgS", { RMeDI, eAX } },
/* 98 */
{ "cW{t||t|}R", { XX } },
{ "cR{t||t|}O", { XX } },
{ "Jcall{T|}", { Ap } },
{ "(bad)", { XX } }, /* fwait */
{ "pushfT", { XX } },
{ "popfT", { XX } },
{ "sahf{|}", { XX } },
{ "lahf{|}", { XX } },
/* a0 */
{ "movB", { AL, Ob } },
{ "movS", { eAX, Ov } },
{ "movB", { Ob, AL } },
{ "movS", { Ov, eAX } },
{ "movs{b||b|}", { Ybr, Xb } },
{ "movs{R||R|}", { Yvr, Xv } },
{ "cmps{b||b|}", { Xb, Yb } },
{ "cmps{R||R|}", { Xv, Yv } },
/* a8 */
{ "testB", { AL, Ib } },
{ "testS", { eAX, Iv } },
{ "stosB", { Ybr, AL } },
{ "stosS", { Yvr, eAX } },
{ "lodsB", { ALr, Xb } },
{ "lodsS", { eAXr, Xv } },
{ "scasB", { AL, Yb } },
{ "scasS", { eAX, Yv } },
/* b0 */
{ "movB", { RMAL, Ib } },
{ "movB", { RMCL, Ib } },
{ "movB", { RMDL, Ib } },
{ "movB", { RMBL, Ib } },
{ "movB", { RMAH, Ib } },
{ "movB", { RMCH, Ib } },
{ "movB", { RMDH, Ib } },
{ "movB", { RMBH, Ib } },
/* b8 */
{ "movS", { RMeAX, Iv64 } },
{ "movS", { RMeCX, Iv64 } },
{ "movS", { RMeDX, Iv64 } },
{ "movS", { RMeBX, Iv64 } },
{ "movS", { RMeSP, Iv64 } },
{ "movS", { RMeBP, Iv64 } },
{ "movS", { RMeSI, Iv64 } },
{ "movS", { RMeDI, Iv64 } },
/* c0 */
{ GRP2b },
{ GRP2S },
{ "retT", { Iw } },
{ "retT", { XX } },
{ "les{S|}", { Gv, Mp } },
{ "ldsS", { Gv, Mp } },
{ GRP11_C6 },
{ GRP11_C7 },
/* c8 */
{ "enterT", { Iw, Ib } },
{ "leaveT", { XX } },
{ "lretP", { Iw } },
{ "lretP", { XX } },
{ "int3", { XX } },
{ "int", { Ib } },
{ "into{|}", { XX } },
{ "iretP", { XX } },
/* d0 */
{ GRP2b_one },
{ GRP2S_one },
{ GRP2b_cl },
{ GRP2S_cl },
{ "aam{|}", { sIb } },
{ "aad{|}", { sIb } },
{ "(bad)", { XX } },
{ "xlat", { DSBX } },
/* d8 */
/* 0xd8-0xdf: x87 escape bytes; decoding continues in the FPU tables.  */
{ FLOAT },
{ FLOAT },
{ FLOAT },
{ FLOAT },
{ FLOAT },
{ FLOAT },
{ FLOAT },
{ FLOAT },
/* e0 */
{ "loopneFH", { Jb, XX, loop_jcxz_flag } },
{ "loopeFH", { Jb, XX, loop_jcxz_flag } },
{ "loopFH", { Jb, XX, loop_jcxz_flag } },
{ "jEcxzH", { Jb, XX, loop_jcxz_flag } },
{ "inB", { AL, Ib } },
{ "inG", { zAX, Ib } },
{ "outB", { Ib, AL } },
{ "outG", { Ib, zAX } },
/* e8 */
{ "callT", { Jv } },
{ "jmpT", { Jv } },
{ "Jjmp{T|}", { Ap } },
{ "jmp", { Jb } },
{ "inB", { AL, indirDX } },
{ "inG", { zAX, indirDX } },
{ "outB", { indirDX, AL } },
{ "outG", { indirDX, zAX } },
/* f0 */
{ "(bad)", { XX } }, /* lock prefix */
{ "icebp", { XX } },
{ "(bad)", { XX } }, /* repne */
{ "(bad)", { XX } }, /* repz */
{ "hlt", { XX } },
{ "cmc", { XX } },
{ GRP3b },
{ GRP3S },
/* f8 */
{ "clc", { XX } },
{ "stc", { XX } },
{ "cli", { XX } },
{ "sti", { XX } },
{ "cld", { XX } },
{ "std", { XX } },
{ GRP4 },
{ GRP5 },
};
/* Two-byte (0x0f-escape) opcode dispatch table, indexed by the second
   opcode byte.  Same conventions as dis386[]: GRPxx entries dispatch on
   the ModRM reg field via grps[], PREGRPxx entries select the
   no-prefix/F3/66/F2 variant via prefix_user_table[], and THREE_BYTE_x
   entries continue into the 0f 38 / 0f 3a tables.  */
static const struct dis386 dis386_twobyte[] = {
/* 00 */
{ GRP6 },
{ GRP7 },
{ "larS", { Gv, Ew } },
{ "lslS", { Gv, Ew } },
{ "(bad)", { XX } },
{ "syscall", { XX } },
{ "clts", { XX } },
{ "sysretP", { XX } },
/* 08 */
{ "invd", { XX } },
{ "wbinvd", { XX } },
{ "(bad)", { XX } },
{ "ud2a", { XX } },
{ "(bad)", { XX } },
{ GRPAMD },
{ "femms", { XX } },
{ "", { MX, EM, OPSUF } }, /* See OP_3DNowSuffix. */
/* 10 */
{ PREGRP8 },
{ PREGRP9 },
{ PREGRP30 },
{ "movlpX", { EXq, XM, { SIMD_Fixup, 'h' } } },
{ "unpcklpX", { XM, EXq } },
{ "unpckhpX", { XM, EXq } },
{ PREGRP31 },
{ "movhpX", { EXq, XM, { SIMD_Fixup, 'l' } } },
/* 18 */
{ GRP16 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "nopQ", { Ev } },
/* 20 */
{ "movZ", { Rm, Cm } },
{ "movZ", { Rm, Dm } },
{ "movZ", { Cm, Rm } },
{ "movZ", { Dm, Rm } },
{ "movL", { Rd, Td } },
{ "(bad)", { XX } },
{ "movL", { Td, Rd } },
{ "(bad)", { XX } },
/* 28 */
{ "movapX", { XM, EXx } },
{ "movapX", { EXx, XM } },
{ PREGRP2 },
{ PREGRP33 },
{ PREGRP4 },
{ PREGRP3 },
{ PREGRP93 },
{ PREGRP94 },
/* 30 */
{ "wrmsr", { XX } },
{ "rdtsc", { XX } },
{ "rdmsr", { XX } },
{ "rdpmc", { XX } },
{ "sysenter", { XX } },
{ "sysexit", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 38 */
{ THREE_BYTE_0 },
{ "(bad)", { XX } },
{ THREE_BYTE_1 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 40 */
{ "cmovo", { Gv, Ev } },
{ "cmovno", { Gv, Ev } },
{ "cmovb", { Gv, Ev } },
{ "cmovae", { Gv, Ev } },
{ "cmove", { Gv, Ev } },
{ "cmovne", { Gv, Ev } },
{ "cmovbe", { Gv, Ev } },
{ "cmova", { Gv, Ev } },
/* 48 */
{ "cmovs", { Gv, Ev } },
{ "cmovns", { Gv, Ev } },
{ "cmovp", { Gv, Ev } },
{ "cmovnp", { Gv, Ev } },
{ "cmovl", { Gv, Ev } },
{ "cmovge", { Gv, Ev } },
{ "cmovle", { Gv, Ev } },
{ "cmovg", { Gv, Ev } },
/* 50 */
{ "movmskpX", { Gdq, XS } },
{ PREGRP13 },
{ PREGRP12 },
{ PREGRP11 },
{ "andpX", { XM, EXx } },
{ "andnpX", { XM, EXx } },
{ "orpX", { XM, EXx } },
{ "xorpX", { XM, EXx } },
/* 58 */
{ PREGRP0 },
{ PREGRP10 },
{ PREGRP17 },
{ PREGRP16 },
{ PREGRP14 },
{ PREGRP7 },
{ PREGRP5 },
{ PREGRP6 },
/* 60 */
{ PREGRP95 },
{ PREGRP96 },
{ PREGRP97 },
{ "packsswb", { MX, EM } },
{ "pcmpgtb", { MX, EM } },
{ "pcmpgtw", { MX, EM } },
{ "pcmpgtd", { MX, EM } },
{ "packuswb", { MX, EM } },
/* 68 */
{ "punpckhbw", { MX, EM } },
{ "punpckhwd", { MX, EM } },
{ "punpckhdq", { MX, EM } },
{ "packssdw", { MX, EM } },
{ PREGRP26 },
{ PREGRP24 },
{ "movd", { MX, Edq } },
{ PREGRP19 },
/* 70 */
{ PREGRP22 },
{ GRP12 },
{ GRP13 },
{ GRP14 },
{ "pcmpeqb", { MX, EM } },
{ "pcmpeqw", { MX, EM } },
{ "pcmpeqd", { MX, EM } },
{ "emms", { XX } },
/* 78 */
{ PREGRP34 },
{ PREGRP35 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP28 },
{ PREGRP29 },
{ PREGRP23 },
{ PREGRP20 },
/* 80 */
{ "joH", { Jv, XX, cond_jump_flag } },
{ "jnoH", { Jv, XX, cond_jump_flag } },
{ "jbH", { Jv, XX, cond_jump_flag } },
{ "jaeH", { Jv, XX, cond_jump_flag } },
{ "jeH", { Jv, XX, cond_jump_flag } },
{ "jneH", { Jv, XX, cond_jump_flag } },
{ "jbeH", { Jv, XX, cond_jump_flag } },
{ "jaH", { Jv, XX, cond_jump_flag } },
/* 88 */
{ "jsH", { Jv, XX, cond_jump_flag } },
{ "jnsH", { Jv, XX, cond_jump_flag } },
{ "jpH", { Jv, XX, cond_jump_flag } },
{ "jnpH", { Jv, XX, cond_jump_flag } },
{ "jlH", { Jv, XX, cond_jump_flag } },
{ "jgeH", { Jv, XX, cond_jump_flag } },
{ "jleH", { Jv, XX, cond_jump_flag } },
{ "jgH", { Jv, XX, cond_jump_flag } },
/* 90 */
{ "seto", { Eb } },
{ "setno", { Eb } },
{ "setb", { Eb } },
{ "setae", { Eb } },
{ "sete", { Eb } },
{ "setne", { Eb } },
{ "setbe", { Eb } },
{ "seta", { Eb } },
/* 98 */
{ "sets", { Eb } },
{ "setns", { Eb } },
{ "setp", { Eb } },
{ "setnp", { Eb } },
{ "setl", { Eb } },
{ "setge", { Eb } },
{ "setle", { Eb } },
{ "setg", { Eb } },
/* a0 */
{ "pushT", { fs } },
{ "popT", { fs } },
{ "cpuid", { XX } },
{ "btS", { Ev, Gv } },
{ "shldS", { Ev, Gv, Ib } },
{ "shldS", { Ev, Gv, CL } },
{ GRPPADLCK2 },
{ GRPPADLCK1 },
/* a8 */
{ "pushT", { gs } },
{ "popT", { gs } },
{ "rsm", { XX } },
{ "btsS", { Ev, Gv } },
{ "shrdS", { Ev, Gv, Ib } },
{ "shrdS", { Ev, Gv, CL } },
{ GRP15 },
{ "imulS", { Gv, Ev } },
/* b0 */
{ "cmpxchgB", { Eb, Gb } },
{ "cmpxchgS", { Ev, Gv } },
{ "lssS", { Gv, Mp } },
{ "btrS", { Ev, Gv } },
{ "lfsS", { Gv, Mp } },
{ "lgsS", { Gv, Mp } },
{ "movz{bR|x|bR|x}", { Gv, Eb } },
{ "movz{wR|x|wR|x}", { Gv, Ew } }, /* yes, there really is movzww ! */
/* b8 */
{ PREGRP37 },
{ "ud2b", { XX } },
{ GRP8 },
{ "btcS", { Ev, Gv } },
{ "bsfS", { Gv, Ev } },
{ PREGRP36 },
{ "movs{bR|x|bR|x}", { Gv, Eb } },
{ "movs{wR|x|wR|x}", { Gv, Ew } }, /* yes, there really is movsww ! */
/* c0 */
{ "xaddB", { Eb, Gb } },
{ "xaddS", { Ev, Gv } },
{ PREGRP1 },
{ "movntiS", { Ev, Gv } },
{ "pinsrw", { MX, Edqw, Ib } },
{ "pextrw", { Gdq, MS, Ib } },
{ "shufpX", { XM, EXx, Ib } },
{ GRP9 },
/* c8 */
{ "bswap", { RMeAX } },
{ "bswap", { RMeCX } },
{ "bswap", { RMeDX } },
{ "bswap", { RMeBX } },
{ "bswap", { RMeSP } },
{ "bswap", { RMeBP } },
{ "bswap", { RMeSI } },
{ "bswap", { RMeDI } },
/* d0 */
{ PREGRP27 },
{ "psrlw", { MX, EM } },
{ "psrld", { MX, EM } },
{ "psrlq", { MX, EM } },
{ "paddq", { MX, EM } },
{ "pmullw", { MX, EM } },
{ PREGRP21 },
{ "pmovmskb", { Gdq, MS } },
/* d8 */
{ "psubusb", { MX, EM } },
{ "psubusw", { MX, EM } },
{ "pminub", { MX, EM } },
{ "pand", { MX, EM } },
{ "paddusb", { MX, EM } },
{ "paddusw", { MX, EM } },
{ "pmaxub", { MX, EM } },
{ "pandn", { MX, EM } },
/* e0 */
{ "pavgb", { MX, EM } },
{ "psraw", { MX, EM } },
{ "psrad", { MX, EM } },
{ "pavgw", { MX, EM } },
{ "pmulhuw", { MX, EM } },
{ "pmulhw", { MX, EM } },
{ PREGRP15 },
{ PREGRP25 },
/* e8 */
{ "psubsb", { MX, EM } },
{ "psubsw", { MX, EM } },
{ "pminsw", { MX, EM } },
{ "por", { MX, EM } },
{ "paddsb", { MX, EM } },
{ "paddsw", { MX, EM } },
{ "pmaxsw", { MX, EM } },
{ "pxor", { MX, EM } },
/* f0 */
{ PREGRP32 },
{ "psllw", { MX, EM } },
{ "pslld", { MX, EM } },
{ "psllq", { MX, EM } },
{ "pmuludq", { MX, EM } },
{ "pmaddwd", { MX, EM } },
{ "psadbw", { MX, EM } },
{ PREGRP18 },
/* f8 */
{ "psubb", { MX, EM } },
{ "psubw", { MX, EM } },
{ "psubd", { MX, EM } },
{ "psubq", { MX, EM } },
{ "paddb", { MX, EM } },
{ "paddw", { MX, EM } },
{ "paddd", { MX, EM } },
{ "(bad)", { XX } },
};
/* Nonzero when the one-byte opcode (the index) takes a ModRM byte.
   Keep in sync with dis386[]; see the MODRM_CHECK abort below.  */
static const unsigned char onebyte_has_modrm[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
/* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
/* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
/* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
/* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
/* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
/* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
/* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
/* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* Nonzero when two-byte opcode 0f XX (the index) takes a ModRM byte.
   Keep in sync with dis386_twobyte[]; see the MODRM_CHECK abort below.  */
static const unsigned char twobyte_has_modrm[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
/* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1, /* 1f */
/* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
/* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
/* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
/* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
/* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
/* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
/* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
/* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
/* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
/* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* Nonzero when two-byte opcode 0f XX treats a 66 (operand-size/DATA)
   prefix as an opcode selector (via prefix_user_table) rather than as
   an ordinary size prefix.  */
static const unsigned char twobyte_uses_DATA_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */
/* 70 */ 1,0,0,0,0,0,0,0,1,1,0,0,1,1,1,1, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* Nonzero when two-byte opcode 0f XX treats an F2 (REPNZ) prefix as an
   opcode selector (via prefix_user_table) rather than as a repeat
   prefix.  */
static const unsigned char twobyte_uses_REPNZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,1,0,0,0,0,0,0,1,1,1,0,1,1,1,1, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 1,0,0,0,0,0,0,0,1,1,0,0,1,1,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* Nonzero when two-byte opcode 0f XX treats an F3 (REPZ) prefix as an
   opcode selector (via prefix_user_table) rather than as a repeat
   prefix.  */
static const unsigned char twobyte_uses_REPZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, /* 6f */
/* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, /* bf */
/* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 38 XX uses DATA prefix. */
/* Index is the third opcode byte; nonzero means the 66 prefix selects a
   distinct instruction form.  */
static const unsigned char threebyte_0x38_uses_DATA_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0, /* 0f */
/* 10 */ 1,0,0,0,1,1,0,1,0,0,0,0,1,1,1,0, /* 1f */
/* 20 */ 1,1,1,1,1,1,0,0,1,1,1,1,0,0,0,0, /* 2f */
/* 30 */ 1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1, /* 3f */
/* 40 */ 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 38 XX uses REPNZ prefix. */
/* Index is the third opcode byte; nonzero means the F2 prefix selects a
   distinct instruction form (only the 0xf0/0xf1/0xf7 rows here).  */
static const unsigned char threebyte_0x38_uses_REPNZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 38 XX uses REPZ prefix. */
/* Index is the third opcode byte; nonzero means the F3 prefix selects a
   distinct instruction form (only 0xf7 here).  */
static const unsigned char threebyte_0x38_uses_REPZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 3a XX uses DATA prefix. */
/* Index is the third opcode byte; nonzero means the 66 prefix selects a
   distinct instruction form.  */
static const unsigned char threebyte_0x3a_uses_DATA_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1, /* 0f */
/* 10 */ 0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 3a XX uses REPNZ prefix. */
/* All-zero: no 0f 3a opcode here is modified by F2.  Kept as a full
   table so all three prefix checks can be done uniformly.  */
static const unsigned char threebyte_0x3a_uses_REPNZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* This is used to determine if opcode 0f 3a XX uses REPZ prefix. */
/* All-zero: no 0f 3a opcode here is modified by F3.  Kept as a full
   table so all three prefix checks can be done uniformly.  */
static const unsigned char threebyte_0x3a_uses_REPZ_prefix[256] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ------------------------------- */
/* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
/* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */
/* 20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 2f */
/* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
/* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
/* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */
/* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 6f */
/* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 7f */
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
/* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
/* c0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ef */
/* f0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* ff */
/* ------------------------------- */
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
};
/* File-scope disassembler state.  NOTE(review): this module is
   single-threaded by design — all of this is global mutable state.  */
/* Output text accumulates in obuf; obufp is the append cursor
   (presumably advanced by the printing helpers — confirm against
   putop/oappend, which are outside this view).  */
static char obuf[100];
static char *obufp;
static char scratchbuf[100];
/* start_codep = first byte of the insn; insn_codep = first byte after
   the prefixes; codep = current fetch position.  NOTE(review):
   inferred from names — confirm against the fetch loop.  */
static unsigned char *start_codep;
static unsigned char *insn_codep;
static unsigned char *codep;
static disassemble_info *the_info;
/* Decoded ModRM byte fields for the current instruction.  */
static struct
{
int mod;
int reg;
int rm;
}
modrm;
/* Nonzero once the current opcode is known to take a ModRM byte.  */
static unsigned char need_modrm;
/* If we are accessing mod/rm/reg without need_modrm set, then the
values are stale. Hitting this abort likely indicates that you
need to update onebyte_has_modrm or twobyte_has_modrm. */
#define MODRM_CHECK if (!need_modrm) abort ()
/* Active register/segment name tables; pointed at either the intel_*
   or att_* arrays below depending on the selected syntax.  */
static const char * const *names64;
static const char * const *names32;
static const char * const *names16;
static const char * const *names8;
static const char * const *names8rex;
static const char * const *names_seg;
static const char * const *index16;
/* Intel-syntax 64-bit general-register names, indexed by the
   REX-extended register number 0-15.  */
static const char * const intel_names64[] = {
  [0] = "rax", [1] = "rcx", [2] = "rdx", [3] = "rbx",
  [4] = "rsp", [5] = "rbp", [6] = "rsi", [7] = "rdi",
  [8] = "r8",  [9] = "r9",  [10] = "r10", [11] = "r11",
  [12] = "r12", [13] = "r13", [14] = "r14", [15] = "r15"
};
/* Intel-syntax 32-bit general-register names, indexed by the
   REX-extended register number 0-15.  */
static const char * const intel_names32[] = {
  [0] = "eax", [1] = "ecx", [2] = "edx", [3] = "ebx",
  [4] = "esp", [5] = "ebp", [6] = "esi", [7] = "edi",
  [8] = "r8d", [9] = "r9d", [10] = "r10d", [11] = "r11d",
  [12] = "r12d", [13] = "r13d", [14] = "r14d", [15] = "r15d"
};
/* Intel-syntax 16-bit general-register names, indexed by the
   REX-extended register number 0-15.  */
static const char * const intel_names16[] = {
  [0] = "ax", [1] = "cx", [2] = "dx", [3] = "bx",
  [4] = "sp", [5] = "bp", [6] = "si", [7] = "di",
  [8] = "r8w", [9] = "r9w", [10] = "r10w", [11] = "r11w",
  [12] = "r12w", [13] = "r13w", [14] = "r14w", [15] = "r15w"
};
/* Intel-syntax legacy 8-bit register names (no REX): indices 4-7 are
   the high-byte registers ah/ch/dh/bh, so only 8 entries exist.  */
static const char * const intel_names8[] = {
  [0] = "al", [1] = "cl", [2] = "dl", [3] = "bl",
  [4] = "ah", [5] = "ch", [6] = "dh", [7] = "bh"
};
/* Intel-syntax 8-bit register names when a REX prefix is present:
   indices 4-7 become spl/bpl/sil/dil and 8-15 are available.  */
static const char * const intel_names8rex[] = {
  [0] = "al",  [1] = "cl",  [2] = "dl",  [3] = "bl",
  [4] = "spl", [5] = "bpl", [6] = "sil", [7] = "dil",
  [8] = "r8b", [9] = "r9b", [10] = "r10b", [11] = "r11b",
  [12] = "r12b", [13] = "r13b", [14] = "r14b", [15] = "r15b"
};
/* Intel-syntax segment-register names; entries 6 and 7 are reserved
   encodings and print as "?".  */
static const char * const intel_names_seg[] = {
  [0] = "es", [1] = "cs", [2] = "ss", [3] = "ds",
  [4] = "fs", [5] = "gs", [6] = "?",  [7] = "?"
};
/* Intel-syntax 16-bit addressing base/index combinations, indexed by
   the ModRM r/m field.  */
static const char * const intel_index16[] = {
  [0] = "bx+si", [1] = "bx+di", [2] = "bp+si", [3] = "bp+di",
  [4] = "si",    [5] = "di",    [6] = "bp",    [7] = "bx"
};
/* AT&T-syntax 64-bit general-register names (with '%' sigil), indexed
   by the REX-extended register number 0-15.  */
static const char * const att_names64[] = {
  [0] = "%rax", [1] = "%rcx", [2] = "%rdx", [3] = "%rbx",
  [4] = "%rsp", [5] = "%rbp", [6] = "%rsi", [7] = "%rdi",
  [8] = "%r8",  [9] = "%r9",  [10] = "%r10", [11] = "%r11",
  [12] = "%r12", [13] = "%r13", [14] = "%r14", [15] = "%r15"
};
/* AT&T-syntax 32-bit general-register names, indexed by the
   REX-extended register number 0-15.  */
static const char * const att_names32[] = {
  [0] = "%eax", [1] = "%ecx", [2] = "%edx", [3] = "%ebx",
  [4] = "%esp", [5] = "%ebp", [6] = "%esi", [7] = "%edi",
  [8] = "%r8d", [9] = "%r9d", [10] = "%r10d", [11] = "%r11d",
  [12] = "%r12d", [13] = "%r13d", [14] = "%r14d", [15] = "%r15d"
};
/* AT&T-syntax 16-bit general-register names, indexed by the
   REX-extended register number 0-15.  */
static const char * const att_names16[] = {
  [0] = "%ax", [1] = "%cx", [2] = "%dx", [3] = "%bx",
  [4] = "%sp", [5] = "%bp", [6] = "%si", [7] = "%di",
  [8] = "%r8w", [9] = "%r9w", [10] = "%r10w", [11] = "%r11w",
  [12] = "%r12w", [13] = "%r13w", [14] = "%r14w", [15] = "%r15w"
};
/* AT&T-syntax legacy 8-bit register names (no REX): indices 4-7 are
   the high-byte registers, so only 8 entries exist.  */
static const char * const att_names8[] = {
  [0] = "%al", [1] = "%cl", [2] = "%dl", [3] = "%bl",
  [4] = "%ah", [5] = "%ch", [6] = "%dh", [7] = "%bh"
};
/* AT&T-syntax 8-bit register names when a REX prefix is present:
   indices 4-7 become %spl/%bpl/%sil/%dil and 8-15 are available.  */
static const char * const att_names8rex[] = {
  [0] = "%al",  [1] = "%cl",  [2] = "%dl",  [3] = "%bl",
  [4] = "%spl", [5] = "%bpl", [6] = "%sil", [7] = "%dil",
  [8] = "%r8b", [9] = "%r9b", [10] = "%r10b", [11] = "%r11b",
  [12] = "%r12b", [13] = "%r13b", [14] = "%r14b", [15] = "%r15b"
};
/* AT&T-syntax segment-register names; entries 6 and 7 are reserved
   encodings and print as "%?".  */
static const char * const att_names_seg[] = {
  [0] = "%es", [1] = "%cs", [2] = "%ss", [3] = "%ds",
  [4] = "%fs", [5] = "%gs", [6] = "%?",  [7] = "%?"
};
/* AT&T-syntax 16-bit addressing base/index combinations, indexed by
   the ModRM r/m field.  */
static const char * const att_index16[] = {
  [0] = "%bx,%si", [1] = "%bx,%di", [2] = "%bp,%si", [3] = "%bp,%di",
  [4] = "%si",     [5] = "%di",     [6] = "%bp",     [7] = "%bx"
};
/* ModRM reg-field dispatch tables: grps[GRPxx][reg] gives the
   instruction form for each value (0-7) of the ModRM reg field when
   the main tables name a GRPxx entry.  Row order must match the GRPxx
   macro numbering.  */
static const struct dis386 grps[][8] = {
/* GRP1a */
{
{ "popU", { stackEv } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRP1b */
{
{ "addA", { Eb, Ib } },
{ "orA", { Eb, Ib } },
{ "adcA", { Eb, Ib } },
{ "sbbA", { Eb, Ib } },
{ "andA", { Eb, Ib } },
{ "subA", { Eb, Ib } },
{ "xorA", { Eb, Ib } },
{ "cmpA", { Eb, Ib } },
},
/* GRP1S */
{
{ "addQ", { Ev, Iv } },
{ "orQ", { Ev, Iv } },
{ "adcQ", { Ev, Iv } },
{ "sbbQ", { Ev, Iv } },
{ "andQ", { Ev, Iv } },
{ "subQ", { Ev, Iv } },
{ "xorQ", { Ev, Iv } },
{ "cmpQ", { Ev, Iv } },
},
/* GRP1Ss */
{
{ "addQ", { Ev, sIb } },
{ "orQ", { Ev, sIb } },
{ "adcQ", { Ev, sIb } },
{ "sbbQ", { Ev, sIb } },
{ "andQ", { Ev, sIb } },
{ "subQ", { Ev, sIb } },
{ "xorQ", { Ev, sIb } },
{ "cmpQ", { Ev, sIb } },
},
/* GRP2b */
{
{ "rolA", { Eb, Ib } },
{ "rorA", { Eb, Ib } },
{ "rclA", { Eb, Ib } },
{ "rcrA", { Eb, Ib } },
{ "shlA", { Eb, Ib } },
{ "shrA", { Eb, Ib } },
{ "(bad)", { XX } },
{ "sarA", { Eb, Ib } },
},
/* GRP2S */
{
{ "rolQ", { Ev, Ib } },
{ "rorQ", { Ev, Ib } },
{ "rclQ", { Ev, Ib } },
{ "rcrQ", { Ev, Ib } },
{ "shlQ", { Ev, Ib } },
{ "shrQ", { Ev, Ib } },
{ "(bad)", { XX } },
{ "sarQ", { Ev, Ib } },
},
/* GRP2b_one */
{
{ "rolA", { Eb, I1 } },
{ "rorA", { Eb, I1 } },
{ "rclA", { Eb, I1 } },
{ "rcrA", { Eb, I1 } },
{ "shlA", { Eb, I1 } },
{ "shrA", { Eb, I1 } },
{ "(bad)", { XX } },
{ "sarA", { Eb, I1 } },
},
/* GRP2S_one */
{
{ "rolQ", { Ev, I1 } },
{ "rorQ", { Ev, I1 } },
{ "rclQ", { Ev, I1 } },
{ "rcrQ", { Ev, I1 } },
{ "shlQ", { Ev, I1 } },
{ "shrQ", { Ev, I1 } },
{ "(bad)", { XX } },
{ "sarQ", { Ev, I1 } },
},
/* GRP2b_cl */
{
{ "rolA", { Eb, CL } },
{ "rorA", { Eb, CL } },
{ "rclA", { Eb, CL } },
{ "rcrA", { Eb, CL } },
{ "shlA", { Eb, CL } },
{ "shrA", { Eb, CL } },
{ "(bad)", { XX } },
{ "sarA", { Eb, CL } },
},
/* GRP2S_cl */
{
{ "rolQ", { Ev, CL } },
{ "rorQ", { Ev, CL } },
{ "rclQ", { Ev, CL } },
{ "rcrQ", { Ev, CL } },
{ "shlQ", { Ev, CL } },
{ "shrQ", { Ev, CL } },
{ "(bad)", { XX } },
{ "sarQ", { Ev, CL } },
},
/* GRP3b */
{
{ "testA", { Eb, Ib } },
{ "(bad)", { Eb } }, /* NOTE(review): operand kept so the ModRM bytes
                        are presumably still consumed -- confirm.  */
{ "notA", { Eb } },
{ "negA", { Eb } },
{ "mulA", { Eb } }, /* Don't print the implicit %al register, */
{ "imulA", { Eb } }, /* to distinguish these opcodes from other */
{ "divA", { Eb } }, /* mul/imul opcodes. Do the same for div */
{ "idivA", { Eb } }, /* and idiv for consistency. */
},
/* GRP3S */
{
{ "testQ", { Ev, Iv } },
{ "(bad)", { XX } },
{ "notQ", { Ev } },
{ "negQ", { Ev } },
{ "mulQ", { Ev } }, /* Don't print the implicit register. */
{ "imulQ", { Ev } },
{ "divQ", { Ev } },
{ "idivQ", { Ev } },
},
/* GRP4 */
{
{ "incA", { Eb } },
{ "decA", { Eb } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRP5 */
{
{ "incQ", { Ev } },
{ "decQ", { Ev } },
{ "callT", { indirEv } },
{ "JcallT", { indirEp } },
{ "jmpT", { indirEv } },
{ "JjmpT", { indirEp } },
{ "pushU", { stackEv } },
{ "(bad)", { XX } },
},
/* GRP6 */
{
{ "sldtD", { Sv } },
{ "strD", { Sv } },
{ "lldt", { Ew } },
{ "ltr", { Ew } },
{ "verr", { Ew } },
{ "verw", { Ew } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRP7 */
{
{ "sgdt{Q|IQ||}", { { VMX_Fixup, 0 } } },
{ "sidt{Q|IQ||}", { { PNI_Fixup, 0 } } },
{ "lgdt{Q|Q||}", { M } },
{ "lidt{Q|Q||}", { { SVME_Fixup, 0 } } },
{ "smswD", { Sv } },
{ "(bad)", { XX } },
{ "lmsw", { Ew } },
{ "invlpg", { { INVLPG_Fixup, w_mode } } },
},
/* GRP8 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "btQ", { Ev, Ib } },
{ "btsQ", { Ev, Ib } },
{ "btrQ", { Ev, Ib } },
{ "btcQ", { Ev, Ib } },
},
/* GRP9 */
{
{ "(bad)", { XX } },
{ "cmpxchg8b", { { CMPXCHG8B_Fixup, q_mode } } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "", { VM } }, /* See OP_VMX. */
{ "vmptrst", { Mq } },
},
/* GRP11_C6 */
{
{ "movA", { Eb, Ib } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRP11_C7 */
{
{ "movQ", { Ev, Iv } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRP12 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "psrlw", { MS, Ib } },
{ "(bad)", { XX } },
{ "psraw", { MS, Ib } },
{ "(bad)", { XX } },
{ "psllw", { MS, Ib } },
{ "(bad)", { XX } },
},
/* GRP13 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "psrld", { MS, Ib } },
{ "(bad)", { XX } },
{ "psrad", { MS, Ib } },
{ "(bad)", { XX } },
{ "pslld", { MS, Ib } },
{ "(bad)", { XX } },
},
/* GRP14 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "psrlq", { MS, Ib } },
{ "psrldq", { MS, Ib } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "psllq", { MS, Ib } },
{ "pslldq", { MS, Ib } },
},
/* GRP15 */
{
{ "fxsave", { Ev } },
{ "fxrstor", { Ev } },
{ "ldmxcsr", { Ev } },
{ "stmxcsr", { Ev } },
{ "(bad)", { XX } },
{ "lfence", { { OP_0fae, 0 } } },
{ "mfence", { { OP_0fae, 0 } } },
{ "clflush", { { OP_0fae, 0 } } },
},
/* GRP16 */
{
{ "prefetchnta", { Ev } },
{ "prefetcht0", { Ev } },
{ "prefetcht1", { Ev } },
{ "prefetcht2", { Ev } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRPAMD */
{
{ "prefetch", { Eb } },
{ "prefetchw", { Eb } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* GRPPADLCK1 */
{
{ "xstore-rng", { { OP_0f07, 0 } } },
{ "xcrypt-ecb", { { OP_0f07, 0 } } },
{ "xcrypt-cbc", { { OP_0f07, 0 } } },
{ "xcrypt-ctr", { { OP_0f07, 0 } } },
{ "xcrypt-cfb", { { OP_0f07, 0 } } },
{ "xcrypt-ofb", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
},
/* GRPPADLCK2 */
{
{ "montmul", { { OP_0f07, 0 } } },
{ "xsha1", { { OP_0f07, 0 } } },
{ "xsha256", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
{ "(bad)", { { OP_0f07, 0 } } },
}
};
/* Opcodes whose decoding depends on the last repeat/operand-size prefix
   seen.  Each PREGRPnn row has four columns, indexed by prefix:
   [0] no prefix, [1] 0xF3 (REPZ), [2] 0x66 (DATA), [3] 0xF2 (REPNZ).
   "(bad)" marks prefix/opcode combinations with no defined encoding.  */
static const struct dis386 prefix_user_table[][4] = {
/* PREGRP0 */
{
{ "addps", { XM, EXx } },
{ "addss", { XM, EXd } },
{ "addpd", { XM, EXx } },
{ "addsd", { XM, EXq } },
},
/* PREGRP1 */
{
{ "", { XM, EXx, OPSIMD } }, /* See OP_SIMD_SUFFIX. */
{ "", { XM, EXx, OPSIMD } },
{ "", { XM, EXx, OPSIMD } },
{ "", { XM, EXx, OPSIMD } },
},
/* PREGRP2 */
{
{ "cvtpi2ps", { XM, EMC } },
{ "cvtsi2ssY", { XM, Ev } },
{ "cvtpi2pd", { XM, EMC } },
{ "cvtsi2sdY", { XM, Ev } },
},
/* PREGRP3 */
{
{ "cvtps2pi", { MXC, EXx } },
{ "cvtss2siY", { Gv, EXx } },
{ "cvtpd2pi", { MXC, EXx } },
{ "cvtsd2siY", { Gv, EXx } },
},
/* PREGRP4 */
{
{ "cvttps2pi", { MXC, EXx } },
{ "cvttss2siY", { Gv, EXx } },
{ "cvttpd2pi", { MXC, EXx } },
{ "cvttsd2siY", { Gv, EXx } },
},
/* PREGRP5 */
{
{ "divps", { XM, EXx } },
{ "divss", { XM, EXx } },
{ "divpd", { XM, EXx } },
{ "divsd", { XM, EXx } },
},
/* PREGRP6 */
{
{ "maxps", { XM, EXx } },
{ "maxss", { XM, EXx } },
{ "maxpd", { XM, EXx } },
{ "maxsd", { XM, EXx } },
},
/* PREGRP7 */
{
{ "minps", { XM, EXx } },
{ "minss", { XM, EXx } },
{ "minpd", { XM, EXx } },
{ "minsd", { XM, EXx } },
},
/* PREGRP8 */
{
{ "movups", { XM, EXx } },
{ "movss", { XM, EXx } },
{ "movupd", { XM, EXx } },
{ "movsd", { XM, EXx } },
},
/* PREGRP9 */
{
{ "movups", { EXx, XM } },
{ "movss", { EXx, XM } },
{ "movupd", { EXx, XM } },
{ "movsd", { EXx, XM } },
},
/* PREGRP10 */
{
{ "mulps", { XM, EXx } },
{ "mulss", { XM, EXx } },
{ "mulpd", { XM, EXx } },
{ "mulsd", { XM, EXx } },
},
/* PREGRP11 */
{
{ "rcpps", { XM, EXx } },
{ "rcpss", { XM, EXx } },
{ "(bad)", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP12 */
{
{ "rsqrtps",{ XM, EXx } },
{ "rsqrtss",{ XM, EXx } },
{ "(bad)", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP13 */
{
{ "sqrtps", { XM, EXx } },
{ "sqrtss", { XM, EXx } },
{ "sqrtpd", { XM, EXx } },
{ "sqrtsd", { XM, EXx } },
},
/* PREGRP14 */
{
{ "subps", { XM, EXx } },
{ "subss", { XM, EXx } },
{ "subpd", { XM, EXx } },
{ "subsd", { XM, EXx } },
},
/* PREGRP15 */
{
{ "(bad)", { XM, EXx } },
{ "cvtdq2pd", { XM, EXq } },
{ "cvttpd2dq", { XM, EXx } },
{ "cvtpd2dq", { XM, EXx } },
},
/* PREGRP16 */
{
{ "cvtdq2ps", { XM, EXx } },
{ "cvttps2dq", { XM, EXx } },
{ "cvtps2dq", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP17 */
{
{ "cvtps2pd", { XM, EXq } },
{ "cvtss2sd", { XM, EXx } },
{ "cvtpd2ps", { XM, EXx } },
{ "cvtsd2ss", { XM, EXx } },
},
/* PREGRP18 */
{
{ "maskmovq", { MX, MS } },
{ "(bad)", { XM, EXx } },
{ "maskmovdqu", { XM, XS } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP19 */
{
{ "movq", { MX, EM } },
{ "movdqu", { XM, EXx } },
{ "movdqa", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP20 */
{
{ "movq", { EM, MX } },
{ "movdqu", { EXx, XM } },
{ "movdqa", { EXx, XM } },
{ "(bad)", { EXx, XM } },
},
/* PREGRP21 */
{
{ "(bad)", { EXx, XM } },
{ "movq2dq",{ XM, MS } },
{ "movq", { EXx, XM } },
{ "movdq2q",{ MX, XS } },
},
/* PREGRP22 */
{
{ "pshufw", { MX, EM, Ib } },
{ "pshufhw",{ XM, EXx, Ib } },
{ "pshufd", { XM, EXx, Ib } },
{ "pshuflw",{ XM, EXx, Ib } },
},
/* PREGRP23 */
{
{ "movd", { Edq, MX } },
{ "movq", { XM, EXx } },
{ "movd", { Edq, XM } },
{ "(bad)", { Ed, XM } },
},
/* PREGRP24 */
{
{ "(bad)", { MX, EXx } },
{ "(bad)", { XM, EXx } },
{ "punpckhqdq", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP25 */
{
{ "movntq", { EM, MX } },
{ "(bad)", { EM, XM } },
{ "movntdq",{ EM, XM } },
{ "(bad)", { EM, XM } },
},
/* PREGRP26 */
{
{ "(bad)", { MX, EXx } },
{ "(bad)", { XM, EXx } },
{ "punpcklqdq", { XM, EXx } },
{ "(bad)", { XM, EXx } },
},
/* PREGRP27 */
{
{ "(bad)", { MX, EXx } },
{ "(bad)", { XM, EXx } },
{ "addsubpd", { XM, EXx } },
{ "addsubps", { XM, EXx } },
},
/* PREGRP28 */
{
{ "(bad)", { MX, EXx } },
{ "(bad)", { XM, EXx } },
{ "haddpd", { XM, EXx } },
{ "haddps", { XM, EXx } },
},
/* PREGRP29 */
{
{ "(bad)", { MX, EXx } },
{ "(bad)", { XM, EXx } },
{ "hsubpd", { XM, EXx } },
{ "hsubps", { XM, EXx } },
},
/* PREGRP30 */
{
{ "movlpX", { XM, EXq, { SIMD_Fixup, 'h' } } }, /* really only 2 operands */
{ "movsldup", { XM, EXx } },
{ "movlpd", { XM, EXq } },
{ "movddup", { XM, EXq } },
},
/* PREGRP31 */
{
{ "movhpX", { XM, EXq, { SIMD_Fixup, 'l' } } },
{ "movshdup", { XM, EXx } },
{ "movhpd", { XM, EXq } },
{ "(bad)", { XM, EXq } },
},
/* PREGRP32 */
{
{ "(bad)", { XM, EXx } },
{ "(bad)", { XM, EXx } },
{ "(bad)", { XM, EXx } },
{ "lddqu", { XM, M } },
},
/* PREGRP33 */
{
{"movntps", { Ev, XM } },
{"movntss", { Ev, XM } },
{"movntpd", { Ev, XM } },
{"movntsd", { Ev, XM } },
},
/* PREGRP34 */
{
{"vmread", { Em, Gm } },
{"(bad)", { XX } },
{"extrq", { XS, Ib, Ib } },
{"insertq", { XM, XS, Ib, Ib } },
},
/* PREGRP35 */
{
{"vmwrite", { Gm, Em } },
{"(bad)", { XX } },
{"extrq", { XM, XS } },
{"insertq", { XM, XS } },
},
/* PREGRP36 */
{
{ "bsrS", { Gv, Ev } },
{ "lzcntS", { Gv, Ev } },
{ "bsrS", { Gv, Ev } },
{ "(bad)", { XX } },
},
/* PREGRP37 */
{
{ "(bad)", { XX } },
{ "popcntS", { Gv, Ev } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* PREGRP38 */
{
{ "xchgS", { { NOP_Fixup1, eAX_reg }, { NOP_Fixup2, eAX_reg } } },
{ "pause", { XX } },
{ "xchgS", { { NOP_Fixup1, eAX_reg }, { NOP_Fixup2, eAX_reg } } },
{ "(bad)", { XX } },
},
/* PREGRP39 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pblendvb", {XM, EXx, XMM0 } },
{ "(bad)", { XX } },
},
/* PREGRP40 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "blendvps", {XM, EXx, XMM0 } },
{ "(bad)", { XX } },
},
/* PREGRP41 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "blendvpd", { XM, EXx, XMM0 } },
{ "(bad)", { XX } },
},
/* PREGRP42 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "ptest", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP43 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxbw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP44 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxbd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP45 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxbq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP46 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxwd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP47 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxwq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP48 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovsxdq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP49 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmuldq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP50 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpeqq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP51 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "movntdqa", { XM, EM } },
{ "(bad)", { XX } },
},
/* PREGRP52 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "packusdw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP53 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxbw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP54 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxbd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP55 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxbq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP56 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxwd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP57 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxwq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP58 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmovzxdq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP59 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pminsb", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP60 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pminsd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP61 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pminuw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP62 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pminud", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP63 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmaxsb", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP64 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmaxsd", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP65 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmaxuw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP66 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmaxud", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP67 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pmulld", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP68 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "phminposuw", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP69 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "roundps", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP70 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "roundpd", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP71 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "roundss", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP72 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "roundsd", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP73 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "blendps", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP74 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "blendpd", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP75 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pblendw", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP76 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pextrb", { Edqb, XM, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP77 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pextrw", { Edqw, XM, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP78 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pextrK", { Edq, XM, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP79 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "extractps", { Edqd, XM, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP80 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pinsrb", { XM, Edqb, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP81 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "insertps", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP82 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pinsrK", { XM, Edq, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP83 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "dpps", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP84 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "dppd", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP85 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "mpsadbw", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP86 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpgtq", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP87 */
{
{ "movbe", { Gv, Ev } },
{ "(bad)", { XX } },
{ "movbe", { Gv, Ev } },
{ "crc32", { Gdq, { CRC32_Fixup, b_mode } } },
},
/* PREGRP88 */
{
{ "movbe", { Ev, Gv } },
{ "(bad)", { XX } },
{ "movbe", { Ev, Gv } },
{ "crc32", { Gdq, { CRC32_Fixup, v_mode } } },
},
/* PREGRP89 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpestrm", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP90 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpestri", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP91 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpistrm", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP92 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pcmpistri", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP93 */
{
{ "ucomiss",{ XM, EXd } },
{ "(bad)", { XX } },
{ "ucomisd",{ XM, EXq } },
{ "(bad)", { XX } },
},
/* PREGRP94 */
{
{ "comiss", { XM, EXd } },
{ "(bad)", { XX } },
{ "comisd", { XM, EXq } },
{ "(bad)", { XX } },
},
/* PREGRP95 */
{
{ "punpcklbw",{ MX, EMd } },
{ "(bad)", { XX } },
{ "punpcklbw",{ MX, EMq } },
{ "(bad)", { XX } },
},
/* PREGRP96 */
{
{ "punpcklwd",{ MX, EMd } },
{ "(bad)", { XX } },
{ "punpcklwd",{ MX, EMq } },
{ "(bad)", { XX } },
},
/* PREGRP97 */
{
{ "punpckldq",{ MX, EMd } },
{ "(bad)", { XX } },
{ "punpckldq",{ MX, EMq } },
{ "(bad)", { XX } },
},
/* PREGRP98 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pclmulqdq", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP99 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aesimc", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP100 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aesenc", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP101 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aesenclast", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP102 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aesdec", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP103 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aesdeclast", { XM, EXx } },
{ "(bad)", { XX } },
},
/* PREGRP104 */
{
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "aeskeygenassist", { XM, EXx, Ib } },
{ "(bad)", { XX } },
},
/* PREGRP105 */
{
{ "andnS", { Gv, Bv, Ev } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* PREGRP106 */
{
{ "bextrS", { Gv, Ev, Bv } },
{ "sarxS", { Gv, Ev, Bv } },
{ "shlxS", { Gv, Ev, Bv } },
{ "shrxS", { Gv, Ev, Bv } },
},
};
/* Opcodes whose decoding differs between 32-bit and 64-bit mode.
   Column 0 is the 32-bit decoding, column 1 the 64-bit decoding;
   "(bad)" marks encodings that are invalid in 64-bit mode.  */
static const struct dis386 x86_64_table[][2] = {
{
{ "pusha{P|}", { XX } },
{ "(bad)", { XX } },
},
{
{ "popa{P|}", { XX } },
{ "(bad)", { XX } },
},
{
{ "bound{S|}", { Gv, Ma } },
{ "(bad)", { XX } },
},
{
{ "arpl", { Ew, Gw } },
/* In 64-bit mode the 0x63 byte is movsxd instead of arpl.  */
{ "movs{||lq|xd}", { Gv, Ed } },
},
};
/* Three-byte opcode tables, indexed by the third opcode byte.
   THREE_BYTE_0 covers the 0F 38 xx map, THREE_BYTE_1 the 0F 3A xx map.
   Entries naming a PREGRPnn defer to prefix_user_table for the final
   prefix-dependent decoding; "(bad)" marks undefined opcodes.  */
static const struct dis386 three_byte_table[][256] = {
/* THREE_BYTE_0 */
{
/* 00 */
{ "pshufb", { MX, EM } },
{ "phaddw", { MX, EM } },
{ "phaddd", { MX, EM } },
{ "phaddsw", { MX, EM } },
{ "pmaddubsw", { MX, EM } },
{ "phsubw", { MX, EM } },
{ "phsubd", { MX, EM } },
{ "phsubsw", { MX, EM } },
/* 08 */
{ "psignb", { MX, EM } },
{ "psignw", { MX, EM } },
{ "psignd", { MX, EM } },
{ "pmulhrsw", { MX, EM } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 10 */
{ PREGRP39 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP40 },
{ PREGRP41 },
{ "(bad)", { XX } },
{ PREGRP42 },
/* 18 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "pabsb", { MX, EM } },
{ "pabsw", { MX, EM } },
{ "pabsd", { MX, EM } },
{ "(bad)", { XX } },
/* 20 */
{ PREGRP43 },
{ PREGRP44 },
{ PREGRP45 },
{ PREGRP46 },
{ PREGRP47 },
{ PREGRP48 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 28 */
{ PREGRP49 },
{ PREGRP50 },
{ PREGRP51 },
{ PREGRP52 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 30 */
{ PREGRP53 },
{ PREGRP54 },
{ PREGRP55 },
{ PREGRP56 },
{ PREGRP57 },
{ PREGRP58 },
{ "(bad)", { XX } },
{ PREGRP86 },
/* 38 */
{ PREGRP59 },
{ PREGRP60 },
{ PREGRP61 },
{ PREGRP62 },
{ PREGRP63 },
{ PREGRP64 },
{ PREGRP65 },
{ PREGRP66 },
/* 40 */
{ PREGRP67 },
{ PREGRP68 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 48 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 50 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 58 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 60 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 68 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 70 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 78 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 80 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 88 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 90 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 98 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* a0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* a8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* b0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* b8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* c0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* c8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* d0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* d8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP99 },
{ PREGRP100 },
{ PREGRP101 },
{ PREGRP102 },
{ PREGRP103 },
/* e0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* e8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* f0 */
{ PREGRP87 },
{ PREGRP88 },
{ PREGRP105 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP106 },
/* f8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* THREE_BYTE_1 */
{
/* 00 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 08 */
{ PREGRP69 },
{ PREGRP70 },
{ PREGRP71 },
{ PREGRP72 },
{ PREGRP73 },
{ PREGRP74 },
{ PREGRP75 },
{ "palignr", { MX, EM, Ib } },
/* 10 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP76 },
{ PREGRP77 },
{ PREGRP78 },
{ PREGRP79 },
/* 18 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 20 */
{ PREGRP80 },
{ PREGRP81 },
{ PREGRP82 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 28 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 30 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 38 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 40 */
{ PREGRP83 },
{ PREGRP84 },
{ PREGRP85 },
{ "(bad)", { XX } },
{ PREGRP98 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 48 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 50 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 58 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 60 */
{ PREGRP89 },
{ PREGRP90 },
{ PREGRP91 },
{ PREGRP92 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 68 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 70 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 78 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 80 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 88 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 90 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* 98 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* a0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* a8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* b0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* b8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* c0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* c8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* d0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* d8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ PREGRP104 },
/* e0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* e8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* f0 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
/* f8 */
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
}
};
#define INTERNAL_DISASSEMBLER_ERROR _("<internal disassembler error>")
/* Scan legacy and REX prefix bytes at *codep.  Accumulates legacy
   prefixes into the global `prefixes' bit mask and, in 64-bit mode,
   records the last REX byte in the global `rex'.  Stops at the first
   non-prefix byte, leaving codep pointing at it.  Also resets the
   used_prefixes/rex_used bookkeeping for the new instruction.  */
static void
ckprefix (void)
{
int newrex;
rex = 0;
prefixes = 0;
used_prefixes = 0;
rex_used = 0;
while (1)
{
fetch_data(the_info, codep + 1);
newrex = 0;
switch (*codep)
{
/* REX prefixes family. */
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x44:
case 0x45:
case 0x46:
case 0x47:
case 0x48:
case 0x49:
case 0x4a:
case 0x4b:
case 0x4c:
case 0x4d:
case 0x4e:
case 0x4f:
/* 0x40-0x4f are REX only in 64-bit mode; elsewhere they are
   inc/dec opcodes, so stop scanning without consuming them.  */
if (address_mode == mode_64bit)
newrex = *codep;
else
return;
break;
case 0xf3:
prefixes |= PREFIX_REPZ;
break;
case 0xf2:
prefixes |= PREFIX_REPNZ;
break;
case 0xf0:
prefixes |= PREFIX_LOCK;
break;
case 0x2e:
prefixes |= PREFIX_CS;
break;
case 0x36:
prefixes |= PREFIX_SS;
break;
case 0x3e:
prefixes |= PREFIX_DS;
break;
case 0x26:
prefixes |= PREFIX_ES;
break;
case 0x64:
prefixes |= PREFIX_FS;
break;
case 0x65:
prefixes |= PREFIX_GS;
break;
case 0x66:
prefixes |= PREFIX_DATA;
break;
case 0x67:
prefixes |= PREFIX_ADDR;
break;
case FWAIT_OPCODE:
/* fwait is really an instruction. If there are prefixes
before the fwait, they belong to the fwait, *not* to the
following instruction. */
if (prefixes || rex)
{
prefixes |= PREFIX_FWAIT;
codep++;
return;
}
prefixes = PREFIX_FWAIT;
break;
default:
return;
}
/* Rex is ignored when followed by another prefix. */
if (rex)
{
/* Remember the discarded REX byte so the caller can report it.  */
rex_used = rex;
return;
}
rex = newrex;
codep++;
}
}
/* Decode an optional VEX prefix (0xc4 = three-byte form, 0xc5 =
   two-byte form) at *codep.  On a match, advances codep past the
   prefix and folds the VEX-encoded fields into the globals: the
   inverted R/X/B and W bits into `rex', the implied 66/F3/F2 prefix
   and the opcode-map selector into `prefixes', and VEX.vvvv into
   `vex_reg'.  Does nothing in 16-bit mode or when the next bytes are
   not a VEX prefix (in 32-bit mode 0xc4/0xc5 with mod != 3 are
   les/lds, so those are left alone).  */
static void
ckvexprefix (void)
{
int op, vex2, vex3, newrex = 0, newpfx = prefixes;
if (address_mode == mode_16bit) {
return;
}
fetch_data(the_info, codep + 1);
op = *codep;
if (op != 0xc4 && op != 0xc5) {
return;
}
fetch_data(the_info, codep + 2);
vex2 = codep[1];
/* In 32-bit mode this is a VEX prefix only when the second byte's
   top two bits are set; otherwise it is les/lds with a modrm.  */
if (address_mode == mode_32bit && (vex2 & 0xc0) != 0xc0) {
return;
}
if (op == 0xc4) {
/* Three byte VEX prefix. */
fetch_data(the_info, codep + 3);
vex3 = codep[2];
/* R, X and B are stored inverted in the VEX prefix.  */
newrex |= (vex2 & 0x80 ? 0 : REX_R);
newrex |= (vex2 & 0x40 ? 0 : REX_X);
newrex |= (vex2 & 0x20 ? 0 : REX_B);
newrex |= (vex3 & 0x80 ? REX_W : 0);
switch (vex2 & 0x1f) { /* VEX.m-mmmm */
case 1:
newpfx |= PREFIX_VEX_0F;
break;
case 2:
newpfx |= PREFIX_VEX_0F | PREFIX_VEX_0F38;
break;
case 3:
newpfx |= PREFIX_VEX_0F | PREFIX_VEX_0F3A;
break;
}
/* The third byte carries the pp/vvvv fields in the same positions
   as the second byte of the two-byte form; unify the paths.  */
vex2 = vex3;
codep += 3;
} else {
/* Two byte VEX prefix. */
newrex |= (vex2 & 0x80 ? 0 : REX_R);
codep += 2;
}
vex_reg = (~vex2 >> 3) & 15; /* VEX.vvvv */
switch (vex2 & 3) { /* VEX.pp */
case 1:
newpfx |= PREFIX_DATA; /* 0x66 */
break;
case 2:
newpfx |= PREFIX_REPZ; /* 0xf3 */
break;
case 3:
newpfx |= PREFIX_REPNZ; /* 0xf2 */
break;
}
rex = newrex;
prefixes = newpfx;
}
/* Return the name of the prefix byte PREF, or NULL if PREF is not a
   prefix byte.  SIZEFLAG picks the 16- vs 32-bit spelling of the
   operand-size (0x66) and address-size (0x67) override prefixes.  */
static const char *
prefix_name (int pref, int sizeflag)
{
  /* One name per REX byte 0x40..0x4f; the suffix letters spell out
     which of the W, R, X and B bits are set.  */
  static const char * const rexes[16] =
    {
      "rex",      /* 0x40 */
      "rex.B",    /* 0x41 */
      "rex.X",    /* 0x42 */
      "rex.XB",   /* 0x43 */
      "rex.R",    /* 0x44 */
      "rex.RB",   /* 0x45 */
      "rex.RX",   /* 0x46 */
      "rex.RXB",  /* 0x47 */
      "rex.W",    /* 0x48 */
      "rex.WB",   /* 0x49 */
      "rex.WX",   /* 0x4a */
      "rex.WXB",  /* 0x4b */
      "rex.WR",   /* 0x4c */
      "rex.WRB",  /* 0x4d */
      "rex.WRX",  /* 0x4e */
      "rex.WRXB", /* 0x4f */
    };

  /* The whole REX family maps straight into the table above.  */
  if (pref >= 0x40 && pref <= 0x4f)
    return rexes[pref - 0x40];

  switch (pref)
    {
    case 0xf3:
      return "repz";
    case 0xf2:
      return "repnz";
    case 0xf0:
      return "lock";
    case 0x2e:
      return "cs";
    case 0x36:
      return "ss";
    case 0x3e:
      return "ds";
    case 0x26:
      return "es";
    case 0x64:
      return "fs";
    case 0x65:
      return "gs";
    case 0x66:
      return (sizeflag & DFLAG) ? "data16" : "data32";
    case 0x67:
      /* The "small" address size differs between long mode and
	 legacy/compatibility mode.  */
      if (address_mode == mode_64bit)
	return (sizeflag & AFLAG) ? "addr32" : "addr64";
      return (sizeflag & AFLAG) ? "addr16" : "addr32";
    case FWAIT_OPCODE:
      return "fwait";
    default:
      return NULL;
    }
}
/* Per-instruction decode state shared by the operand printers.  */
/* Formatted text of each decoded operand.  */
static char op_out[MAX_OPERANDS][100];
/* op_ad indexes the operand currently being built; op_index records,
   per operand, which entry holds a printable address (-1 = none).  */
static int op_ad, op_index[MAX_OPERANDS];
/* Non-zero for opcodes that take two source operands (set from the
   opcode byte in print_insn).  */
static int two_source_ops;
/* Address value and RIP-relative flag recorded per operand.  */
static bfd_vma op_address[MAX_OPERANDS];
static bfd_vma op_riprel[MAX_OPERANDS];
/* Address of the instruction being disassembled.  */
static bfd_vma start_pc;
/*
* On the 386's of 1988, the maximum length of an instruction is 15 bytes.
* (See the topic "Redundant prefixes" in the "Differences from 8086"
* section of the "Virtual 8086 Mode" chapter.)
* 'pc' should be the address of this instruction; it will
* be used to print the target address if this is a relative jump or call.
* The function returns the length of this instruction in bytes.
*/
/* Non-zero for Intel syntax output, zero for AT&T; (char) -1 means
   "not yet chosen" and is resolved from info->mach in print_insn.  */
static char intel_syntax;
/* Punctuation used when printing memory operands; set per syntax in
   print_insn: '[' ']' '+' '*' for Intel, '(' ')' ',' ',' for AT&T.  */
static char open_char;
static char close_char;
static char separator_char;
static char scale_char;
/* Public entry point: disassemble the instruction at PC into INFO's
   stream and return its length in bytes (negative on failure).  The
   AT&T-vs-Intel syntax choice is deliberately left undecided here;
   print_insn derives it from info->mach when intel_syntax is -1.  */
int
print_insn_i386 (bfd_vma pc, disassemble_info *info)
{
  intel_syntax = (char) -1;	/* "not chosen yet" marker */
  return print_insn (pc, info);
}
static int
print_insn (bfd_vma pc, disassemble_info *info)
{
const struct dis386 *dp;
int i;
char *op_txt[MAX_OPERANDS];
int needcomma;
unsigned char uses_DATA_prefix, uses_LOCK_prefix;
unsigned char uses_REPNZ_prefix, uses_REPZ_prefix;
int sizeflag;
const char *p;
struct dis_private priv;
unsigned char op;
unsigned char threebyte;
if (info->mach == bfd_mach_x86_64_intel_syntax
|| info->mach == bfd_mach_x86_64)
address_mode = mode_64bit;
else
address_mode = mode_32bit;
if (intel_syntax == (char) -1)
intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax
|| info->mach == bfd_mach_x86_64_intel_syntax);
if (info->mach == bfd_mach_i386_i386
|| info->mach == bfd_mach_x86_64
|| info->mach == bfd_mach_i386_i386_intel_syntax
|| info->mach == bfd_mach_x86_64_intel_syntax)
priv.orig_sizeflag = AFLAG | DFLAG;
else if (info->mach == bfd_mach_i386_i8086)
priv.orig_sizeflag = 0;
else
abort ();
for (p = info->disassembler_options; p != NULL; )
{
if (strncmp (p, "x86-64", 6) == 0)
{
address_mode = mode_64bit;
priv.orig_sizeflag = AFLAG | DFLAG;
}
else if (strncmp (p, "i386", 4) == 0)
{
address_mode = mode_32bit;
priv.orig_sizeflag = AFLAG | DFLAG;
}
else if (strncmp (p, "i8086", 5) == 0)
{
address_mode = mode_16bit;
priv.orig_sizeflag = 0;
}
else if (strncmp (p, "intel", 5) == 0)
{
intel_syntax = 1;
}
else if (strncmp (p, "att", 3) == 0)
{
intel_syntax = 0;
}
else if (strncmp (p, "addr", 4) == 0)
{
if (address_mode == mode_64bit)
{
if (p[4] == '3' && p[5] == '2')
priv.orig_sizeflag &= ~AFLAG;
else if (p[4] == '6' && p[5] == '4')
priv.orig_sizeflag |= AFLAG;
}
else
{
if (p[4] == '1' && p[5] == '6')
priv.orig_sizeflag &= ~AFLAG;
else if (p[4] == '3' && p[5] == '2')
priv.orig_sizeflag |= AFLAG;
}
}
else if (strncmp (p, "data", 4) == 0)
{
if (p[4] == '1' && p[5] == '6')
priv.orig_sizeflag &= ~DFLAG;
else if (p[4] == '3' && p[5] == '2')
priv.orig_sizeflag |= DFLAG;
}
else if (strncmp (p, "suffix", 6) == 0)
priv.orig_sizeflag |= SUFFIX_ALWAYS;
p = strchr (p, ',');
if (p != NULL)
p++;
}
if (intel_syntax)
{
names64 = intel_names64;
names32 = intel_names32;
names16 = intel_names16;
names8 = intel_names8;
names8rex = intel_names8rex;
names_seg = intel_names_seg;
index16 = intel_index16;
open_char = '[';
close_char = ']';
separator_char = '+';
scale_char = '*';
}
else
{
names64 = att_names64;
names32 = att_names32;
names16 = att_names16;
names8 = att_names8;
names8rex = att_names8rex;
names_seg = att_names_seg;
index16 = att_index16;
open_char = '(';
close_char = ')';
separator_char = ',';
scale_char = ',';
}
/* The output looks better if we put 7 bytes on a line, since that
puts most long word instructions on a single line. */
info->bytes_per_line = 7;
info->private_data = &priv;
priv.max_fetched = priv.the_buffer;
priv.insn_start = pc;
obuf[0] = 0;
for (i = 0; i < MAX_OPERANDS; ++i)
{
op_out[i][0] = 0;
op_index[i] = -1;
}
the_info = info;
start_pc = pc;
start_codep = priv.the_buffer;
codep = priv.the_buffer;
if (sigsetjmp(priv.bailout, 0) != 0)
{
const char *name;
/* Getting here means we tried for data but didn't get it. That
means we have an incomplete instruction of some sort. Just
print the first byte as a prefix or a .byte pseudo-op. */
if (codep > priv.the_buffer)
{
name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
if (name != NULL)
(*info->fprintf_func) (info->stream, "%s", name);
else
{
/* Just print the first byte as a .byte instruction. */
(*info->fprintf_func) (info->stream, ".byte 0x%x",
(unsigned int) priv.the_buffer[0]);
}
return 1;
}
return -1;
}
obufp = obuf;
ckprefix ();
ckvexprefix ();
insn_codep = codep;
sizeflag = priv.orig_sizeflag;
fetch_data(info, codep + 1);
two_source_ops = (*codep == 0x62) || (*codep == 0xc8);
if (((prefixes & PREFIX_FWAIT)
&& ((*codep < 0xd8) || (*codep > 0xdf)))
|| (rex && rex_used))
{
const char *name;
/* fwait not followed by floating point instruction, or rex followed
by other prefixes. Print the first prefix. */
name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
if (name == NULL)
name = INTERNAL_DISASSEMBLER_ERROR;
(*info->fprintf_func) (info->stream, "%s", name);
return 1;
}
op = 0;
if (prefixes & PREFIX_VEX_0F)
{
used_prefixes |= PREFIX_VEX_0F | PREFIX_VEX_0F38 | PREFIX_VEX_0F3A;
if (prefixes & PREFIX_VEX_0F38)
threebyte = 0x38;
else if (prefixes & PREFIX_VEX_0F3A)
threebyte = 0x3a;
else
threebyte = *codep++;
goto vex_opcode;
}
if (*codep == 0x0f)
{
fetch_data(info, codep + 2);
threebyte = codep[1];
codep += 2;
vex_opcode:
dp = &dis386_twobyte[threebyte];
need_modrm = twobyte_has_modrm[threebyte];
uses_DATA_prefix = twobyte_uses_DATA_prefix[threebyte];
uses_REPNZ_prefix = twobyte_uses_REPNZ_prefix[threebyte];
uses_REPZ_prefix = twobyte_uses_REPZ_prefix[threebyte];
uses_LOCK_prefix = (threebyte & ~0x02) == 0x20;
if (dp->name == NULL && dp->op[0].bytemode == IS_3BYTE_OPCODE)
{
fetch_data(info, codep + 2);
op = *codep++;
switch (threebyte)
{
case 0x38:
uses_DATA_prefix = threebyte_0x38_uses_DATA_prefix[op];
uses_REPNZ_prefix = threebyte_0x38_uses_REPNZ_prefix[op];
uses_REPZ_prefix = threebyte_0x38_uses_REPZ_prefix[op];
break;
case 0x3a:
uses_DATA_prefix = threebyte_0x3a_uses_DATA_prefix[op];
uses_REPNZ_prefix = threebyte_0x3a_uses_REPNZ_prefix[op];
uses_REPZ_prefix = threebyte_0x3a_uses_REPZ_prefix[op];
break;
default:
break;
}
}
}
else
{
dp = &dis386[*codep];
need_modrm = onebyte_has_modrm[*codep];
uses_DATA_prefix = 0;
uses_REPNZ_prefix = 0;
/* pause is 0xf3 0x90. */
uses_REPZ_prefix = *codep == 0x90;
uses_LOCK_prefix = 0;
codep++;
}
if (!uses_REPZ_prefix && (prefixes & PREFIX_REPZ))
{
oappend ("repz ");
used_prefixes |= PREFIX_REPZ;
}
if (!uses_REPNZ_prefix && (prefixes & PREFIX_REPNZ))
{
oappend ("repnz ");
used_prefixes |= PREFIX_REPNZ;
}
if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK))
{
oappend ("lock ");
used_prefixes |= PREFIX_LOCK;
}
if (prefixes & PREFIX_ADDR)
{
sizeflag ^= AFLAG;
if (dp->op[2].bytemode != loop_jcxz_mode || intel_syntax)
{
if ((sizeflag & AFLAG) || address_mode == mode_64bit)
oappend ("addr32 ");
else
oappend ("addr16 ");
used_prefixes |= PREFIX_ADDR;
}
}
if (!uses_DATA_prefix && (prefixes & PREFIX_DATA))
{
sizeflag ^= DFLAG;
if (dp->op[2].bytemode == cond_jump_mode
&& dp->op[0].bytemode == v_mode
&& !intel_syntax)
{
if (sizeflag & DFLAG)
oappend ("data32 ");
else
oappend ("data16 ");
used_prefixes |= PREFIX_DATA;
}
}
if (dp->name == NULL && dp->op[0].bytemode == IS_3BYTE_OPCODE)
{
dp = &three_byte_table[dp->op[1].bytemode][op];
modrm.mod = (*codep >> 6) & 3;
modrm.reg = (*codep >> 3) & 7;
modrm.rm = *codep & 7;
}
else if (need_modrm)
{
fetch_data(info, codep + 1);
modrm.mod = (*codep >> 6) & 3;
modrm.reg = (*codep >> 3) & 7;
modrm.rm = *codep & 7;
}
if (dp->name == NULL && dp->op[0].bytemode == FLOATCODE)
{
dofloat (sizeflag);
}
else
{
int index;
if (dp->name == NULL)
{
switch (dp->op[0].bytemode)
{
case USE_GROUPS:
dp = &grps[dp->op[1].bytemode][modrm.reg];
break;
case USE_PREFIX_USER_TABLE:
index = 0;
used_prefixes |= (prefixes & PREFIX_REPZ);
if (prefixes & PREFIX_REPZ)
index = 1;
else
{
/* We should check PREFIX_REPNZ and PREFIX_REPZ
before PREFIX_DATA. */
used_prefixes |= (prefixes & PREFIX_REPNZ);
if (prefixes & PREFIX_REPNZ)
index = 3;
else
{
used_prefixes |= (prefixes & PREFIX_DATA);
if (prefixes & PREFIX_DATA)
index = 2;
}
}
dp = &prefix_user_table[dp->op[1].bytemode][index];
break;
case X86_64_SPECIAL:
index = address_mode == mode_64bit ? 1 : 0;
dp = &x86_64_table[dp->op[1].bytemode][index];
break;
default:
oappend (INTERNAL_DISASSEMBLER_ERROR);
break;
}
}
if (putop (dp->name, sizeflag) == 0)
{
for (i = 0; i < MAX_OPERANDS; ++i)
{
obufp = op_out[i];
op_ad = MAX_OPERANDS - 1 - i;
if (dp->op[i].rtn)
(*dp->op[i].rtn) (dp->op[i].bytemode, sizeflag);
}
}
}
/* See if any prefixes were not used. If so, print the first one
separately. If we don't do this, we'll wind up printing an
instruction stream which does not precisely correspond to the
bytes we are disassembling. */
if ((prefixes & ~used_prefixes) != 0)
{
const char *name;
name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
if (name == NULL)
name = INTERNAL_DISASSEMBLER_ERROR;
(*info->fprintf_func) (info->stream, "%s", name);
return 1;
}
if (rex & ~rex_used)
{
const char *name;
name = prefix_name (rex | 0x40, priv.orig_sizeflag);
if (name == NULL)
name = INTERNAL_DISASSEMBLER_ERROR;
(*info->fprintf_func) (info->stream, "%s ", name);
}
obufp = obuf + strlen (obuf);
for (i = strlen (obuf); i < 6; i++)
oappend (" ");
oappend (" ");
(*info->fprintf_func) (info->stream, "%s", obuf);
/* The enter and bound instructions are printed with operands in the same
order as the intel book; everything else is printed in reverse order. */
if (intel_syntax || two_source_ops)
{
bfd_vma riprel;
for (i = 0; i < MAX_OPERANDS; ++i)
op_txt[i] = op_out[i];
for (i = 0; i < (MAX_OPERANDS >> 1); ++i)
{
op_ad = op_index[i];
op_index[i] = op_index[MAX_OPERANDS - 1 - i];
op_index[MAX_OPERANDS - 1 - i] = op_ad;
riprel = op_riprel[i];
op_riprel[i] = op_riprel [MAX_OPERANDS - 1 - i];
op_riprel[MAX_OPERANDS - 1 - i] = riprel;
}
}
else
{
for (i = 0; i < MAX_OPERANDS; ++i)
op_txt[MAX_OPERANDS - 1 - i] = op_out[i];
}
needcomma = 0;
for (i = 0; i < MAX_OPERANDS; ++i)
if (*op_txt[i])
{
if (needcomma)
(*info->fprintf_func) (info->stream, ",");
if (op_index[i] != -1 && !op_riprel[i])
(*info->print_address_func) ((bfd_vma) op_address[op_index[i]], info);
else
(*info->fprintf_func) (info->stream, "%s", op_txt[i]);
needcomma = 1;
}
for (i = 0; i < MAX_OPERANDS; i++)
if (op_index[i] != -1 && op_riprel[i])
{
(*info->fprintf_func) (info->stream, " # ");
(*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep
+ op_address[op_index[i]]), info);
break;
}
return codep - priv.the_buffer;
}
/* Mnemonic templates for the x87 escape opcodes 0xd8..0xdf when the
   ModR/M byte selects a memory operand (mod != 3).  Indexed by
   (opcode - 0xd8) * 8 + modrm.reg (see dofloat).  The strings are
   expanded by putop(): "{a|b|c|d}" picks an alternative based on
   syntax/mode, and capital letters ('I', 'C', 'N') are putop macros.  */
static const char *float_mem[] = {
/* d8 */
"fadd{s||s|}",
"fmul{s||s|}",
"fcom{s||s|}",
"fcomp{s||s|}",
"fsub{s||s|}",
"fsubr{s||s|}",
"fdiv{s||s|}",
"fdivr{s||s|}",
/* d9 */
"fld{s||s|}",
"(bad)",
"fst{s||s|}",
"fstp{s||s|}",
"fldenvIC",
"fldcw",
"fNstenvIC",
"fNstcw",
/* da */
"fiadd{l||l|}",
"fimul{l||l|}",
"ficom{l||l|}",
"ficomp{l||l|}",
"fisub{l||l|}",
"fisubr{l||l|}",
"fidiv{l||l|}",
"fidivr{l||l|}",
/* db */
"fild{l||l|}",
"fisttp{l||l|}",
"fist{l||l|}",
"fistp{l||l|}",
"(bad)",
"fld{t||t|}",
"(bad)",
"fstp{t||t|}",
/* dc */
"fadd{l||l|}",
"fmul{l||l|}",
"fcom{l||l|}",
"fcomp{l||l|}",
"fsub{l||l|}",
"fsubr{l||l|}",
"fdiv{l||l|}",
"fdivr{l||l|}",
/* dd */
"fld{l||l|}",
"fisttp{ll||ll|}",
"fst{l||l|}",
"fstp{l||l|}",
"frstorIC",
"(bad)",
"fNsaveIC",
"fNstsw",
/* de */
"fiadd",
"fimul",
"ficom",
"ficomp",
"fisub",
"fisubr",
"fidiv",
"fidivr",
/* df */
"fild",
"fisttp",
"fist",
"fistp",
"fbld",
"fild{ll||ll|}",
"fbstp",
"fistp{ll||ll|}",
};
/* Memory-operand size mode for each float_mem entry above (passed to
   OP_E / intel_operand_size by dofloat); runs parallel to float_mem.
   0 means no explicit operand-size annotation.  */
static const unsigned char float_mem_mode[] = {
/* d8 */
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
/* d9 */
d_mode,
0,
d_mode,
d_mode,
0,
w_mode,
0,
w_mode,
/* da */
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
d_mode,
/* db */
d_mode,
d_mode,
d_mode,
d_mode,
0,
t_mode,
0,
t_mode,
/* dc */
q_mode,
q_mode,
q_mode,
q_mode,
q_mode,
q_mode,
q_mode,
q_mode,
/* dd */
q_mode,
q_mode,
q_mode,
q_mode,
0,
0,
0,
w_mode,
/* de */
w_mode,
w_mode,
w_mode,
w_mode,
w_mode,
w_mode,
w_mode,
w_mode,
/* df */
w_mode,
w_mode,
w_mode,
w_mode,
t_mode,
q_mode,
t_mode,
q_mode
};
/* Operand descriptors for the x87 register-form table (float_reg):
   ST is the top of the FP stack, STi is st(modrm.rm).  */
#define ST { OP_ST, 0 }
#define STi { OP_STi, 0 }
/* FGRPxx_y expands to a dis386 entry with a NULL name whose first
   operand bytemode is an index into fgrps[]; dofloat() then selects
   the final mnemonic with modrm.rm.  */
#define FGRPd9_2 NULL, { { NULL, 0 } }
#define FGRPd9_4 NULL, { { NULL, 1 } }
#define FGRPd9_5 NULL, { { NULL, 2 } }
#define FGRPd9_6 NULL, { { NULL, 3 } }
#define FGRPd9_7 NULL, { { NULL, 4 } }
#define FGRPda_5 NULL, { { NULL, 5 } }
#define FGRPdb_4 NULL, { { NULL, 6 } }
#define FGRPde_3 NULL, { { NULL, 7 } }
#define FGRPdf_4 NULL, { { NULL, 8 } }
/* dis386 entries for the x87 escape opcodes 0xd8..0xdf when the ModR/M
   byte selects a register operand (mod == 3), indexed by
   [opcode - 0xd8][modrm.reg].  Entries with a NULL name are groups
   that dofloat() resolves through fgrps[] using modrm.rm (see the
   FGRPxx_y defines above).  The SYSV386_COMPAT blocks swap the
   reversed subtract/divide mnemonics, presumably to match an older
   SysV/386 assembler convention -- confirm before relying on it.  */
static const struct dis386 float_reg[][8] = {
/* d8 */
{
{ "fadd", { ST, STi } },
{ "fmul", { ST, STi } },
{ "fcom", { STi } },
{ "fcomp", { STi } },
{ "fsub", { ST, STi } },
{ "fsubr", { ST, STi } },
{ "fdiv", { ST, STi } },
{ "fdivr", { ST, STi } },
},
/* d9 */
{
{ "fld", { STi } },
{ "fxch", { STi } },
{ FGRPd9_2 },
{ "(bad)", { XX } },
{ FGRPd9_4 },
{ FGRPd9_5 },
{ FGRPd9_6 },
{ FGRPd9_7 },
},
/* da */
{
{ "fcmovb", { ST, STi } },
{ "fcmove", { ST, STi } },
{ "fcmovbe",{ ST, STi } },
{ "fcmovu", { ST, STi } },
{ "(bad)", { XX } },
{ FGRPda_5 },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* db */
{
{ "fcmovnb",{ ST, STi } },
{ "fcmovne",{ ST, STi } },
{ "fcmovnbe",{ ST, STi } },
{ "fcmovnu",{ ST, STi } },
{ FGRPdb_4 },
{ "fucomi", { ST, STi } },
{ "fcomi", { ST, STi } },
{ "(bad)", { XX } },
},
/* dc */
{
{ "fadd", { STi, ST } },
{ "fmul", { STi, ST } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
#if SYSV386_COMPAT
{ "fsub", { STi, ST } },
{ "fsubr", { STi, ST } },
{ "fdiv", { STi, ST } },
{ "fdivr", { STi, ST } },
#else
{ "fsubr", { STi, ST } },
{ "fsub", { STi, ST } },
{ "fdivr", { STi, ST } },
{ "fdiv", { STi, ST } },
#endif
},
/* dd */
{
{ "ffree", { STi } },
{ "(bad)", { XX } },
{ "fst", { STi } },
{ "fstp", { STi } },
{ "fucom", { STi } },
{ "fucomp", { STi } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
},
/* de */
{
{ "faddp", { STi, ST } },
{ "fmulp", { STi, ST } },
{ "(bad)", { XX } },
{ FGRPde_3 },
#if SYSV386_COMPAT
{ "fsubp", { STi, ST } },
{ "fsubrp", { STi, ST } },
{ "fdivp", { STi, ST } },
{ "fdivrp", { STi, ST } },
#else
{ "fsubrp", { STi, ST } },
{ "fsubp", { STi, ST } },
{ "fdivrp", { STi, ST } },
{ "fdivp", { STi, ST } },
#endif
},
/* df */
{
{ "ffreep", { STi } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ "(bad)", { XX } },
{ FGRPdf_4 },
{ "fucomip", { ST, STi } },
{ "fcomip", { ST, STi } },
{ "(bad)", { XX } },
},
};
/* Mnemonics for the x87 "group" opcodes (float_reg entries with a NULL
   name), indexed by [group][modrm.rm].  The group numbers correspond
   to the FGRPxx_y defines, as noted in each comment below.  */
static const char *fgrps[][8] = {
/* d9_2 0 */
{
"fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
},
/* d9_4 1 */
{
"fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)",
},
/* d9_5 2 */
{
"fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)",
},
/* d9_6 3 */
{
"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp",
},
/* d9_7 4 */
{
"fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos",
},
/* da_5 5 */
{
"(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
},
/* db_4 6 */
{
"feni(287 only)","fdisi(287 only)","fNclex","fNinit",
"fNsetpm(287 only)","(bad)","(bad)","(bad)",
},
/* de_3 7 */
{
"(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
},
/* df_4 8 */
{
"fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
},
};
/* Decode an x87 floating-point instruction.  The escape byte
   (0xd8..0xdf) has already been consumed and is at codep[-1]; the
   ModR/M byte (already parsed into modrm) follows.  */
static void
dofloat (int sizeflag)
{
  const struct dis386 *entry;
  unsigned char escape = codep[-1];

  if (modrm.mod != 3)
    {
      /* Memory operand: one flat table indexed by escape byte and the
         reg field.  */
      int idx = (escape - 0xd8) * 8 + modrm.reg;

      putop (float_mem[idx], sizeflag);
      obufp = op_out[0];
      op_ad = 2;
      OP_E (float_mem_mode[idx], sizeflag);
      return;
    }

  /* Register operand: skip the mod/rm byte here (OP_E is not used to
     consume it on this path).  */
  MODRM_CHECK;
  codep++;

  entry = &float_reg[escape - 0xd8][modrm.reg];
  if (entry->name == NULL)
    {
      /* A group entry: modrm.rm selects the final mnemonic.  */
      putop (fgrps[entry->op[0].bytemode][modrm.rm], sizeflag);
      /* Instruction fnstsw is only one with strange arg.  */
      if (escape == 0xdf && codep[-1] == 0xe0)
        pstrcpy (op_out[0], sizeof(op_out[0]), names16[0]);
    }
  else
    {
      putop (entry->name, sizeflag);
      obufp = op_out[0];
      op_ad = 2;
      if (entry->op[0].rtn)
        (*entry->op[0].rtn) (entry->op[0].bytemode, sizeflag);
      obufp = op_out[1];
      op_ad = 1;
      if (entry->op[1].rtn)
        (*entry->op[1].rtn) (entry->op[1].bytemode, sizeflag);
    }
}
/* Emit the top-of-stack x87 register: "%st" in AT&T syntax, "st" in
   Intel syntax (intel_syntax is 0 or 1, used to skip the '%').  */
static void
OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  const char *reg = "%st";

  if (intel_syntax)
    reg++;			/* drop the AT&T '%' sigil */
  oappend (reg);
}
/* Emit the x87 stack register selected by modrm.rm: "%st(N)" in AT&T
   syntax, "st(N)" in Intel syntax.  */
static void
OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  char *name = scratchbuf;

  snprintf (name, sizeof(scratchbuf), "%%st(%d)", modrm.rm);
  if (intel_syntax)
    name++;			/* drop the AT&T '%' sigil */
  oappend (name);
}
/* Capital letters in template are macros.  Expand TEMPLATE into obuf:
   ordinary characters are copied verbatim; "{a|b|c|d}" selects one
   alternative by intel_syntax (+1) and 64-bit mode (+2); each capital
   letter conditionally appends an operand-size or address-size suffix
   (mostly AT&T-only) and records which prefixes were consumed in
   used_prefixes.  Returns 0 on success, 1 when the selected "{...}"
   alternative is empty (instruction invalid in this mode) -- obuf then
   holds "(bad)".  */
static int
putop (const char *template, int sizeflag)
{
  const char *p;
  int alt = 0;
  for (p = template; *p; p++)
    {
      switch (*p)
        {
        default:
          /* Plain mnemonic character.  */
          *obufp++ = *p;
          break;
        case '{':
          /* Alternation: skip ALT leading '|'-separated alternatives so
             the following text is the one we want.  */
          alt = 0;
          if (intel_syntax)
            alt += 1;
          if (address_mode == mode_64bit)
            alt += 2;
          while (alt != 0)
            {
              while (*++p != '|')
                {
                  if (*p == '}')
                    {
                      /* Alternative not valid. */
                      pstrcpy (obuf, sizeof(obuf), "(bad)");
                      obufp = obuf + 5;
                      return 1;
                    }
                  else if (*p == '\0')
                    abort ();
                }
              alt--;
            }
          /* Fall through. */
        case 'I':
          /* 'I': process the next macro letter with alt set (used by the
             "IC" suffixes in float_mem).  */
          alt = 1;
          continue;
        case '|':
          /* End of the selected alternative: skip to the closing '}'.  */
          while (*++p != '}')
            {
              if (*p == '\0')
                abort ();
            }
          break;
        case '}':
          break;
        case 'A':
          /* 'A': 'b' suffix for byte ops with a memory operand.  */
          if (intel_syntax)
            break;
          if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS))
            *obufp++ = 'b';
          break;
        case 'B':
          /* 'B': 'b' suffix only when suffixes are forced.  */
          if (intel_syntax)
            break;
          if (sizeflag & SUFFIX_ALWAYS)
            *obufp++ = 'b';
          break;
        case 'C':
          /* 'C': far call/jump size suffix ('l'/'s', 'd'/'w' in Intel).  */
          if (intel_syntax && !alt)
            break;
          if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS))
            {
              if (sizeflag & DFLAG)
                *obufp++ = intel_syntax ? 'd' : 'l';
              else
                *obufp++ = intel_syntax ? 'w' : 's';
              used_prefixes |= (prefixes & PREFIX_DATA);
            }
          break;
        case 'D':
          /* 'D': suffix depends on REX.W / operand size; 'w' for memory.  */
          if (intel_syntax || !(sizeflag & SUFFIX_ALWAYS))
            break;
          USED_REX (REX_W);
          if (modrm.mod == 3)
            {
              if (rex & REX_W)
                *obufp++ = 'q';
              else if (sizeflag & DFLAG)
                *obufp++ = intel_syntax ? 'd' : 'l';
              else
                *obufp++ = 'w';
              used_prefixes |= (prefixes & PREFIX_DATA);
            }
          else
            *obufp++ = 'w';
          break;
        case 'E':		/* For jcxz/jecxz */
          /* 'E': pick the cx/ecx/rcx register-name prefix letter.  */
          if (address_mode == mode_64bit)
            {
              if (sizeflag & AFLAG)
                *obufp++ = 'r';
              else
                *obufp++ = 'e';
            }
          else
            if (sizeflag & AFLAG)
              *obufp++ = 'e';
          used_prefixes |= (prefixes & PREFIX_ADDR);
          break;
        case 'F':
          /* 'F': address-size suffix when an addr prefix is present.  */
          if (intel_syntax)
            break;
          if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS))
            {
              if (sizeflag & AFLAG)
                *obufp++ = address_mode == mode_64bit ? 'q' : 'l';
              else
                *obufp++ = address_mode == mode_64bit ? 'l' : 'w';
              used_prefixes |= (prefixes & PREFIX_ADDR);
            }
          break;
        case 'G':
          /* 'G': 'l'/'w' suffix, only after an 's' or when forced.  */
          if (intel_syntax || (obufp[-1] != 's' && !(sizeflag & SUFFIX_ALWAYS)))
            break;
          if ((rex & REX_W) || (sizeflag & DFLAG))
            *obufp++ = 'l';
          else
            *obufp++ = 'w';
          if (!(rex & REX_W))
            used_prefixes |= (prefixes & PREFIX_DATA);
          break;
        case 'H':
          /* 'H': branch-hint suffix ",pt"/",pn" from CS/DS prefixes.  */
          if (intel_syntax)
            break;
          if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS
              || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS)
            {
              used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS);
              *obufp++ = ',';
              *obufp++ = 'p';
              if (prefixes & PREFIX_DS)
                *obufp++ = 't';
              else
                *obufp++ = 'n';
            }
          break;
        case 'J':
          /* 'J': unconditional 'l' suffix (AT&T only).  */
          if (intel_syntax)
            break;
          *obufp++ = 'l';
          break;
        case 'K':
          /* 'K': 'q' or 'd' purely from REX.W.  */
          USED_REX (REX_W);
          if (rex & REX_W)
            *obufp++ = 'q';
          else
            *obufp++ = 'd';
          break;
        case 'Z':
          /* 'Z': forced 'q' in 64-bit mode, else behaves like 'L'.  */
          if (intel_syntax)
            break;
          if (address_mode == mode_64bit && (sizeflag & SUFFIX_ALWAYS))
            {
              *obufp++ = 'q';
              break;
            }
          /* Fall through. */
        case 'L':
          /* 'L': 'l' suffix when suffixes are forced.  */
          if (intel_syntax)
            break;
          if (sizeflag & SUFFIX_ALWAYS)
            *obufp++ = 'l';
          break;
        case 'N':
          /* 'N': 'n' (no-wait form) unless an fwait prefix was seen.  */
          if ((prefixes & PREFIX_FWAIT) == 0)
            *obufp++ = 'n';
          else
            used_prefixes |= PREFIX_FWAIT;
          break;
        case 'O':
          /* 'O': 'o'/'q'/'d' (e.g. cqto vs cltd style mnemonics).  */
          USED_REX (REX_W);
          if (rex & REX_W)
            *obufp++ = 'o';
          else if (intel_syntax && (sizeflag & DFLAG))
            *obufp++ = 'q';
          else
            *obufp++ = 'd';
          if (!(rex & REX_W))
            used_prefixes |= (prefixes & PREFIX_DATA);
          break;
        case 'T':
          /* 'T': 'q' in 64-bit mode with 32-bit operand size, else 'P'.  */
          if (intel_syntax)
            break;
          if (address_mode == mode_64bit && (sizeflag & DFLAG))
            {
              *obufp++ = 'q';
              break;
            }
          /* Fall through. */
        case 'P':
          /* 'P': 'q'/'l'/'w' when a size prefix or REX.W is in effect.  */
          if (intel_syntax)
            break;
          if ((prefixes & PREFIX_DATA)
              || (rex & REX_W)
              || (sizeflag & SUFFIX_ALWAYS))
            {
              USED_REX (REX_W);
              if (rex & REX_W)
                *obufp++ = 'q';
              else
                {
                  if (sizeflag & DFLAG)
                    *obufp++ = 'l';
                  else
                    *obufp++ = 'w';
                  used_prefixes |= (prefixes & PREFIX_DATA);
                }
              used_prefixes |= (prefixes & PREFIX_DATA);
            }
          break;
        case 'U':
          /* 'U': 64-bit default-size variant of 'Q' (push/pop etc.).  */
          if (intel_syntax)
            break;
          if (address_mode == mode_64bit && (sizeflag & DFLAG))
            {
              if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS))
                *obufp++ = 'q';
              break;
            }
          /* Fall through. */
        case 'Q':
          /* 'Q': size suffix for ops with a (possible) memory operand.  */
          if (intel_syntax && !alt)
            break;
          USED_REX (REX_W);
          if (modrm.mod != 3 || (sizeflag & SUFFIX_ALWAYS))
            {
              if (rex & REX_W)
                *obufp++ = 'q';
              else
                {
                  if (sizeflag & DFLAG)
                    *obufp++ = intel_syntax ? 'd' : 'l';
                  else
                    *obufp++ = 'w';
                }
              used_prefixes |= (prefixes & PREFIX_DATA);
            }
          break;
        case 'R':
          /* 'R': register-size letter; Intel syntax may add 'e'.  */
          USED_REX (REX_W);
          if (rex & REX_W)
            *obufp++ = 'q';
          else if (sizeflag & DFLAG)
            {
              if (intel_syntax)
                *obufp++ = 'd';
              else
                *obufp++ = 'l';
            }
          else
            *obufp++ = 'w';
          if (intel_syntax && !p[1]
              && ((rex & REX_W) || (sizeflag & DFLAG)))
            *obufp++ = 'e';
          if (!(rex & REX_W))
            used_prefixes |= (prefixes & PREFIX_DATA);
          break;
        case 'V':
          /* 'V': 64-bit default-size variant of 'S'.  */
          if (intel_syntax)
            break;
          if (address_mode == mode_64bit && (sizeflag & DFLAG))
            {
              if (sizeflag & SUFFIX_ALWAYS)
                *obufp++ = 'q';
              break;
            }
          /* Fall through. */
        case 'S':
          /* 'S': size suffix only when suffixes are forced.  */
          if (intel_syntax)
            break;
          if (sizeflag & SUFFIX_ALWAYS)
            {
              if (rex & REX_W)
                *obufp++ = 'q';
              else
                {
                  if (sizeflag & DFLAG)
                    *obufp++ = 'l';
                  else
                    *obufp++ = 'w';
                  used_prefixes |= (prefixes & PREFIX_DATA);
                }
            }
          break;
        case 'X':
          /* 'X': 'd' vs 's' (packed double vs single) from 0x66 prefix.  */
          if (prefixes & PREFIX_DATA)
            *obufp++ = 'd';
          else
            *obufp++ = 's';
          used_prefixes |= (prefixes & PREFIX_DATA);
          break;
        case 'Y':
          /* 'Y': 'q' suffix only under REX.W (AT&T only).  */
          if (intel_syntax)
            break;
          if (rex & REX_W)
            {
              USED_REX (REX_W);
              *obufp++ = 'q';
            }
          break;
          /* implicit operand size 'l' for i386 or 'q' for x86-64 */
        case 'W':
          /* operand size flag for cwtl, cbtw */
          USED_REX (REX_W);
          if (rex & REX_W)
            {
              if (intel_syntax)
                *obufp++ = 'd';
              else
                *obufp++ = 'l';
            }
          else if (sizeflag & DFLAG)
            *obufp++ = 'w';
          else
            *obufp++ = 'b';
          if (!(rex & REX_W))
            used_prefixes |= (prefixes & PREFIX_DATA);
          break;
        }
      /* 'alt' only survives across one macro letter (set by 'I'/'{').  */
      alt = 0;
    }
  *obufp = 0;
  return 0;
}
/* Append the NUL-terminated string S at the output cursor obufp and
   advance the cursor past it (obufp stays NUL-terminated).  */
static void
oappend (const char *s)
{
  size_t len = strlen (s);

  memcpy (obufp, s, len + 1);	/* copy including the trailing NUL */
  obufp += len;
}
static void
append_seg (void)
{
if (prefixes & PREFIX_CS)
{
used_prefixes |= PREFIX_CS;
oappend ("%cs:" + intel_syntax);
}
if (prefixes & PREFIX_DS)
{
used_prefixes |= PREFIX_DS;
oappend ("%ds:" + intel_syntax);
}
if (prefixes & PREFIX_SS)
{
used_prefixes |= PREFIX_SS;
oappend ("%ss:" + intel_syntax);
}
if (prefixes & PREFIX_ES)
{
used_prefixes |= PREFIX_ES;
oappend ("%es:" + intel_syntax);
}
if (prefixes & PREFIX_FS)
{
used_prefixes |= PREFIX_FS;
oappend ("%fs:" + intel_syntax);
}
if (prefixes & PREFIX_GS)
{
used_prefixes |= PREFIX_GS;
oappend ("%gs:" + intel_syntax);
}
}
/* Indirect jump/call target: AT&T syntax prefixes the E operand with
   '*'; Intel syntax does not.  */
static void
OP_indirE (int bytemode, int sizeflag)
{
  if (intel_syntax == 0)
    oappend ("*");
  OP_E (bytemode, sizeflag);
}
/* Format DISP into BUF (capacity BUFSIZE).  HEX non-zero selects
   "0x..." output, otherwise signed decimal.  In 64-bit mode the value
   is formatted by hand via snprintf_vma / manual decimal conversion;
   in 16/32-bit mode plain snprintf on the truncated value is used.  */
static void
print_operand_value (char *buf, size_t bufsize, int hex, bfd_vma disp)
{
if (address_mode == mode_64bit)
{
if (hex)
{
char tmp[30];
int i;
buf[0] = '0';
buf[1] = 'x';
snprintf_vma (tmp, sizeof(tmp), disp);
/* Strip leading zeros, but keep the last digit even if zero.  */
for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++) {
}
pstrcpy (buf + 2, bufsize - 2, tmp + i);
}
else
{
bfd_signed_vma v = disp;
char tmp[30];
int i;
if (v < 0)
{
*(buf++) = '-';
v = -disp;
/* Check for possible overflow on 0x8000000000000000. */
if (v < 0)
{
/* -v overflowed: print the magnitude of INT64_MIN directly.  */
pstrcpy (buf, bufsize, "9223372036854775808");
return;
}
}
if (!v)
{
pstrcpy (buf, bufsize, "0");
return;
}
/* Build the decimal digits backwards in tmp, then copy out.  */
i = 0;
tmp[29] = 0;
while (v)
{
tmp[28 - i] = (v % 10) + '0';
v /= 10;
i++;
}
pstrcpy (buf, bufsize, tmp + 29 - i);
}
}
else
{
if (hex)
snprintf (buf, bufsize, "0x%x", (unsigned int) disp);
else
snprintf (buf, bufsize, "%d", (int) disp);
}
}
/* Put DISP in BUF as signed hex number ("-0x..." for negatives).
   NOTE(review): unlike print_operand_value this takes no buffer size;
   callers pass scratchbuf, which is assumed large enough -- confirm.  */
static void
print_displacement (char *buf, bfd_vma disp)
{
bfd_signed_vma val = disp;
char tmp[30];
int i, j = 0;
if (val < 0)
{
buf[j++] = '-';
val = -disp;
/* Check for possible overflow.  Negating the most negative value
   wraps back negative; print its magnitude for the current mode.  */
if (val < 0)
{
switch (address_mode)
{
case mode_64bit:
strcpy (buf + j, "0x8000000000000000");
break;
case mode_32bit:
strcpy (buf + j, "0x80000000");
break;
case mode_16bit:
strcpy (buf + j, "0x8000");
break;
}
return;
}
}
buf[j++] = '0';
buf[j++] = 'x';
snprintf_vma (tmp, sizeof(tmp), val);
/* Strip leading zeros from the zero-padded hex string, keeping at
   least one digit when the value is 0.  */
for (i = 0; tmp[i] == '0'; i++)
continue;
if (tmp[i] == '\0')
i--;
strcpy (buf + j, tmp + i);
}
/* Emit the Intel-syntax operand-size annotation ("BYTE PTR " etc.)
   for a memory operand of the given BYTEMODE, and record any data
   prefix consumed in the process.  */
static void
intel_operand_size (int bytemode, int sizeflag)
{
switch (bytemode)
{
case b_mode:
case dqb_mode:
oappend ("BYTE PTR ");
break;
case w_mode:
case dqw_mode:
oappend ("WORD PTR ");
break;
case stack_v_mode:
/* Stack ops default to 64-bit in 64-bit mode unless a data prefix
   shrinks them; otherwise fall through to the v_mode rules.  */
if (address_mode == mode_64bit && (sizeflag & DFLAG))
{
oappend ("QWORD PTR ");
used_prefixes |= (prefixes & PREFIX_DATA);
break;
}
/* FALLTHRU */
case v_mode:
case dq_mode:
USED_REX (REX_W);
if (rex & REX_W)
oappend ("QWORD PTR ");
else if ((sizeflag & DFLAG) || bytemode == dq_mode)
oappend ("DWORD PTR ");
else
oappend ("WORD PTR ");
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case z_mode:
/* 'D'WORD or WORD depending on operand size.  */
if ((rex & REX_W) || (sizeflag & DFLAG))
*obufp++ = 'D';
oappend ("WORD PTR ");
if (!(rex & REX_W))
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case d_mode:
case dqd_mode:
oappend ("DWORD PTR ");
break;
case q_mode:
oappend ("QWORD PTR ");
break;
case m_mode:
/* Natural pointer width of the current mode.  */
if (address_mode == mode_64bit)
oappend ("QWORD PTR ");
else
oappend ("DWORD PTR ");
break;
case f_mode:
if (sizeflag & DFLAG)
oappend ("FWORD PTR ");
else
oappend ("DWORD PTR ");
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case t_mode:
oappend ("TBYTE PTR ");
break;
case x_mode:
oappend ("XMMWORD PTR ");
break;
case o_mode:
oappend ("OWORD PTR ");
break;
default:
/* Modes with no Intel size annotation emit nothing.  */
break;
}
}
/* Decode and print the ModR/M "E" operand (register, or memory with
   full addressing).  The caller has parsed the mod/rm byte into
   modrm; this routine consumes it from the byte stream along with any
   SIB byte and displacement.  BYTEMODE selects the operand size.  */
static void
OP_E (int bytemode, int sizeflag)
{
bfd_vma disp;
int add = 0;
int riprel = 0;
/* REX.B extends the rm register field to 8-15.  */
USED_REX (REX_B);
if (rex & REX_B)
add += 8;
/* Skip mod/rm byte. */
MODRM_CHECK;
codep++;
if (modrm.mod == 3)
{
/* Register operand: choose a register-name table by BYTEMODE.  */
switch (bytemode)
{
case b_mode:
USED_REX (0);
if (rex)
oappend (names8rex[modrm.rm + add]);
else
oappend (names8[modrm.rm + add]);
break;
case w_mode:
oappend (names16[modrm.rm + add]);
break;
case d_mode:
oappend (names32[modrm.rm + add]);
break;
case q_mode:
oappend (names64[modrm.rm + add]);
break;
case m_mode:
if (address_mode == mode_64bit)
oappend (names64[modrm.rm + add]);
else
oappend (names32[modrm.rm + add]);
break;
case stack_v_mode:
/* Stack ops are 64-bit by default in 64-bit mode.  */
if (address_mode == mode_64bit && (sizeflag & DFLAG))
{
oappend (names64[modrm.rm + add]);
used_prefixes |= (prefixes & PREFIX_DATA);
break;
}
bytemode = v_mode;
/* FALLTHRU */
case v_mode:
case dq_mode:
case dqb_mode:
case dqd_mode:
case dqw_mode:
USED_REX (REX_W);
if (rex & REX_W)
oappend (names64[modrm.rm + add]);
else if ((sizeflag & DFLAG) || bytemode != v_mode)
oappend (names32[modrm.rm + add]);
else
oappend (names16[modrm.rm + add]);
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case 0:
break;
default:
oappend (INTERNAL_DISASSEMBLER_ERROR);
break;
}
return;
}
/* Memory operand.  */
disp = 0;
if (intel_syntax)
intel_operand_size (bytemode, sizeflag);
append_seg ();
if ((sizeflag & AFLAG) || address_mode == mode_64bit)
{
/* 32/64 bit address mode */
int havedisp;
int havesib;
int havebase;
int base;
int index = 0;
int scale = 0;
havesib = 0;
havebase = 1;
base = modrm.rm;
if (base == 4)
{
/* rm == 4 means a SIB byte follows.  */
havesib = 1;
fetch_data(the_info, codep + 1);
index = (*codep >> 3) & 7;
if (address_mode == mode_64bit || index != 0x4)
/* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. */
scale = (*codep >> 6) & 3;
base = *codep & 7;
USED_REX (REX_X);
if (rex & REX_X)
index += 8;
codep++;
}
base += add;
switch (modrm.mod)
{
case 0:
if ((base & 7) == 5)
{
/* mod==0, base==5: no base register, disp32 only;
   rip-relative in 64-bit mode when there is no SIB.  */
havebase = 0;
if (address_mode == mode_64bit && !havesib)
riprel = 1;
disp = get32s ();
}
break;
case 1:
/* disp8, sign-extended.  */
fetch_data (the_info, codep + 1);
disp = *codep++;
if ((disp & 0x80) != 0)
disp -= 0x100;
break;
case 2:
/* disp32.  */
disp = get32s ();
break;
}
havedisp = havebase || (havesib && (index != 4 || scale != 0));
if (!intel_syntax)
if (modrm.mod != 0 || (base & 7) == 5)
{
if (havedisp || riprel)
print_displacement (scratchbuf, disp);
else
print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp);
oappend (scratchbuf);
if (riprel)
{
set_op (disp, 1);
oappend ("(%rip)");
}
}
if (havedisp || (intel_syntax && riprel))
{
/* Open the "(base,index,scale)" / "[base+index*scale+disp]" part.  */
*obufp++ = open_char;
if (intel_syntax && riprel)
{
set_op (disp, 1);
oappend ("rip");
}
*obufp = '\0';
if (havebase)
oappend (address_mode == mode_64bit && (sizeflag & AFLAG)
? names64[base] : names32[base]);
if (havesib)
{
if (index != 4)
{
if (!intel_syntax || havebase)
{
*obufp++ = separator_char;
*obufp = '\0';
}
oappend (address_mode == mode_64bit && (sizeflag & AFLAG)
? names64[index] : names32[index]);
}
if (scale != 0 || (!intel_syntax && index != 4))
{
*obufp++ = scale_char;
*obufp = '\0';
snprintf (scratchbuf, sizeof(scratchbuf), "%d", 1 << scale);
oappend (scratchbuf);
}
}
if (intel_syntax
&& (disp || modrm.mod != 0 || (base & 7) == 5))
{
/* Intel syntax carries the displacement inside the brackets.  */
if ((bfd_signed_vma) disp >= 0)
{
*obufp++ = '+';
*obufp = '\0';
}
else if (modrm.mod != 1)
{
*obufp++ = '-';
*obufp = '\0';
disp = - (bfd_signed_vma) disp;
}
print_displacement (scratchbuf, disp);
oappend (scratchbuf);
}
*obufp++ = close_char;
*obufp = '\0';
}
else if (intel_syntax)
{
/* Absolute address with no base/index: print segment + disp.  */
if (modrm.mod != 0 || (base & 7) == 5)
{
if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
| PREFIX_ES | PREFIX_FS | PREFIX_GS))
;
else
{
oappend (names_seg[ds_reg - es_reg]);
oappend (":");
}
print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp);
oappend (scratchbuf);
}
}
}
else
{ /* 16 bit address mode */
switch (modrm.mod)
{
case 0:
if (modrm.rm == 6)
{
/* mod==0, rm==6: disp16 only, no registers.  */
disp = get16 ();
if ((disp & 0x8000) != 0)
disp -= 0x10000;
}
break;
case 1:
/* disp8, sign-extended.  */
fetch_data(the_info, codep + 1);
disp = *codep++;
if ((disp & 0x80) != 0)
disp -= 0x100;
break;
case 2:
/* disp16, sign-extended.  */
disp = get16 ();
if ((disp & 0x8000) != 0)
disp -= 0x10000;
break;
}
if (!intel_syntax)
if (modrm.mod != 0 || modrm.rm == 6)
{
print_displacement (scratchbuf, disp);
oappend (scratchbuf);
}
if (modrm.mod != 0 || modrm.rm != 6)
{
/* index16[] maps rm to the bx+si/bx+di/... register pairs.  */
*obufp++ = open_char;
*obufp = '\0';
oappend (index16[modrm.rm]);
if (intel_syntax
&& (disp || modrm.mod != 0 || modrm.rm == 6))
{
if ((bfd_signed_vma) disp >= 0)
{
*obufp++ = '+';
*obufp = '\0';
}
else if (modrm.mod != 1)
{
*obufp++ = '-';
*obufp = '\0';
disp = - (bfd_signed_vma) disp;
}
print_displacement (scratchbuf, disp);
oappend (scratchbuf);
}
*obufp++ = close_char;
*obufp = '\0';
}
else if (intel_syntax)
{
/* Absolute 16-bit address: print segment + disp.  */
if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
| PREFIX_ES | PREFIX_FS | PREFIX_GS))
;
else
{
oappend (names_seg[ds_reg - es_reg]);
oappend (":");
}
print_operand_value (scratchbuf, sizeof(scratchbuf), 1,
disp & 0xffff);
oappend (scratchbuf);
}
}
}
static void
OP_G (int bytemode, int sizeflag)
{
int add = 0;
USED_REX (REX_R);
if (rex & REX_R)
add += 8;
switch (bytemode)
{
case b_mode:
USED_REX (0);
if (rex)
oappend (names8rex[modrm.reg + add]);
else
oappend (names8[modrm.reg + add]);
break;
case w_mode:
oappend (names16[modrm.reg + add]);
break;
case d_mode:
oappend (names32[modrm.reg + add]);
break;
case q_mode:
oappend (names64[modrm.reg + add]);
break;
case v_mode:
case dq_mode:
case dqb_mode:
case dqd_mode:
case dqw_mode:
USED_REX (REX_W);
if (rex & REX_W)
oappend (names64[modrm.reg + add]);
else if ((sizeflag & DFLAG) || bytemode != v_mode)
oappend (names32[modrm.reg + add]);
else
oappend (names16[modrm.reg + add]);
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case m_mode:
if (address_mode == mode_64bit)
oappend (names64[modrm.reg + add]);
else
oappend (names32[modrm.reg + add]);
break;
default:
oappend (INTERNAL_DISASSEMBLER_ERROR);
break;
}
}
/* Print the register selected by the VEX.vvvv field (vex_reg), sized
   by REX.W: 64-bit name under REX.W, 32-bit otherwise.  */
static void
OP_vvvv (int bytemode, int sizeflags)
{
  USED_REX (REX_W);
  oappend ((rex & REX_W) ? names64[vex_reg] : names32[vex_reg]);
}
/* Fetch a little-endian 64-bit immediate from the byte stream.
   Only available when bfd_vma is 64 bits wide (BFD64).  */
static bfd_vma
get64 (void)
{
  bfd_vma x;
#ifdef BFD64
  int i;

  fetch_data(the_info, codep + 8);
  x = 0;
  for (i = 0; i < 8; i++)
    x |= (bfd_vma) (*codep++ & 0xff) << (8 * i);
#else
  abort ();
  x = 0;
#endif
  return x;
}
/* Fetch a little-endian 32-bit value from the byte stream, without
   sign extension beyond bit 31.  */
static bfd_signed_vma
get32 (void)
{
  bfd_signed_vma x = 0;
  int i;

  fetch_data(the_info, codep + 4);
  for (i = 0; i < 4; i++)
    x |= (*codep++ & (bfd_signed_vma) 0xff) << (8 * i);
  return x;
}
/* Fetch a little-endian 32-bit value from the byte stream and
   sign-extend it to the full width of bfd_signed_vma.  */
static bfd_signed_vma
get32s (void)
{
  bfd_signed_vma x = 0;
  int i;

  fetch_data(the_info, codep + 4);
  for (i = 0; i < 4; i++)
    x |= (*codep++ & (bfd_signed_vma) 0xff) << (8 * i);
  /* Sign-extend from bit 31.  */
  x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31);
  return x;
}
/* Fetch a little-endian 16-bit value from the byte stream.  */
static int
get16 (void)
{
  int lo, hi;

  fetch_data(the_info, codep + 2);
  lo = *codep++ & 0xff;
  hi = *codep++ & 0xff;
  return lo | (hi << 8);
}
/* Record operand op_ad's target address OP and whether it is
   rip-relative, for the address-printing pass in print_insn.  */
static void
set_op (bfd_vma op, int riprel)
{
  op_index[op_ad] = op_ad;
  if (address_mode != mode_64bit)
    {
      /* Mask to get a 32-bit address. */
      op_address[op_ad] = op & 0xffffffff;
      op_riprel[op_ad] = riprel & 0xffffffff;
    }
  else
    {
      op_address[op_ad] = op;
      op_riprel[op_ad] = riprel;
    }
}
/* Print a register operand encoded directly in the opcode (push/pop,
   xchg, mov-immediate forms).  CODE is one of the *_reg constants;
   REX.B selects the extended registers 8-15 where applicable.  */
static void
OP_REG (int code, int sizeflag)
{
const char *s;
int add = 0;
USED_REX (REX_B);
if (rex & REX_B)
add = 8;
switch (code)
{
case ax_reg: case cx_reg: case dx_reg: case bx_reg:
case sp_reg: case bp_reg: case si_reg: case di_reg:
s = names16[code - ax_reg + add];
break;
case es_reg: case ss_reg: case cs_reg:
case ds_reg: case fs_reg: case gs_reg:
s = names_seg[code - es_reg + add];
break;
case al_reg: case ah_reg: case cl_reg: case ch_reg:
case dl_reg: case dh_reg: case bl_reg: case bh_reg:
USED_REX (0);
if (rex)
s = names8rex[code - al_reg + add];
else
s = names8[code - al_reg];
break;
case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg:
case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg:
/* rXX: defaults to 64-bit in 64-bit mode; otherwise treat as eXX.  */
if (address_mode == mode_64bit && (sizeflag & DFLAG))
{
s = names64[code - rAX_reg + add];
break;
}
code += eAX_reg - rAX_reg;
/* Fall through. */
case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
/* eXX: size from REX.W, then the operand-size prefix.  */
USED_REX (REX_W);
if (rex & REX_W)
s = names64[code - eAX_reg + add];
else if (sizeflag & DFLAG)
s = names32[code - eAX_reg + add];
else
s = names16[code - eAX_reg + add];
used_prefixes |= (prefixes & PREFIX_DATA);
break;
default:
s = INTERNAL_DISASSEMBLER_ERROR;
break;
}
oappend (s);
}
static void
OP_IMREG (int code, int sizeflag)
{
const char *s;
switch (code)
{
case indir_dx_reg:
if (intel_syntax)
s = "dx";
else
s = "(%dx)";
break;
case ax_reg: case cx_reg: case dx_reg: case bx_reg:
case sp_reg: case bp_reg: case si_reg: case di_reg:
s = names16[code - ax_reg];
break;
case es_reg: case ss_reg: case cs_reg:
case ds_reg: case fs_reg: case gs_reg:
s = names_seg[code - es_reg];
break;
case al_reg: case ah_reg: case cl_reg: case ch_reg:
case dl_reg: case dh_reg: case bl_reg: case bh_reg:
USED_REX (0);
if (rex)
s = names8rex[code - al_reg];
else
s = names8[code - al_reg];
break;
case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
USED_REX (REX_W);
if (rex & REX_W)
s = names64[code - eAX_reg];
else if (sizeflag & DFLAG)
s = names32[code - eAX_reg];
else
s = names16[code - eAX_reg];
used_prefixes |= (prefixes & PREFIX_DATA);
break;
case z_mode_ax_reg:
if ((rex & REX_W) || (sizeflag & DFLAG))
s = *names32;
else
s = *names16;
if (!(rex & REX_W))
used_prefixes |= (prefixes & PREFIX_DATA);
break;
default:
s = INTERNAL_DISASSEMBLER_ERROR;
break;
}
oappend (s);
}
/* Decode an immediate operand of the width selected by BYTEMODE and
   append it as "$0x..." (the '$' is dropped in Intel syntax).  The
   value is masked to the operand width before printing.

   Fix: the 16-bit masks were written as 0xfffff (20 bits) instead of
   0xffff.  This was harmless only because get16() never returns more
   than 0xffff, but the constant was wrong and misleading.  */
static void
OP_I (int bytemode, int sizeflag)
{
  bfd_signed_vma op;
  bfd_signed_vma mask = -1;

  switch (bytemode)
    {
    case b_mode:
      fetch_data(the_info, codep + 1);
      op = *codep++;
      mask = 0xff;
      break;
    case q_mode:
      if (address_mode == mode_64bit)
        {
          /* imm32, sign-extended to 64 bits.  */
          op = get32s ();
          break;
        }
      /* Fall through.  */
    case v_mode:
      USED_REX (REX_W);
      if (rex & REX_W)
        op = get32s ();
      else if (sizeflag & DFLAG)
        {
          op = get32 ();
          mask = 0xffffffff;
        }
      else
        {
          op = get16 ();
          mask = 0xffff;
        }
      used_prefixes |= (prefixes & PREFIX_DATA);
      break;
    case w_mode:
      mask = 0xffff;
      op = get16 ();
      break;
    case const_1_mode:
      /* Implicit immediate 1 (shift/rotate forms); AT&T omits it.  */
      if (intel_syntax)
        oappend ("1");
      return;
    default:
      oappend (INTERNAL_DISASSEMBLER_ERROR);
      return;
    }
  op &= mask;
  scratchbuf[0] = '$';
  print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op);
  oappend (scratchbuf + intel_syntax);
  scratchbuf[0] = '\0';
}
/* Like OP_I, but in 64-bit mode a v_mode immediate with REX.W is a full
   64-bit value (e.g. movabs).  Outside 64-bit mode defer to OP_I.  */
static void
OP_I64 (int bytemode, int sizeflag)
{
  bfd_signed_vma op;
  bfd_signed_vma mask = -1;

  if (address_mode != mode_64bit)
    {
      OP_I (bytemode, sizeflag);
      return;
    }
  switch (bytemode)
    {
    case b_mode:
      fetch_data (the_info, codep + 1);
      op = *codep++;
      mask = 0xff;
      break;
    case v_mode:
      USED_REX (REX_W);
      if (rex & REX_W)
        op = get64 ();
      else if (sizeflag & DFLAG)
        {
          op = get32 ();
          mask = 0xffffffff;
        }
      else
        {
          op = get16 ();
          /* Bug fix: was 0xfffff (five f's); 16-bit mask is 0xffff.  */
          mask = 0xffff;
        }
      used_prefixes |= (prefixes & PREFIX_DATA);
      break;
    case w_mode:
      /* Bug fix: was 0xfffff — a 16-bit immediate masks with 0xffff.  */
      mask = 0xffff;
      op = get16 ();
      break;
    default:
      oappend (INTERNAL_DISASSEMBLER_ERROR);
      return;
    }
  op &= mask;
  scratchbuf[0] = '$';
  print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op);
  oappend (scratchbuf + intel_syntax);
  scratchbuf[0] = '\0';
}
/* Decode and print a sign-extended immediate operand (e.g. the imm8 of
   "add $imm8, r/m32", which is sign-extended to the operand size).  */
static void
OP_sI (int bytemode, int sizeflag)
{
  bfd_signed_vma op;

  switch (bytemode)
    {
    case b_mode:
      fetch_data(the_info, codep + 1);
      op = *codep++;
      /* Manual sign extension of the fetched byte.  */
      if ((op & 0x80) != 0)
        op -= 0x100;
      break;
    case v_mode:
      USED_REX (REX_W);
      if (rex & REX_W)
        op = get32s ();
      else if (sizeflag & DFLAG)
        {
          op = get32s ();
        }
      else
        {
          op = get16 ();
          /* Manual sign extension of the 16-bit value.  */
          if ((op & 0x8000) != 0)
            op -= 0x10000;
        }
      used_prefixes |= (prefixes & PREFIX_DATA);
      break;
    case w_mode:
      op = get16 ();
      if ((op & 0x8000) != 0)
        op -= 0x10000;
      break;
    default:
      oappend (INTERNAL_DISASSEMBLER_ERROR);
      return;
    }
  scratchbuf[0] = '$';
  print_operand_value (scratchbuf + 1, sizeof(scratchbuf) - 1, 1, op);
  oappend (scratchbuf + intel_syntax);
}
/* Decode a relative jump/call displacement and print the resulting
   absolute target address.  */
static void
OP_J (int bytemode, int sizeflag)
{
  bfd_vma disp;
  bfd_vma mask = -1;
  bfd_vma segment = 0;

  switch (bytemode)
    {
    case b_mode:
      fetch_data(the_info, codep + 1);
      disp = *codep++;
      /* Sign-extend the 8-bit displacement.  */
      if ((disp & 0x80) != 0)
        disp -= 0x100;
      break;
    case v_mode:
      if ((sizeflag & DFLAG) || (rex & REX_W))
        disp = get32s ();
      else
        {
          disp = get16 ();
          if ((disp & 0x8000) != 0)
            disp -= 0x10000;
          /* In 16bit mode, address is wrapped around at 64k within
             the same segment.  Otherwise, a data16 prefix on a jump
             instruction means that the pc is masked to 16 bits after
             the displacement is added!  */
          mask = 0xffff;
          if ((prefixes & PREFIX_DATA) == 0)
            segment = ((start_pc + codep - start_codep)
                       & ~((bfd_vma) 0xffff));
        }
      used_prefixes |= (prefixes & PREFIX_DATA);
      break;
    default:
      oappend (INTERNAL_DISASSEMBLER_ERROR);
      return;
    }
  /* Target = pc-after-instruction + displacement, masked/segment-based
     as computed above.  */
  disp = ((start_pc + codep - start_codep + disp) & mask) | segment;
  set_op (disp, 0);
  print_operand_value (scratchbuf, sizeof(scratchbuf), 1, disp);
  oappend (scratchbuf);
}
/* Print a segment-register operand; for the non-w_mode encodings fall
   back to the generic E-operand decoder (forcing w_mode for memory
   forms, since a segment register is 16 bits wide).  */
static void
OP_SEG (int bytemode, int sizeflag)
{
  if (bytemode == w_mode)
    {
      oappend (names_seg[modrm.reg]);
      return;
    }
  if (modrm.mod == 3)
    OP_E (bytemode, sizeflag);
  else
    OP_E (w_mode, sizeflag);
}
/* Decode and print a direct far pointer operand: a 16- or 32-bit
   offset followed by a 16-bit segment selector.  */
static void
OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag)
{
  int seg, offset;

  /* The offset is fetched first (its width depends on DFLAG), then the
     16-bit segment selector.  */
  offset = (sizeflag & DFLAG) ? get32 () : get16 ();
  seg = get16 ();
  used_prefixes |= (prefixes & PREFIX_DATA);
  snprintf (scratchbuf, sizeof(scratchbuf),
            intel_syntax ? "0x%x:0x%x" : "$0x%x,$0x%x", seg, offset);
  oappend (scratchbuf);
}
/* Decode a moffs-style absolute memory offset operand (as used by the
   A0..A3 mov forms) and print it with its segment override.  */
static void
OP_OFF (int bytemode, int sizeflag)
{
  bfd_vma off;

  if (intel_syntax && (sizeflag & SUFFIX_ALWAYS))
    intel_operand_size (bytemode, sizeflag);
  append_seg ();
  /* Offset width follows the address size, not the operand size.  */
  if ((sizeflag & AFLAG) || address_mode == mode_64bit)
    off = get32 ();
  else
    off = get16 ();
  if (intel_syntax)
    {
      /* Intel syntax always shows a segment; default to %ds when no
         override prefix is present.  */
      if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
                        | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
        {
          oappend (names_seg[ds_reg - es_reg]);
          oappend (":");
        }
    }
  print_operand_value (scratchbuf, sizeof(scratchbuf), 1, off);
  oappend (scratchbuf);
}
/* 64-bit variant of OP_OFF: in 64-bit mode without an address-size
   override the moffs operand is a full 64-bit offset.  */
static void
OP_OFF64 (int bytemode, int sizeflag)
{
  bfd_vma off;

  if (address_mode != mode_64bit
      || (prefixes & PREFIX_ADDR))
    {
      /* 32/16-bit offset; share the common decoder.  */
      OP_OFF (bytemode, sizeflag);
      return;
    }
  if (intel_syntax && (sizeflag & SUFFIX_ALWAYS))
    intel_operand_size (bytemode, sizeflag);
  append_seg ();
  off = get64 ();
  if (intel_syntax)
    {
      /* Intel syntax shows an explicit %ds when no override is given.  */
      if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
                        | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
        {
          oappend (names_seg[ds_reg - es_reg]);
          oappend (":");
        }
    }
  print_operand_value (scratchbuf, sizeof(scratchbuf), 1, off);
  oappend (scratchbuf);
}
/* Print an address-sized pointer register (e.g. the "(%rsi)" of string
   instructions) wrapped in the syntax's open/close characters.  */
static void
ptr_reg (int code, int sizeflag)
{
  const char *s;

  *obufp++ = open_char;
  used_prefixes |= (prefixes & PREFIX_ADDR);
  if (address_mode == mode_64bit)
    {
      /* In 64-bit mode the address-size prefix selects 32-bit.  */
      if (!(sizeflag & AFLAG))
        s = names32[code - eAX_reg];
      else
        s = names64[code - eAX_reg];
    }
  else if (sizeflag & AFLAG)
    s = names32[code - eAX_reg];
  else
    s = names16[code - eAX_reg];
  oappend (s);
  *obufp++ = close_char;
  *obufp = 0;
}
/* Print the %es:(e/r)di destination operand of string instructions.  */
static void
OP_ESreg (int code, int sizeflag)
{
  if (intel_syntax)
    {
      /* Pick the memory-operand size suffix from the opcode byte that
         was just consumed.  */
      switch (codep[-1])
        {
        case 0x6d:	/* insw/insl */
          intel_operand_size (z_mode, sizeflag);
          break;
        case 0xa5:	/* movsw/movsl/movsq */
        case 0xa7:	/* cmpsw/cmpsl/cmpsq */
        case 0xab:	/* stosw/stosl */
        case 0xaf:	/* scasw/scasl */
          intel_operand_size (v_mode, sizeflag);
          break;
        default:
          intel_operand_size (b_mode, sizeflag);
        }
    }
  /* "+ intel_syntax" skips the leading '%' in Intel mode.  */
  oappend ("%es:" + intel_syntax);
  ptr_reg (code, sizeflag);
}
/* Print the ds-based (e/r)si source operand of string instructions,
   honouring any segment override prefix.  */
static void
OP_DSreg (int code, int sizeflag)
{
  if (intel_syntax)
    {
      /* Pick the memory-operand size suffix from the opcode byte that
         was just consumed.  */
      switch (codep[-1])
        {
        case 0x6f:	/* outsw/outsl */
          intel_operand_size (z_mode, sizeflag);
          break;
        case 0xa5:	/* movsw/movsl/movsq */
        case 0xa7:	/* cmpsw/cmpsl/cmpsq */
        case 0xad:	/* lodsw/lodsl/lodsq */
          intel_operand_size (v_mode, sizeflag);
          break;
        default:
          intel_operand_size (b_mode, sizeflag);
        }
    }
  /* With no explicit override, force %ds so append_seg () prints it.  */
  if ((prefixes
       & (PREFIX_CS
          | PREFIX_DS
          | PREFIX_SS
          | PREFIX_ES
          | PREFIX_FS
          | PREFIX_GS)) == 0)
    prefixes |= PREFIX_DS;
  append_seg ();
  ptr_reg (code, sizeflag);
}
/* Print a control-register operand (%cr0..%cr15).  REX.R, or a lock
   prefix outside 64-bit mode, selects the high bank (%cr8..).  */
static void
OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  int add = 0;
  if (rex & REX_R)
    {
      USED_REX (REX_R);
      add = 8;
    }
  else if (address_mode != mode_64bit && (prefixes & PREFIX_LOCK))
    {
      /* lock mov %cr0 is the 32-bit encoding for accessing %cr8.  */
      used_prefixes |= PREFIX_LOCK;
      add = 8;
    }
  snprintf (scratchbuf, sizeof(scratchbuf), "%%cr%d", modrm.reg + add);
  oappend (scratchbuf + intel_syntax);
}
/* Print a debug-register operand (%db0..%db15, REX.R selecting the
   high bank).  */
static void
OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  int regno = modrm.reg;

  USED_REX (REX_R);
  if (rex & REX_R)
    regno += 8;
  if (intel_syntax)
    snprintf (scratchbuf, sizeof(scratchbuf), "db%d", regno);
  else
    snprintf (scratchbuf, sizeof(scratchbuf), "%%db%d", regno);
  oappend (scratchbuf);
}
/* Print a test-register operand (%tr0..%tr7); no REX extension.  */
static void
OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  snprintf (scratchbuf, sizeof(scratchbuf), "%%tr%d", modrm.reg);
  oappend (scratchbuf + intel_syntax);
}
/* Register-only E operand: the memory (mod != 3) form is invalid.  */
static void
OP_R (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    {
      BadOp ();
      return;
    }
  OP_E (bytemode, sizeflag);
}
/* Print the MMX register operand from modrm.reg; with a data-size
   prefix the same encoding names an XMM register instead.  */
static void
OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  used_prefixes |= (prefixes & PREFIX_DATA);
  if (prefixes & PREFIX_DATA)
    {
      int add = 0;
      USED_REX (REX_R);
      if (rex & REX_R)
        add = 8;
      snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.reg + add);
    }
  else
    snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.reg);
  oappend (scratchbuf + intel_syntax);
}
/* Print the XMM register operand from modrm.reg (REX.R selects the
   high bank %xmm8..%xmm15).  */
static void
OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  int regno = modrm.reg;

  USED_REX (REX_R);
  if (rex & REX_R)
    regno += 8;
  snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", regno);
  oappend (scratchbuf + intel_syntax);
}
/* Print the r/m field as an MMX (or, with a data prefix, XMM) register,
   or as a memory operand via OP_E when mod != 3.  */
static void
OP_EM (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    {
      if (intel_syntax && bytemode == v_mode)
        {
          /* Memory operand size differs between the XMM and MMX forms.  */
          bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode;
          used_prefixes |= (prefixes & PREFIX_DATA);
        }
      OP_E (bytemode, sizeflag);
      return;
    }
  /* Skip mod/rm byte.  */
  MODRM_CHECK;
  codep++;
  used_prefixes |= (prefixes & PREFIX_DATA);
  if (prefixes & PREFIX_DATA)
    {
      int add = 0;
      USED_REX (REX_B);
      if (rex & REX_B)
        add = 8;
      snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.rm + add);
    }
  else
    snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.rm);
  oappend (scratchbuf + intel_syntax);
}
/* cvt* are the only instructions in sse2 which have
both SSE and MMX operands and also have 0x66 prefix
in their opcode. 0x66 was originally used to differentiate
between SSE and MMX instruction(operands). So we have to handle the
cvt* separately using OP_EMC and OP_MXC */
/* Like OP_EM but for the cvt* instructions, whose 0x66 prefix selects
   the instruction rather than an XMM operand: the register form is
   always an MMX register.  */
static void
OP_EMC (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    {
      if (intel_syntax && bytemode == v_mode)
        {
          /* Memory operand size still depends on the data prefix.  */
          bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode;
          used_prefixes |= (prefixes & PREFIX_DATA);
        }
      OP_E (bytemode, sizeflag);
      return;
    }
  /* Skip mod/rm byte.  */
  MODRM_CHECK;
  codep++;
  used_prefixes |= (prefixes & PREFIX_DATA);
  snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.rm);
  oappend (scratchbuf + intel_syntax);
}
/* cvt* companion of OP_MMX: modrm.reg is always an MMX register, even
   with a 0x66 prefix (which selects the instruction, not XMM).  */
static void
OP_MXC (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  used_prefixes |= (prefixes & PREFIX_DATA);
  snprintf (scratchbuf, sizeof(scratchbuf), "%%mm%d", modrm.reg);
  oappend (scratchbuf + intel_syntax);
}
/* Print the r/m field as an XMM register (REX.B selects the high
   bank), or as a memory operand via OP_E when mod != 3.  */
static void
OP_EX (int bytemode, int sizeflag)
{
  int add = 0;
  if (modrm.mod != 3)
    {
      OP_E (bytemode, sizeflag);
      return;
    }
  USED_REX (REX_B);
  if (rex & REX_B)
    add = 8;
  /* Skip mod/rm byte.  */
  MODRM_CHECK;
  codep++;
  snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", modrm.rm + add);
  oappend (scratchbuf + intel_syntax);
}
/* MMX register-only operand: the memory form is invalid.  */
static void
OP_MS (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    {
      BadOp ();
      return;
    }
  OP_EM (bytemode, sizeflag);
}
/* XMM register-only operand: the memory form is invalid.  */
static void
OP_XS (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    {
      BadOp ();
      return;
    }
  OP_EX (bytemode, sizeflag);
}
/* Memory-only operand: the register form (mod == 3) is invalid for
   bound, lea, lds, les, lfs, lgs, lss, cmpxchg8b and vmptrst.  */
static void
OP_M (int bytemode, int sizeflag)
{
  if (modrm.mod != 3)
    OP_E (bytemode, sizeflag);
  else
    BadOp ();
}
/* Operand of opcode 0f07 (sysret): only mod == 3, rm == 0 is valid.  */
static void
OP_0f07 (int bytemode, int sizeflag)
{
  if (modrm.mod == 3 && modrm.rm == 0)
    OP_E (bytemode, sizeflag);
  else
    BadOp ();
}
/* Operand of opcode group 0fae: clflush takes a memory operand; the
   register forms encode sfence/mfence/lfence instead.  */
static void
OP_0fae (int bytemode, int sizeflag)
{
  if (modrm.mod == 3)
    {
      /* "clflush" was pre-printed into obuf; overwrite it in place
         with "sfence" for reg == 7.  */
      if (modrm.reg == 7)
        strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence");
      if (modrm.reg < 5 || modrm.rm != 0)
        {
          BadOp ();	/* bad sfence, mfence, or lfence */
          return;
        }
    }
  else if (modrm.reg != 7)
    {
      BadOp ();		/* bad clflush */
      return;
    }
  OP_E (bytemode, sizeflag);
}
/* NOP is an alias of "xchg %ax,%ax" in 16bit mode, "xchg %eax,%eax" in
32bit mode and "xchg %rax,%rax" in 64bit mode. */
/* Opcode 0x90: print "nop" for the plain xchg %eax,%eax encodings, but
   keep the xchg register operand when a data prefix or a meaningful
   REX prefix (other than plain REX.W) is present.  */
static void
NOP_Fixup1 (int bytemode, int sizeflag)
{
  if ((prefixes & PREFIX_DATA) != 0
      || (rex != 0
          && rex != 0x48
          && address_mode == mode_64bit))
    OP_REG (bytemode, sizeflag);
  else
    strcpy (obuf, "nop");
}
/* Second operand of opcode 0x90: printed only when NOP_Fixup1 decided
   the instruction is a real xchg (otherwise obuf already says "nop"
   and this operand is suppressed).  */
static void
NOP_Fixup2 (int bytemode, int sizeflag)
{
  if ((prefixes & PREFIX_DATA) != 0
      || (rex != 0
          && rex != 0x48
          && address_mode == mode_64bit))
    OP_IMREG (bytemode, sizeflag);
}
/* AMD 3DNow! mnemonics, indexed by the opcode-suffix byte that follows
   the 0f0f opcode and its modrm/sib bytes.  NULL marks an undefined
   suffix.  */
static const char *Suffix3DNow[] = {
/* 00 */	NULL,		NULL,		NULL,		NULL,
/* 04 */	NULL,		NULL,		NULL,		NULL,
/* 08 */	NULL,		NULL,		NULL,		NULL,
/* 0C */	"pi2fw",	"pi2fd",	NULL,		NULL,
/* 10 */	NULL,		NULL,		NULL,		NULL,
/* 14 */	NULL,		NULL,		NULL,		NULL,
/* 18 */	NULL,		NULL,		NULL,		NULL,
/* 1C */	"pf2iw",	"pf2id",	NULL,		NULL,
/* 20 */	NULL,		NULL,		NULL,		NULL,
/* 24 */	NULL,		NULL,		NULL,		NULL,
/* 28 */	NULL,		NULL,		NULL,		NULL,
/* 2C */	NULL,		NULL,		NULL,		NULL,
/* 30 */	NULL,		NULL,		NULL,		NULL,
/* 34 */	NULL,		NULL,		NULL,		NULL,
/* 38 */	NULL,		NULL,		NULL,		NULL,
/* 3C */	NULL,		NULL,		NULL,		NULL,
/* 40 */	NULL,		NULL,		NULL,		NULL,
/* 44 */	NULL,		NULL,		NULL,		NULL,
/* 48 */	NULL,		NULL,		NULL,		NULL,
/* 4C */	NULL,		NULL,		NULL,		NULL,
/* 50 */	NULL,		NULL,		NULL,		NULL,
/* 54 */	NULL,		NULL,		NULL,		NULL,
/* 58 */	NULL,		NULL,		NULL,		NULL,
/* 5C */	NULL,		NULL,		NULL,		NULL,
/* 60 */	NULL,		NULL,		NULL,		NULL,
/* 64 */	NULL,		NULL,		NULL,		NULL,
/* 68 */	NULL,		NULL,		NULL,		NULL,
/* 6C */	NULL,		NULL,		NULL,		NULL,
/* 70 */	NULL,		NULL,		NULL,		NULL,
/* 74 */	NULL,		NULL,		NULL,		NULL,
/* 78 */	NULL,		NULL,		NULL,		NULL,
/* 7C */	NULL,		NULL,		NULL,		NULL,
/* 80 */	NULL,		NULL,		NULL,		NULL,
/* 84 */	NULL,		NULL,		NULL,		NULL,
/* 88 */	NULL,		NULL,		"pfnacc",	NULL,
/* 8C */	NULL,		NULL,		"pfpnacc",	NULL,
/* 90 */	"pfcmpge",	NULL,		NULL,		NULL,
/* 94 */	"pfmin",	NULL,		"pfrcp",	"pfrsqrt",
/* 98 */	NULL,		NULL,		"pfsub",	NULL,
/* 9C */	NULL,		NULL,		"pfadd",	NULL,
/* A0 */	"pfcmpgt",	NULL,		NULL,		NULL,
/* A4 */	"pfmax",	NULL,		"pfrcpit1",	"pfrsqit1",
/* A8 */	NULL,		NULL,		"pfsubr",	NULL,
/* AC */	NULL,		NULL,		"pfacc",	NULL,
/* B0 */	"pfcmpeq",	NULL,		NULL,		NULL,
/* B4 */	"pfmul",	NULL,		"pfrcpit2",	"pmulhrw",
/* B8 */	NULL,		NULL,		NULL,		"pswapd",
/* BC */	NULL,		NULL,		NULL,		"pavgusb",
/* C0 */	NULL,		NULL,		NULL,		NULL,
/* C4 */	NULL,		NULL,		NULL,		NULL,
/* C8 */	NULL,		NULL,		NULL,		NULL,
/* CC */	NULL,		NULL,		NULL,		NULL,
/* D0 */	NULL,		NULL,		NULL,		NULL,
/* D4 */	NULL,		NULL,		NULL,		NULL,
/* D8 */	NULL,		NULL,		NULL,		NULL,
/* DC */	NULL,		NULL,		NULL,		NULL,
/* E0 */	NULL,		NULL,		NULL,		NULL,
/* E4 */	NULL,		NULL,		NULL,		NULL,
/* E8 */	NULL,		NULL,		NULL,		NULL,
/* EC */	NULL,		NULL,		NULL,		NULL,
/* F0 */	NULL,		NULL,		NULL,		NULL,
/* F4 */	NULL,		NULL,		NULL,		NULL,
/* F8 */	NULL,		NULL,		NULL,		NULL,
/* FC */	NULL,		NULL,		NULL,		NULL,
};
/* Resolve the 3DNow! mnemonic from its trailing opcode-suffix byte.  */
static void
OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  const char *mnemonic;

  fetch_data(the_info, codep + 1);
  /* AMD 3DNow! instructions are specified by an opcode suffix in the
     place where an 8-bit immediate would normally go.  ie. the last
     byte of the instruction.  */
  obufp = obuf + strlen (obuf);
  mnemonic = Suffix3DNow[*codep++ & 0xff];
  if (mnemonic)
    oappend (mnemonic);
  else
    {
      /* Since a variable sized modrm/sib chunk is between the start
         of the opcode (0x0f0f) and the opcode suffix, we need to do
         all the modrm processing first, and don't know until now that
         we have a bad opcode.  This necessitates some cleaning up.  */
      op_out[0][0] = '\0';
      op_out[1][0] = '\0';
      BadOp ();
    }
}
/* SSE compare predicate names, indexed by the cmpps/cmpss immediate.  */
static const char *simd_cmp_op[] = {
  "eq",
  "lt",
  "le",
  "unord",
  "neq",
  "nlt",
  "nle",
  "ord"
};
/* Build the cmp{predicate}{p|s}{s|d} mnemonic for SSE compares from
   the trailing immediate byte and the repz/data/repnz prefixes.  */
static void
OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
{
  unsigned int cmp_type;

  fetch_data(the_info, codep + 1);
  obufp = obuf + strlen (obuf);
  cmp_type = *codep++ & 0xff;
  if (cmp_type < 8)
    {
      /* suffix1: packed ('p') vs scalar ('s'); suffix2: single ('s')
         vs double ('d').  */
      char suffix1 = 'p', suffix2 = 's';
      used_prefixes |= (prefixes & PREFIX_REPZ);
      if (prefixes & PREFIX_REPZ)
        suffix1 = 's';
      else
        {
          used_prefixes |= (prefixes & PREFIX_DATA);
          if (prefixes & PREFIX_DATA)
            suffix2 = 'd';
          else
            {
              used_prefixes |= (prefixes & PREFIX_REPNZ);
              if (prefixes & PREFIX_REPNZ)
                suffix1 = 's', suffix2 = 'd';
            }
        }
      snprintf (scratchbuf, sizeof(scratchbuf), "cmp%s%c%c",
                simd_cmp_op[cmp_type], suffix1, suffix2);
      used_prefixes |= (prefixes & PREFIX_REPZ);
      oappend (scratchbuf);
    }
  else
    {
      /* We have a bad extension byte.  Clean up.  */
      op_out[0][0] = '\0';
      op_out[1][0] = '\0';
      BadOp ();
    }
}
/* Change movlps/movhps to movhlps/movlhps for 2 register operand
   forms of these instructions.  Inserts EXTRACHAR as the third letter
   of the mnemonic already printed into obuf, shifting the tail right
   by one (e.g. "movlps" -> "movhlps").  */
static void
SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED)
{
  if (modrm.mod == 3)
    {
      char *p = obuf + strlen (obuf);
      *(p + 1) = '\0';
      *p = *(p - 1);
      *(p - 1) = *(p - 2);
      *(p - 2) = *(p - 3);
      *(p - 3) = extrachar;
    }
}
/* Rewrite the pre-printed "sidt" into monitor/mwait for the register
   encodings of opcode 0f01/1, fixing up the implicit register operands
   for AT&T mode; otherwise decode a normal sidt memory operand.  */
static void
PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
{
  if (modrm.mod == 3 && modrm.reg == 1 && modrm.rm <= 1)
    {
      /* Override "sidt".  */
      size_t olen = strlen (obuf);
      char *p = obuf + olen - 4;
      const char * const *names = (address_mode == mode_64bit
                                   ? names64 : names32);

      /* We might have a suffix when disassembling with -Msuffix.  */
      if (*p == 'i')
        --p;

      /* Remove "addr16/addr32" if we aren't in Intel mode.  */
      if (!intel_syntax
          && (prefixes & PREFIX_ADDR)
          && olen >= (4 + 7)
          && *(p - 1) == ' '
          && strncmp (p - 7, "addr", 4) == 0
          && (strncmp (p - 3, "16", 2) == 0
              || strncmp (p - 3, "32", 2) == 0))
        p -= 7;

      if (modrm.rm)
        {
          /* mwait %eax,%ecx  */
          strcpy (p, "mwait");
          if (!intel_syntax)
            strcpy (op_out[0], names[0]);
        }
      else
        {
          /* monitor %eax,%ecx,%edx"  */
          strcpy (p, "monitor");
          if (!intel_syntax)
            {
              const char * const *op1_names;
              /* monitor's first operand follows the address size.  */
              if (!(prefixes & PREFIX_ADDR))
                op1_names = (address_mode == mode_16bit
                             ? names16 : names);
              else
                {
                  op1_names = (address_mode != mode_32bit
                               ? names32 : names16);
                  used_prefixes |= PREFIX_ADDR;
                }
              strcpy (op_out[0], op1_names[0]);
              strcpy (op_out[2], names[2]);
            }
        }
      if (!intel_syntax)
        {
          strcpy (op_out[1], names[1]);
          two_source_ops = 1;
        }
      codep++;
    }
  else
    OP_M (0, sizeflag);
}
/* Rewrite the pre-printed "lidt" into the AMD SVM instructions encoded
   by the modrm byte of opcode 0f01/3 (vmrun..invlpga); otherwise decode
   a normal lidt memory operand.  */
static void
SVME_Fixup (int bytemode, int sizeflag)
{
  const char *alt;
  char *p;

  switch (*codep)
    {
    case 0xd8:
      alt = "vmrun";
      break;
    case 0xd9:
      alt = "vmmcall";
      break;
    case 0xda:
      alt = "vmload";
      break;
    case 0xdb:
      alt = "vmsave";
      break;
    case 0xdc:
      alt = "stgi";
      break;
    case 0xdd:
      alt = "clgi";
      break;
    case 0xde:
      alt = "skinit";
      break;
    case 0xdf:
      alt = "invlpga";
      break;
    default:
      OP_M (bytemode, sizeflag);
      return;
    }
  /* Override "lidt".  */
  p = obuf + strlen (obuf) - 4;
  /* We might have a suffix.  */
  if (*p == 'i')
    --p;
  strcpy (p, alt);
  if (!(prefixes & PREFIX_ADDR))
    {
      ++codep;
      return;
    }
  used_prefixes |= PREFIX_ADDR;
  switch (*codep++)
    {
    case 0xdf:
      /* invlpga takes %ecx as a second operand.  */
      strcpy (op_out[1], names32[1]);
      two_source_ops = 1;
      /* Fall through.  */
    case 0xd8:
    case 0xda:
    case 0xdb:
      /* vmrun/vmload/vmsave/invlpga take an implicit (e/r)ax pointer
         operand whose width follows the address size.  */
      *obufp++ = open_char;
      if (address_mode == mode_64bit || (sizeflag & AFLAG))
        alt = names32[0];
      else
        alt = names16[0];
      strcpy (obufp, alt);
      obufp += strlen (alt);
      *obufp++ = close_char;
      *obufp = '\0';
      break;
    }
}
/* Rewrite the pre-printed "invlpg" into swapgs/rdtscp for the register
   encodings of opcode 0f01/7; otherwise decode the memory operand.  */
static void
INVLPG_Fixup (int bytemode, int sizeflag)
{
  const char *alt;

  switch (*codep)
    {
    case 0xf8:
      alt = "swapgs";
      break;
    case 0xf9:
      alt = "rdtscp";
      break;
    default:
      OP_M (bytemode, sizeflag);
      return;
    }
  /* Override "invlpg".  */
  strcpy (obuf + strlen (obuf) - 6, alt);
  codep++;
}
/* Report an undecodable instruction as "(bad)".  */
static void
BadOp (void)
{
  /* Throw away prefixes and 1st. opcode byte.  */
  codep = insn_codep + 1;
  oappend ("(bad)");
}
/* Rewrite the pre-printed "sgdt" into the Intel VMX instructions
   encoded by modrm.rm of opcode 0f01/0; otherwise decode normally.  */
static void
VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
{
  if (modrm.mod == 3
      && modrm.reg == 0
      && modrm.rm >=1
      && modrm.rm <= 4)
    {
      /* Override "sgdt".  */
      char *p = obuf + strlen (obuf) - 4;

      /* We might have a suffix when disassembling with -Msuffix.  */
      if (*p == 'g')
        --p;

      switch (modrm.rm)
        {
        case 1:
          strcpy (p, "vmcall");
          break;
        case 2:
          strcpy (p, "vmlaunch");
          break;
        case 3:
          strcpy (p, "vmresume");
          break;
        case 4:
          strcpy (p, "vmxoff");
          break;
        }

      codep++;
    }
  else
    OP_E (0, sizeflag);
}
/* Select the VMX mnemonic for opcode 0fc7/6 from its mandatory prefix
   (0x66 -> vmclear, 0xf3 -> vmxon, none -> vmptrld), then decode the
   memory operand.  */
static void
OP_VMX (int bytemode, int sizeflag)
{
  const char *mnemonic;

  used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ));
  if (prefixes & PREFIX_DATA)
    mnemonic = "vmclear";
  else if (prefixes & PREFIX_REPZ)
    mnemonic = "vmxon";
  else
    mnemonic = "vmptrld";
  strcpy (obuf, mnemonic);
  OP_E (bytemode, sizeflag);
}
/* The 0xf3 prefix should be displayed as "rep" for ins, outs, movs,
   lods and stos.  Shrinks the pre-printed "repz " to "rep " in obuf,
   then decodes the operand via the appropriate implicit-register
   helper.  */
static void
REP_Fixup (int bytemode, int sizeflag)
{
  /* ilen: length of the mnemonic that follows the prefix, used to find
     the "repz" text inside obuf.  */
  size_t ilen = 0;

  if (prefixes & PREFIX_REPZ)
    switch (*insn_codep)
      {
      case 0x6e:	/* outsb */
      case 0x6f:	/* outsw/outsl */
      case 0xa4:	/* movsb */
      case 0xa5:	/* movsw/movsl/movsq */
        if (!intel_syntax)
          ilen = 5;
        else
          ilen = 4;
        break;
      case 0xaa:	/* stosb */
      case 0xab:	/* stosw/stosl/stosq */
      case 0xac:	/* lodsb */
      case 0xad:	/* lodsw/lodsl/lodsq */
        if (!intel_syntax && (sizeflag & SUFFIX_ALWAYS))
          ilen = 5;
        else
          ilen = 4;
        break;
      case 0x6c:	/* insb */
      case 0x6d:	/* insl/insw */
        if (!intel_syntax)
          ilen = 4;
        else
          ilen = 3;
        break;
      default:
        abort ();
        break;
      }

  if (ilen != 0)
    {
      size_t olen;
      char *p;

      olen = strlen (obuf);
      /* p points at the 'z' of "repz" (mnemonic + space + "repz").  */
      p = obuf + olen - ilen - 1 - 4;
      /* Handle "repz [addr16|addr32]".  */
      if ((prefixes & PREFIX_ADDR))
        p -= 1 + 6;
      /* Delete the 'z' by shifting the rest of the buffer left.  */
      memmove (p + 3, p + 4, olen - (p + 3 - obuf));
    }

  switch (bytemode)
    {
    case al_reg:
    case eAX_reg:
    case indir_dx_reg:
      OP_IMREG (bytemode, sizeflag);
      break;
    case eDI_reg:
      OP_ESreg (bytemode, sizeflag);
      break;
    case eSI_reg:
      OP_DSreg (bytemode, sizeflag);
      break;
    default:
      abort ();
      break;
    }
}
/* With REX.W, cmpxchg8b operates on a 16-byte operand: patch the
   mnemonic to cmpxchg16b and widen the memory operand.  */
static void
CMPXCHG8B_Fixup (int bytemode, int sizeflag)
{
  USED_REX (REX_W);
  if (rex & REX_W)
    {
      /* Change cmpxchg8b to cmpxchg16b.  */
      char *p = obuf + strlen (obuf) - 2;
      strcpy (p, "16b");
      bytemode = o_mode;
    }
  OP_M (bytemode, sizeflag);
}
/* Print a fixed XMM register operand (REG is the register number).  */
static void
XMM_Fixup (int reg, int sizeflag ATTRIBUTE_UNUSED)
{
  snprintf (scratchbuf, sizeof(scratchbuf), "%%xmm%d", reg);
  oappend (scratchbuf + intel_syntax);
}
/* Add the operand-size suffix to the pre-printed "crc32" mnemonic in
   AT&T mode, then print the source operand (register or memory).  */
static void
CRC32_Fixup (int bytemode, int sizeflag)
{
  /* Add proper suffix to "crc32".  */
  char *p = obuf + strlen (obuf);

  switch (bytemode)
    {
    case b_mode:
      if (intel_syntax)
        break;

      *p++ = 'b';
      break;
    case v_mode:
      if (intel_syntax)
        break;

      USED_REX (REX_W);
      if (rex & REX_W)
        *p++ = 'q';
      else if (sizeflag & DFLAG)
        *p++ = 'l';
      else
        *p++ = 'w';
      used_prefixes |= (prefixes & PREFIX_DATA);
      break;
    default:
      oappend (INTERNAL_DISASSEMBLER_ERROR);
      break;
    }
  *p = '\0';

  if (modrm.mod == 3)
    {
      int add;

      /* Skip mod/rm byte.  */
      MODRM_CHECK;
      codep++;

      USED_REX (REX_B);
      add = (rex & REX_B) ? 8 : 0;
      if (bytemode == b_mode)
        {
          /* Any REX prefix selects the spl/bpl/sil/dil byte names.  */
          USED_REX (0);
          if (rex)
            oappend (names8rex[modrm.rm + add]);
          else
            oappend (names8[modrm.rm + add]);
        }
      else
        {
          USED_REX (REX_W);
          if (rex & REX_W)
            oappend (names64[modrm.rm + add]);
          else if ((prefixes & PREFIX_DATA))
            oappend (names16[modrm.rm + add]);
          else
            oappend (names32[modrm.rm + add]);
        }
    }
  else
    OP_E (bytemode, sizeflag);
}
| gpl-2.0 |
Warter21/linux-4.0_imx6 | arch/s390/mm/hugetlbpage.c | 362 | 5637 | /*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright IBM Corp. 2007
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
/* Translate a huge pte into the segment-table (pmd) entry that actually
 * backs it: the origin is kept and each software/hardware bit is moved
 * to its segment-entry position per the table below.
 */
static inline pmd_t __pte_to_pmd(pte_t pte)
{
	pmd_t pmd;

	/*
	 * Convert encoding		  pte bits	   pmd bits
	 *				.IR...wrdytp	dy..R...I...wr
	 * empty			.10...000000 -> 00..0...1...00
	 * prot-none, clean, old	.11...000001 -> 00..1...1...00
	 * prot-none, clean, young	.11...000101 -> 01..1...1...00
	 * prot-none, dirty, old	.10...001001 -> 10..1...1...00
	 * prot-none, dirty, young	.10...001101 -> 11..1...1...00
	 * read-only, clean, old	.11...010001 -> 00..1...1...01
	 * read-only, clean, young	.01...010101 -> 01..1...0...01
	 * read-only, dirty, old	.11...011001 -> 10..1...1...01
	 * read-only, dirty, young	.01...011101 -> 11..1...0...01
	 * read-write, clean, old	.11...110001 -> 00..0...1...11
	 * read-write, clean, young	.01...110101 -> 01..0...0...11
	 * read-write, dirty, old	.10...111001 -> 10..0...1...11
	 * read-write, dirty, young	.00...111101 -> 11..0...0...11
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}
/* Inverse of __pte_to_pmd: reconstruct the huge pte from a segment
 * table entry, undoing each bit movement of the forward conversion.
 */
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding		   pmd bits	    pte bits
	 *				dy..R...I...wr	.IR...wrdytp
	 * empty			00..0...1...00 -> .10...001100
	 * prot-none, clean, old	00..0...1...00 -> .10...000001
	 * prot-none, clean, young	01..0...1...00 -> .10...000101
	 * prot-none, dirty, old	10..0...1...00 -> .10...001001
	 * prot-none, dirty, young	11..0...1...00 -> .10...001101
	 * read-only, clean, old	00..1...1...01 -> .11...010001
	 * read-only, clean, young	01..1...1...01 -> .11...010101
	 * read-only, dirty, old	10..1...1...01 -> .11...011001
	 * read-only, dirty, young	11..1...1...01 -> .11...011101
	 * read-write, clean, old	00..0...1...11 -> .10...110001
	 * read-write, clean, young	01..0...1...11 -> .10...110101
	 * read-write, dirty, old	10..0...1...11 -> .10...111001
	 * read-write, dirty, young	11..0...1...11 -> .10...111101
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
		/*
		 * Bug fix: the dirty and young bits were OR'ed into the
		 * local pmd (a no-op, with the shift direction of the
		 * forward conversion), so the returned pte always lost
		 * its dirty/young state.  Mirror __pte_to_pmd's "<< 10"
		 * by shifting the segment bits back down into the pte.
		 */
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}
/* Install a huge pte by writing the equivalent segment-table entry.
 * Without hardware large pages the entry instead points at the shadow
 * pte table set up by arch_prepare_hugepage() (stored in page[1].index).
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd;

	pmd = __pte_to_pmd(pte);
	if (!MACHINE_HAS_HPAGE) {
		/* Emulated huge ptes loose the dirty and young bit */
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= pte_page(pte)[1].index;
	} else
		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	*(pmd_t *) ptep = pmd;
}
/* Read back a huge pte.  For emulated huge pages the segment entry
 * holds the shadow pte table address, so recover the real page origin
 * from the first shadow pte before converting.
 */
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin;
	pmd_t pmd;

	pmd = *(pmd_t *) ptep;
	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= *(unsigned long *) origin;
		/* Emulated huge ptes are young and dirty by definition */
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
	}
	return __pmd_to_pte(pmd);
}
/* Atomically fetch the current huge pte, flush the segment entry from
 * the TLB and mark it empty; returns the previous pte value.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	pmdp_flush_direct(mm, addr, pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}
/* On machines without hardware large-page support, build a shadow pte
 * table covering the compound page so huge mappings can be emulated;
 * its address is stashed in page[1].index.  Returns 0 or -ENOMEM.
 */
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	/* Fill the shadow table with ptes mapping consecutive pages.  */
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}
/* Free the shadow pte table created by arch_prepare_hugepage(), if any.  */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	/* Invalidate the entries before returning the table.  */
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}
/* Allocate the page-table levels down to the pmd for ADDR; on s390 a
 * huge pte lives in the segment (pmd) entry, so return it as a pte.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (!pudp)
		return NULL;
	return (pte_t *) pmd_alloc(mm, pudp, addr);
}
/* Walk the page table to the pmd entry for ADDR without allocating;
 * returns NULL when an upper level is not present.
 */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(*pgdp))
		return NULL;
	pudp = pud_offset(pgdp, addr);
	if (!pud_present(*pudp))
		return NULL;
	return (pte_t *) pmd_offset(pudp, addr);
}
/* s390 does not share huge pmds between processes; nothing to unshare.  */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
/* A pmd is huge only with hardware large-page support and the large
 * bit set in the segment entry.
 */
int pmd_huge(pmd_t pmd)
{
	return MACHINE_HAS_HPAGE &&
	       (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
/* s390 has no pud-level (region) huge pages here; always false.  */
int pud_huge(pud_t pud)
{
	return 0;
}
| gpl-2.0 |
BenHuiHui/linux | drivers/watchdog/diag288_wdt.c | 618 | 7569 | /*
* Watchdog driver for z/VM and LPAR using the diag 288 interface.
*
* Under z/VM, expiration of the watchdog will send a "system restart" command
* to CP.
*
* The command can be altered using the module parameter "cmd". This is
* not recommended because it's only supported on z/VM but not whith LPAR.
*
* On LPAR, the watchdog will always trigger a system restart. the module
* paramter cmd is meaningless here.
*
*
* Copyright IBM Corp. 2004, 2013
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Philipp Hachtmann (phacht@de.ibm.com)
*
*/
#define KMSG_COMPONENT "diag288_wdt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/suspend.h>
#include <asm/ebcdic.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#define MAX_CMDLEN 240
#define DEFAULT_CMD "SYSTEM RESTART"

#define MIN_INTERVAL 15     /* Minimal time supported by diag88 */
#define MAX_INTERVAL 3600   /* One hour should be enough - pure estimation */

#define WDT_DEFAULT_TIMEOUT 30

/* Function codes - init, change, cancel */
#define WDT_FUNC_INIT 0
#define WDT_FUNC_CHANGE 1
#define WDT_FUNC_CANCEL 2
#define WDT_FUNC_CONCEAL 0x80000000

/* Action codes for LPAR watchdog */
#define LPARWDT_RESTART 0

/* CP command run on z/VM when the watchdog fires (module param "cmd").  */
static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
/* Whether to pass the CONCEAL option to CP (module param "conceal").  */
static bool conceal_on;
static bool nowayout_info = WATCHDOG_NOWAYOUT;

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_AUTHOR("Philipp Hachtmann <phacht@de.ibm.com>");

MODULE_DESCRIPTION("System z diag288 Watchdog Timer");

module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");

module_param_named(conceal, conceal_on, bool, 0644);
MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");

module_param_named(nowayout, nowayout_info, bool, 0444);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");

MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_ALIAS("vmwatchdog");
/* Issue the DIAGNOSE 288 instruction with FUNC/TIMEOUT in r2/r3 and
 * ACTION/LEN in r4/r5.  Returns 0 on success, -EINVAL when the diag
 * faults (the fixup branches past the "la %0,0").
 */
static int __diag288(unsigned int func, unsigned int timeout,
		     unsigned long action, unsigned int len)
{
	register unsigned long __func asm("2") = func;
	register unsigned long __timeout asm("3") = timeout;
	register unsigned long __action asm("4") = action;
	register unsigned long __len asm("5") = len;
	int err;

	err = -EINVAL;
	asm volatile(
		"	diag	%1, %3, 0x288\n"
		"0:	la	%0, 0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (err) : "d"(__func), "d"(__timeout),
		  "d"(__action), "d"(__len) : "1", "cc");
	return err;
}
/* z/VM flavour of diag 288: the action parameter is the real address
 * of an EBCDIC CP command of LEN bytes.
 */
static int __diag288_vm(unsigned int func, unsigned int timeout,
			char *cmd, size_t len)
{
	unsigned long action = virt_to_phys(cmd);

	return __diag288(func, timeout, action, len);
}
/* LPAR flavour of diag 288: ACTION is an action code and there is no
 * command buffer, so the length is always zero.
 */
static int __diag288_lpar(unsigned int func, unsigned int timeout,
			  unsigned long action)
{
	return __diag288(func, timeout, action, 0);
}
/* Arm the watchdog.  On z/VM the configured CP command is converted to
 * uppercase EBCDIC and registered; on LPAR a system restart action is
 * armed.  Returns 0 or a negative errno.
 */
static int wdt_start(struct watchdog_device *dev)
{
	char *ebc_cmd;
	size_t len;
	int ret;
	unsigned int func;

	ret = -ENODEV;

	if (MACHINE_IS_VM) {
		ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
		if (!ebc_cmd)
			return -ENOMEM;
		len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
		ASCEBC(ebc_cmd, MAX_CMDLEN);
		EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);

		func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
			: WDT_FUNC_INIT;
		ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
		WARN_ON(ret != 0);
		kfree(ebc_cmd);
	} else {
		ret = __diag288_lpar(WDT_FUNC_INIT,
				     dev->timeout, LPARWDT_RESTART);
	}

	if (ret) {
		pr_err("The watchdog cannot be activated\n");
		return ret;
	}
	return 0;
}
/* Disarm the watchdog by cancelling the running diag 288 timer. */
static int wdt_stop(struct watchdog_device *dev)
{
	return __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
}
/* Re-trigger the running watchdog so it does not expire.  */
static int wdt_ping(struct watchdog_device *dev)
{
	char *ebc_cmd;
	size_t len;
	int ret;
	unsigned int func;

	ret = -ENODEV;

	if (MACHINE_IS_VM) {
		ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
		if (!ebc_cmd)
			return -ENOMEM;
		len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
		ASCEBC(ebc_cmd, MAX_CMDLEN);
		EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);

		/*
		 * It seems to be ok to z/VM to use the init function to
		 * retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must
		 * be used when the watchdog is running.
		 */
		func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
			: WDT_FUNC_INIT;

		ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
		WARN_ON(ret != 0);
		kfree(ebc_cmd);
	} else {
		ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
	}

	if (ret)
		pr_err("The watchdog timer cannot be started or reset\n");
	return ret;
}
/* Store the new timeout and re-arm the timer so it takes effect.  */
static int wdt_set_timeout(struct watchdog_device * dev, unsigned int new_to)
{
	dev->timeout = new_to;
	return wdt_ping(dev);
}
/* Operations exposed to the watchdog core.  */
static struct watchdog_ops wdt_ops = {
	.owner = THIS_MODULE,
	.start = wdt_start,
	.stop = wdt_stop,
	.ping = wdt_ping,
	.set_timeout = wdt_set_timeout,
};
/* Capabilities reported through WDIOC_GETSUPPORT.  */
static struct watchdog_info wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.firmware_version = 0,
	.identity = "z Watchdog",
};
/* The single watchdog device registered by this driver.  */
static struct watchdog_device wdt_dev = {
	.parent = NULL,
	.info = &wdt_info,
	.ops = &wdt_ops,
	.bootstatus = 0,
	.timeout = WDT_DEFAULT_TIMEOUT,
	.min_timeout = MIN_INTERVAL,
	.max_timeout = MAX_INTERVAL,
};
/*
* It makes no sense to go into suspend while the watchdog is running.
* Depending on the memory size, the watchdog might trigger, while we
* are still saving the memory.
* We reuse the open flag to ensure that suspend and watchdog open are
* exclusive operations
*/
/* Refuse suspend while the device is open or the timer is armed; on
 * success the open flag stays set to keep userspace out until resume.
 */
static int wdt_suspend(void)
{
	if (test_and_set_bit(WDOG_DEV_OPEN, &wdt_dev.status)) {
		pr_err("Linux cannot be suspended while the watchdog is in use\n");
		return notifier_from_errno(-EBUSY);
	}
	if (test_bit(WDOG_ACTIVE, &wdt_dev.status)) {
		/* Timer armed: release the flag we just claimed.  */
		clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
		pr_err("Linux cannot be suspended while the watchdog is in use\n");
		return notifier_from_errno(-EBUSY);
	}
	return NOTIFY_DONE;
}
/* Drop the open flag claimed by wdt_suspend() so the device can be
 * opened again after resume.
 */
static int wdt_resume(void)
{
	clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
	return NOTIFY_DONE;
}
/* PM notifier: block suspend/hibernate while the watchdog is in use and
 * release the interlock again afterwards.
 */
static int wdt_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	if (event == PM_HIBERNATION_PREPARE || event == PM_SUSPEND_PREPARE)
		return wdt_suspend();
	if (event == PM_POST_HIBERNATION || event == PM_POST_SUSPEND)
		return wdt_resume();
	return NOTIFY_DONE;
}
/* Hooks wdt_power_event() into the PM transition notifier chain. */
static struct notifier_block wdt_power_notifier = {
	.notifier_call = wdt_power_event,
};
/*
 * Module init: probe that the diag 288 watchdog actually works on this
 * system (z/VM guest or LPAR), cancel the timer armed by the probe, and
 * register the PM notifier and the watchdog device.
 */
static int __init diag288_init(void)
{
	int ret;
	/* "BEGIN" in EBCDIC (0xC2 0xC5 0xC7 0xC9 0xD5) - used as a
	 * harmless CP command for the z/VM probe. */
	char ebc_begin[] = {
		194, 197, 199, 201, 213
	};

	watchdog_set_nowayout(&wdt_dev, nowayout_info);

	if (MACHINE_IS_VM) {
		if (__diag288_vm(WDT_FUNC_INIT, 15,
				 ebc_begin, sizeof(ebc_begin)) != 0) {
			pr_err("The watchdog cannot be initialized\n");
			return -EINVAL;
		}
	} else {
		if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
			pr_err("The watchdog cannot be initialized\n");
			return -EINVAL;
		}
	}

	/* The probe above armed the timer - deactivate it again before
	 * anything is registered. */
	if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
		pr_err("The watchdog cannot be deactivated\n");
		return -EINVAL;
	}

	ret = register_pm_notifier(&wdt_power_notifier);
	if (ret)
		return ret;

	/* Unwind the notifier registration if device registration fails. */
	ret = watchdog_register_device(&wdt_dev);
	if (ret)
		unregister_pm_notifier(&wdt_power_notifier);

	return ret;
}
/* Module exit: tear down in reverse order of diag288_init(). */
static void __exit diag288_exit(void)
{
	watchdog_unregister_device(&wdt_dev);
	unregister_pm_notifier(&wdt_power_notifier);
}
module_init(diag288_init);
module_exit(diag288_exit);
| gpl-2.0 |
bigzz/linux-xfs | drivers/net/ethernet/intel/igbvf/ethtool.c | 618 | 13213 | /*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for igbvf */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include "igbvf.h"
#include <linux/if_vlan.h>
/*
 * Descriptor for one exported ethtool statistic: the name shown to user
 * space, the size of the counter, and the byte offsets (within struct
 * igbvf_adapter) of the running counter and of its base value, which is
 * subtracted when the statistic is reported.
 */
struct igbvf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int base_stat_offset;
};
/* Expands to the three offset/size initializers for one stats entry. */
#define IGBVF_STAT(current, base) \
	sizeof(((struct igbvf_adapter *)0)->current), \
	offsetof(struct igbvf_adapter, current), \
	offsetof(struct igbvf_adapter, base)

/* Table of all statistics exported via ethtool -S; entries pairing a
 * counter with "zero_base" are pure software counters with no HW base. */
static const struct igbvf_stats igbvf_gstrings_stats[] = {
	{ "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
	{ "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
	{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
	{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
	{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
	{ "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
	{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
	{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
	{ "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
	{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
	{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
	{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
	{ "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
};
#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)
static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
"Link test (on/offline)"
};
#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
/*
 * ethtool get_settings: a VF has no real PHY, so report fixed 1G
 * capabilities and decode the current speed/duplex from the STATUS
 * register when link is up.
 */
static int igbvf_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 status;

	ecmd->supported = SUPPORTED_1000baseT_Full;
	ecmd->advertising = ADVERTISED_1000baseT_Full;
	ecmd->port = -1;
	ecmd->transceiver = XCVR_DUMMY1;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU) {
		if (status & E1000_STATUS_SPEED_1000)
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
		else if (status & E1000_STATUS_SPEED_100)
			ethtool_cmd_speed_set(ecmd, SPEED_100);
		else
			ethtool_cmd_speed_set(ecmd, SPEED_10);

		if (status & E1000_STATUS_FD)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* no link: speed/duplex are meaningless */
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
/* A VF cannot change link settings; the PF owns the link. */
static int igbvf_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	return -EOPNOTSUPP;
}

/* Flow control is managed by the PF; nothing to report here. */
static void igbvf_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
}

/* Flow control is managed by the PF; a VF cannot change it. */
static int igbvf_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	return -EOPNOTSUPP;
}
/* Report the driver's current message-level bitmask. */
static u32 igbvf_get_msglevel(struct net_device *netdev)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

/* Set the driver's message-level bitmask. */
static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
/* Size in bytes of the register dump produced by igbvf_get_regs(). */
static int igbvf_get_regs_len(struct net_device *netdev)
{
/* number of u32 registers dumped below; also used by igbvf_get_regs() */
#define IGBVF_REGS_LEN 8
	return IGBVF_REGS_LEN * sizeof(u32);
}
/*
 * Dump the small set of VF-visible registers for ethtool -d.  The
 * version field packs: 1 in the top byte, PCI revision, and device id.
 */
static void igbvf_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;

	memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	regs_buff[0] = er32(CTRL);
	regs_buff[1] = er32(STATUS);

	regs_buff[2] = er32(RDLEN(0));
	regs_buff[3] = er32(RDH(0));
	regs_buff[4] = er32(RDT(0));

	regs_buff[5] = er32(TDLEN(0));
	regs_buff[6] = er32(TDH(0));
	regs_buff[7] = er32(TDT(0));
}
/* A VF has no EEPROM access, so the dump length is zero ... */
static int igbvf_get_eeprom_len(struct net_device *netdev)
{
	return 0;
}

/* ... and reading it is not supported ... */
static int igbvf_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	return -EOPNOTSUPP;
}

/* ... nor is writing it. */
static int igbvf_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	return -EOPNOTSUPP;
}
/* Fill in driver name/version and PCI bus info for ethtool -i. */
static void igbvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, igbvf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = igbvf_get_regs_len(netdev);
	drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
/* Report current and maximum Rx/Tx descriptor-ring sizes. */
static void igbvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IGBVF_MAX_RXD;
	ring->tx_max_pending = IGBVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring->count;
	ring->tx_pending = adapter->tx_ring->count;
}
/*
 * Resize the Rx/Tx descriptor rings.  The requested counts are clamped
 * to [MIN, MAX] and rounded up to the descriptor-multiple.  If the
 * interface is down only the counts are updated; otherwise new
 * resources are allocated into a temporary ring before the old ones are
 * freed, so a failure leaves a working configuration in place.
 */
static int igbvf_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct igbvf_ring *temp_ring;
	int err = 0;
	u32 new_rx_count, new_tx_count;

	/* mini/jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	/* serialize against a concurrent reset */
	while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		/* rings are not allocated yet; just record the new sizes */
		adapter->tx_ring->count = new_tx_count;
		adapter->rx_ring->count = new_rx_count;
		goto clear_reset;
	}

	temp_ring = vmalloc(sizeof(struct igbvf_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	igbvf_down(adapter);

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring->count) {
		memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_tx_count;
		err = igbvf_setup_tx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_tx_resources(adapter->tx_ring);

		memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
	}

	if (new_rx_count != adapter->rx_ring->count) {
		memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));

		temp_ring->count = new_rx_count;
		err = igbvf_setup_rx_resources(adapter, temp_ring);
		if (err)
			goto err_setup;

		igbvf_free_rx_resources(adapter->rx_ring);

		memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring));
	}
err_setup:
	igbvf_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IGBVF_RESETTING, &adapter->state);
	return err;
}
/*
 * Self-test helper: refresh link state and report 1 in *data (and as
 * the return value) when link is down, 0 when it is up.
 */
static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;

	hw->mac.ops.check_for_link(hw);

	if (!(er32(STATUS) & E1000_STATUS_LU))
		*data = 1;

	return *data;
}
/* ethtool self-test: the only diagnostic a VF can run is the link test. */
static void igbvf_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IGBVF_TESTING, &adapter->state);

	/* Link test performed before hardware reset so autoneg doesn't
	 * interfere with test result
	 */
	if (igbvf_link_test(adapter, &data[0]))
		eth_test->flags |= ETH_TEST_FL_FAILED;

	clear_bit(__IGBVF_TESTING, &adapter->state);
	msleep_interruptible(4 * 1000);
}
/* Wake-on-LAN is not available on a VF; report nothing supported. */
static void igbvf_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
}

/* Wake-on-LAN cannot be configured on a VF. */
static int igbvf_set_wol(struct net_device *netdev,
			 struct ethtool_wolinfo *wol)
{
	return -EOPNOTSUPP;
}
/*
 * Report the interrupt coalescing setting.  Values <= 3 are the special
 * "mode" codes stored as-is in requested_itr; otherwise the ITR register
 * value is converted back to microseconds (it was stored shifted by 2).
 */
static int igbvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	ec->rx_coalesce_usecs = (adapter->requested_itr <= 3) ?
				adapter->requested_itr :
				adapter->current_itr >> 2;

	return 0;
}
/*
 * Program interrupt coalescing.  Accepts a usec value within the
 * supported range, the special codes 2/3 (restore default dynamic
 * ITR), or 0 (minimum throttling); anything else is rejected.  The
 * resulting ITR is written straight to the Rx ring's EITR register.
 */
static int igbvf_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
	    (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
		/* register value is usecs << 2; requested_itr caches the
		 * equivalent interrupt rate */
		adapter->current_itr = ec->rx_coalesce_usecs << 2;
		adapter->requested_itr = 1000000000 /
					(adapter->current_itr * 256);
	} else if ((ec->rx_coalesce_usecs == 3) ||
		   (ec->rx_coalesce_usecs == 2)) {
		adapter->current_itr = IGBVF_START_ITR;
		adapter->requested_itr = ec->rx_coalesce_usecs;
	} else if (ec->rx_coalesce_usecs == 0) {
		/* The user's desire is to turn off interrupt throttling
		 * altogether, but due to HW limitations, we can't do that.
		 * Instead we set a very small value in EITR, which would
		 * allow ~967k interrupts per second, but allow the adapter's
		 * internal clocking to still function properly.
		 */
		adapter->current_itr = 4;
		adapter->requested_itr = 1000000000 /
					(adapter->current_itr * 256);
	} else {
		return -EINVAL;
	}

	writel(adapter->current_itr,
	       hw->hw_addr + adapter->rx_ring->itr_register);

	return 0;
}
static int igbvf_nway_reset(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
igbvf_reinit_locked(adapter);
return 0;
}
/*
 * Fill the ethtool -S values.  Each reported statistic is the running
 * counter minus its base value; both are located by byte offset within
 * the adapter struct, read as u64 or u32 depending on sizeof_stat.
 */
static void igbvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats,
				    u64 *data)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	int i;

	igbvf_update_stats(adapter);
	for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter +
			  igbvf_gstrings_stats[i].stat_offset;
		char *b = (char *)adapter +
			  igbvf_gstrings_stats[i].base_stat_offset;
		data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
			    sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
			    (*(u32 *)p - *(u32 *)b));
	}
}
/* Number of strings in the requested ethtool string set. */
static int igbvf_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_TEST)
		return IGBVF_TEST_LEN;
	if (stringset == ETH_SS_STATS)
		return IGBVF_GLOBAL_STATS_LEN;
	return -EINVAL;
}
/* Copy out the test or statistic name strings for ethtool. */
static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test));
		break;
	case ETH_SS_STATS:
		/* one fixed-width slot per statistic */
		for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, igbvf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
/* ethtool entry points for the igbvf driver. */
static const struct ethtool_ops igbvf_ethtool_ops = {
	.get_settings		= igbvf_get_settings,
	.set_settings		= igbvf_set_settings,
	.get_drvinfo		= igbvf_get_drvinfo,
	.get_regs_len		= igbvf_get_regs_len,
	.get_regs		= igbvf_get_regs,
	.get_wol		= igbvf_get_wol,
	.set_wol		= igbvf_set_wol,
	.get_msglevel		= igbvf_get_msglevel,
	.set_msglevel		= igbvf_set_msglevel,
	.nway_reset		= igbvf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= igbvf_get_eeprom_len,
	.get_eeprom		= igbvf_get_eeprom,
	.set_eeprom		= igbvf_set_eeprom,
	.get_ringparam		= igbvf_get_ringparam,
	.set_ringparam		= igbvf_set_ringparam,
	.get_pauseparam		= igbvf_get_pauseparam,
	.set_pauseparam		= igbvf_set_pauseparam,
	.self_test		= igbvf_diag_test,
	.get_sset_count		= igbvf_get_sset_count,
	.get_strings		= igbvf_get_strings,
	.get_ethtool_stats	= igbvf_get_ethtool_stats,
	.get_coalesce		= igbvf_get_coalesce,
	.set_coalesce		= igbvf_set_coalesce,
};
/* Attach the ethtool operations to a freshly created netdev. */
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &igbvf_ethtool_ops;
}
| gpl-2.0 |
penberg/linux | arch/sh/mm/cache-sh2.c | 618 | 2284 | // SPDX-License-Identifier: GPL-2.0-only
/*
* arch/sh/mm/cache-sh2.c
*
* Copyright (C) 2002 Paul Mundt
* Copyright (C) 2008 Yoshinori Sato
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
/*
 * Write back dirty cache lines covering [start, start + size).  For
 * each cache-line-aligned address, all 4 ways of the operand cache
 * address array are searched; on a tag match the SH_CACHE_UPDATED
 * (dirty) bit is cleared by writing the entry back.
 * NOTE(review): per the access pattern this assumes a 4-way cache with
 * the way index in bits 12-13 of the array address - confirm against
 * the SH-2 hardware manual.
 */
static void sh2__flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	/* round the region out to whole cache lines */
	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0);
		int way;
		for (way = 0; way < 4; way++) {
			unsigned long data =  __raw_readl(addr | (way << 12));
			if ((data & CACHE_PHYSADDR_MASK) == (v & CACHE_PHYSADDR_MASK)) {
				data &= ~SH_CACHE_UPDATED;
				__raw_writel(data, addr | (way << 12));
			}
		}
	}
}
/*
 * Purge (invalidate, discarding dirty data) cache lines covering
 * [start, start + size) via associative writes to the operand cache
 * address array.
 * NOTE(review): bit 0x00000008 in the array address is presumably the
 * associative-purge select - confirm against the SH-2 hardware manual.
 */
static void sh2__flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES)
		__raw_writel((v & CACHE_PHYSADDR_MASK),
			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
}
/*
 * Invalidate cache lines covering [start, start + size).  In
 * write-back configurations individual lines cannot be invalidated, so
 * the whole cache is flushed via the CCR invalidate bit (from an
 * uncached mapping, with interrupts off); in write-through mode the
 * per-line associative purge is sufficient.
 */
static void sh2__flush_invalidate_region(void *start, int size)
{
#ifdef CONFIG_CACHE_WRITEBACK
	/*
	 * SH-2 does not support individual line invalidation, only a
	 * global invalidate.
	 */
	unsigned long ccr;
	unsigned long flags;
	local_irq_save(flags);
	jump_to_uncached();

	ccr = __raw_readl(SH_CCR);
	ccr |= CCR_CACHE_INVALIDATE;
	__raw_writel(ccr, SH_CCR);

	back_to_cached();
	local_irq_restore(flags);
#else
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	for (v = begin; v < end; v+=L1_CACHE_BYTES)
		__raw_writel((v & CACHE_PHYSADDR_MASK),
			  CACHE_OC_ADDRESS_ARRAY | (v & 0x00000ff0) | 0x00000008);
#endif
}
/* Install the SH-2 implementations of the generic cache-flush hooks. */
void __init sh2_cache_init(void)
{
	__flush_wback_region		= sh2__flush_wback_region;
	__flush_purge_region		= sh2__flush_purge_region;
	__flush_invalidate_region	= sh2__flush_invalidate_region;
}
| gpl-2.0 |
ngvincent/android-kernel-oppo-find5 | net/ipv6/tcp_ipv6.c | 1642 | 56966 | /*
* TCP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on:
* linux/net/ipv4/tcp.c
* linux/net/ipv4/tcp_input.c
* linux/net/ipv4/tcp_output.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub so callers need no #ifdef: without MD5SIG there is never a key. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
/*
 * Insert the socket into the established hash table.  Sockets that fell
 * back to v4-mapped operation are hashed via the IPv4 tcp_prot hook;
 * native IPv6 sockets use __inet6_hash() with bottom halves disabled.
 */
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
/* TCP checksum over the IPv6 pseudo-header plus partial sum "base". */
static __inline__ __sum16 tcp_v6_check(int len,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
/* Derive the initial sequence number from the 4-tuple of the incoming
 * SYN (note: daddr/saddr swapped because skb is the peer's packet). */
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
/*
 * Connect an IPv6 TCP socket.  Handles flow-label lookup, scope-id
 * checks for link-local destinations, the v4-mapped fallback to
 * tcp_v4_connect(), route/source-address selection, TIME-WAIT recycle
 * timestamp recovery, and finally the SYN transmission.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* honor a flow label supplied in sin6_flowinfo; a managed
		 * label also dictates the destination address */
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* reconnecting to a different peer invalidates cached timestamps */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * TCP over IPv4: a v4-mapped destination is handed off to the
	 * IPv4 connect path, with af_ops/backlog_rcv switched over.
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* roll the socket back to native IPv6 operation */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		/* no bound source address: take the one route lookup chose */
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
/*
 * ICMPv6 error handler for TCP.  Locates the socket for the offending
 * segment, validates the sequence number, handles PKT_TOOBIG by
 * refreshing the path MTU, and otherwise propagates the converted error
 * to the socket or drops a pending request_sock.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* drop ICMPs arriving with a hop limit below the configured floor */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* the quoted sequence must fall in the unacknowledged window */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
/*
 * Build and transmit a SYN-ACK for a pending connection request: route
 * to the peer recorded in the request_sock, construct the segment with
 * tcp_make_synack(), checksum it, and send it via ip6_xmit().
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		/* fl6.daddr may have been rewritten by fl6_update_dst() */
		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
/* Retransmit a SYN-ACK, bumping the retransmission counter first. */
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}
/* Free the packet options skb cached on a dying request_sock. */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
/* Look up the MD5 key configured for the given IPv6 peer address. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
/* MD5 key lookup keyed by another socket's destination address. */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}
/* MD5 key lookup keyed by a request_sock's remote address. */
static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
/*
 * TCP_MD5SIG setsockopt handler: add or (for a zero key length) delete
 * the MD5 key for a peer.  A v4-mapped IPv6 address is stored as an
 * AF_INET key so it matches traffic on the v4-mapped path.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* empty key means delete */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
/*
 * Feed the IPv6 TCP pseudo-header (saddr, daddr, protocol, length) into
 * the running MD5 digest.  Returns nonzero on crypto failure.
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
/*
 * Compute the TCP MD5 signature over pseudo-header + TCP header + key
 * (no payload).  On any failure the output is zeroed and 1 is returned.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
/*
 * Compute the TCP MD5 signature for a full segment (header + payload).
 * Addresses are taken from the socket, the request_sock, or the skb's
 * IPv6 header - whichever is available, in that order.  On failure the
 * output is zeroed and 1 is returned.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
/*
 * Validate the MD5 signature option of an inbound segment against the
 * key configured for the peer address on this socket.
 *
 * Returns 0 if the segment is acceptable (no key and no option, or the
 * signature verifies); returns 1 if the caller must drop the segment
 * (option/key mismatch either way, or a bad signature).
 */
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
int genhash;
u8 newhash[16];
hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
return 0;
/* We expect a signed segment but none arrived: drop. */
if (hash_expected && !hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return 1;
}
/* Unexpected signature from a peer we have no key for: drop. */
if (!hash_expected && hash_location) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return 1;
}
/* check the signature */
genhash = tcp_v6_md5_hash_skb(newhash,
hash_expected,
NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest));
}
return 1;
}
return 0;
}
#endif
/*
 * Operations used for IPv6 request (embryonic, SYN_RECV) sockets:
 * SYN-ACK retransmit, ACK/RST generation and destruction.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.family = AF_INET6,
.obj_size = sizeof(struct tcp6_request_sock),
.rtx_syn_ack = tcp_v6_rtx_synack,
.send_ack = tcp_v6_reqsk_send_ack,
.destructor = tcp_v6_reqsk_destructor,
.send_reset = tcp_v6_send_reset,
.syn_ack_timeout = tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
/* MD5 hooks used while a connection is still in the request-sock stage. */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.md5_lookup = tcp_v6_reqsk_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
};
#endif
/*
 * Fill in the TCP checksum of an outgoing segment for the given
 * IPv6 address pair.  With CHECKSUM_PARTIAL the hardware finishes the
 * sum (we store only the folded pseudo-header seed plus the offsets);
 * otherwise the full checksum is computed in software here.
 */
static void __tcp_v6_send_check(struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr)
{
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* Seed with the pseudo-header; NIC completes over the data. */
th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
th->check = tcp_v6_check(skb->len, saddr, daddr,
csum_partial(th, th->doff << 2,
skb->csum));
}
}
/* Checksum an outgoing segment using the socket's own address pair. */
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
/*
 * Prepare a GSO segment for transmit: make sure the TCP header is
 * pullable, zero the checksum field and set up partial checksumming
 * from the addresses in the packet.  Returns 0 or -EINVAL if the
 * header cannot be pulled.
 */
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
const struct ipv6hdr *ipv6h;
struct tcphdr *th;
if (!pskb_may_pull(skb, sizeof(*th)))
return -EINVAL;
ipv6h = ipv6_hdr(skb);
th = tcp_hdr(skb);
th->check = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
return 0;
}
/*
 * GRO receive hook for TCPv6: verify the checksum state before handing
 * the segment to the generic TCP GRO engine.  Segments whose checksum
 * cannot be validated (CHECKSUM_NONE, or a COMPLETE sum that fails)
 * are flushed out of GRO rather than aggregated.
 */
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
const struct ipv6hdr *iph = skb_gro_network_header(skb);
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
break;
}
/* fall through */
case CHECKSUM_NONE:
/* Cannot cheaply verify: force normal (non-GRO) delivery. */
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
return tcp_gro_receive(head, skb);
}
/*
 * GRO completion for TCPv6: re-seed the checksum for the merged
 * super-segment and mark it as TCPv6 GSO before final delivery.
 */
static int tcp6_gro_complete(struct sk_buff *skb)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
&iph->saddr, &iph->daddr, 0);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
return tcp_gro_complete(skb);
}
/*
 * Build and transmit a bare TCP control segment (RST when rst != 0,
 * otherwise an ACK) in reply to the received skb, using the per-netns
 * TCPv6 control socket.  Addresses and ports are swapped so the reply
 * goes back to the sender.  Optionally carries a timestamp option (ts)
 * and, with CONFIG_TCP_MD5SIG, an MD5 signature option (key).
 * Allocation or route failures are silent - the reply is best-effort.
 */
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
struct net *net = dev_net(skb_dst(skb)->dev);
struct sock *ctl_sk = net->ipv6.tcp_sk;
unsigned int tot_len = sizeof(struct tcphdr);
struct dst_entry *dst;
__be32 *topt;
/* Account option space before allocating the reply skb. */
if (ts)
tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
if (key)
tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
GFP_ATOMIC);
if (buff == NULL)
return;
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
t1 = (struct tcphdr *) skb_push(buff, tot_len);
skb_reset_transport_header(buff);
/* Swap the send and the receive. */
memset(t1, 0, sizeof(*t1));
t1->dest = th->source;
t1->source = th->dest;
t1->doff = tot_len / 4;
t1->seq = htonl(seq);
t1->ack_seq = htonl(ack);
/* A RST in reply to an ACK carries no ACK bit itself. */
t1->ack = !rst || !th->ack;
t1->rst = rst;
t1->window = htons(win);
topt = (__be32 *)(t1 + 1);
if (ts) {
*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
*topt++ = htonl(tcp_time_stamp);
*topt++ = htonl(ts);
}
#ifdef CONFIG_TCP_MD5SIG
if (key) {
*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
/* Sign with swapped addresses: ours is the packet's daddr. */
tcp_v6_md5_hash_hdr((__u8 *)topt, key,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, t1);
}
#endif
memset(&fl6, 0, sizeof(fl6));
fl6.daddr = ipv6_hdr(skb)->saddr;
fl6.saddr = ipv6_hdr(skb)->daddr;
buff->ip_summed = CHECKSUM_PARTIAL;
buff->csum = 0;
__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
fl6.flowi6_proto = IPPROTO_TCP;
fl6.flowi6_oif = inet6_iif(skb);
fl6.fl6_dport = t1->dest;
fl6.fl6_sport = t1->source;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
/* Pass the control socket to ip6_dst_lookup_flow even for a RST;
 * the underlying function uses it to retrieve the network
 * namespace.
 */
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
return;
}
kfree_skb(buff);
}
/*
 * Send a RST in reply to skb.  sk may be NULL (e.g. no matching
 * socket); never reply to a RST or to a non-unicast destination.
 * With MD5 enabled, a signed RST is produced when a key can be found,
 * including the no-socket case via a listener lookup.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = tcp_hdr(skb);
u32 seq = 0, ack_seq = 0;
struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
const __u8 *hash_location = NULL;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
unsigned char newhash[16];
int genhash;
struct sock *sk1 = NULL;
#endif
/* Never answer a RST with a RST (would loop). */
if (th->rst)
return;
if (!ipv6_unicast_destination(skb))
return;
#ifdef CONFIG_TCP_MD5SIG
hash_location = tcp_parse_md5sig_option(th);
if (!sk && hash_location) {
/*
 * active side is lost. Try to find listening socket through
 * source port, and then find md5 key through listening socket.
 * We do not lose security here:
 * Incoming packet is checked with md5 hash with finding key,
 * no RST generated if md5 hash doesn't match.
 */
sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
&tcp_hashinfo, &ipv6h->daddr,
ntohs(th->source), inet6_iif(skb));
if (!sk1)
return;
rcu_read_lock();
key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
if (!key)
goto release_sk1;
/* Only RST segments whose signature verifies. */
genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
goto release_sk1;
} else {
key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
}
#endif
/* RFC 793: echo their ACK as our SEQ, else ACK what they sent. */
if (th->ack)
seq = ntohl(th->ack_seq);
else
ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
(th->doff << 2);
tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
#ifdef CONFIG_TCP_MD5SIG
release_sk1:
if (sk1) {
rcu_read_unlock();
sock_put(sk1);
}
#endif
}
/* Convenience wrapper: transmit a bare ACK (rst = 0) in reply to skb. */
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
struct tcp_md5sig_key *key, u8 tclass)
{
tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}
/*
 * ACK a segment received on a TIME_WAIT socket, re-stating the
 * timewait state's sequence numbers, window and recent timestamp.
 * Consumes the reference on the timewait socket.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
tw->tw_tclass);
inet_twsk_put(tw);
}
/* ACK on behalf of an embryonic (SYN_RECV) connection: ISNs + 1. */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
/*
 * Resolve which socket should handle a segment arriving on a listener:
 * a pending request sock (3rd ACK of the handshake), an already
 * established socket, a syncookie validation result, or the listener
 * itself.  Returns NULL when the segment must be discarded; a non-NULL
 * child is returned locked (bh_lock_sock).
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
struct request_sock *req, **prev;
const struct tcphdr *th = tcp_hdr(skb);
struct sock *nsk;
/* Find possible connection requests. */
req = inet6_csk_search_req(sk, &prev, th->source,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, inet6_iif(skb));
if (req)
return tcp_check_req(sk, skb, req, prev);
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
if (nsk) {
if (nsk->sk_state != TCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
/* A stale TIME_WAIT match: drop the segment. */
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
#ifdef CONFIG_SYN_COOKIES
/* A bare ACK may complete a syncookie handshake. */
if (!th->syn)
sk = cookie_v6_check(sk, skb);
#endif
return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
* Can some kind of merge be done? -- erics
*/
/*
 * Handle an inbound SYN on a listening socket: allocate and queue a
 * request sock, parse options (including TCP cookie transactions),
 * pick an ISN (possibly a syncookie), and send the SYN-ACK.
 * Always returns 0 - a failed request is silently dropped, never
 * answered with a RST.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
const u8 *hash_location;
struct request_sock *req;
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
int want_cookie = 0;
/* v4-mapped traffic is handed to the IPv4 path. */
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_conn_request(sk, skb);
if (!ipv6_unicast_destination(skb))
goto drop;
/* SYN queue full: fall back to syncookies or drop. */
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
goto drop;
}
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (req == NULL)
goto drop;
#ifdef CONFIG_TCP_MD5SIG
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
/* TCP Cookie Transactions (experimental): mix the addresses and
 * the initiator cookie into the cookie bakery.
 */
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
!tp->rx_opt.cookie_out_never &&
(sysctl_tcp_cookie_size > 0 ||
(tp->cookie_values != NULL &&
tp->cookie_values->cookie_desired > 0))) {
u8 *c;
u32 *d;
u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
goto drop_and_free;
/* Secret recipe starts with IP addresses */
d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
*mess++ ^= *d++;
/* plus variable length Initiator Cookie */
c = (u8 *)mess;
while (l-- > 0)
*c++ ^= *hash_location++;
want_cookie = 0; /* not our kind of cookie */
tmp_ext.cookie_out_never = 0; /* false */
tmp_ext.cookie_plus = tmp_opt.cookie_plus;
} else if (!tp->rx_opt.cookie_in_always) {
/* redundant indications, but ensure initialization. */
tmp_ext.cookie_out_never = 1; /* true */
tmp_ext.cookie_plus = 0;
} else {
goto drop_and_free;
}
tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
/* Syncookies cannot encode options without a timestamp. */
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
treq = inet6_rsk(req);
treq->rmt_addr = ipv6_hdr(skb)->saddr;
treq->loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
treq->iif = sk->sk_bound_dev_if;
/* So that link locals have meaning */
if (!sk->sk_bound_dev_if &&
ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
treq->iif = inet6_iif(skb);
if (!isn) {
struct inet_peer *peer = NULL;
/* Keep the SYN skb around if the user wants its options. */
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
treq->pktopts = skb;
}
if (want_cookie) {
isn = cookie_v6_init_sequence(sk, skb, &req->mss);
req->cookie_ts = tmp_opt.tstamp_ok;
goto have_isn;
}
/* VJ's idea. We save last timestamp seen
 * from the destination in peer table, when entering
 * state TIME-WAIT, and check against it before
 * accepting new connection request.
 *
 * If "isn" is not zero, this request hit alive
 * timewait bucket, so that all the necessary checks
 * are made in the function processing timewait state.
 */
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet6_csk_route_req(sk, req)) != NULL &&
(peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
&treq->rmt_addr)) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
}
/* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
(!peer || !peer->tcp_ts_stamp) &&
(!dst || !dst_metric(dst, RTAX_RTT))) {
/* Without syncookies last quarter of
 * backlog is filled with destinations,
 * proven to be alive.
 * It means that we continue to communicate
 * to destinations, already remembered
 * to the moment of synflood.
 */
LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
&treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
goto drop_and_release;
}
isn = tcp_v6_init_sequence(skb);
}
have_isn:
tcp_rsk(req)->snt_isn = isn;
tcp_rsk(req)->snt_synack = tcp_time_stamp;
security_inet_conn_request(sk, skb, req);
/* A syncookie request is never queued - it is reconstructed
 * from the returning ACK instead.
 */
if (tcp_v6_send_synack(sk, req,
(struct request_values *)&tmp_ext) ||
want_cookie)
goto drop_and_free;
inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
return 0;
drop_and_release:
dst_release(dst);
drop_and_free:
reqsk_free(req);
drop:
return 0; /* don't send reset */
}
/*
 * Create the child socket once the 3-way handshake completes.
 * Handles both native IPv6 connections and v4-mapped ones (the latter
 * are built by tcp_v4_syn_recv_sock() and then patched to carry the
 * IPv6-mapped operations).  Returns the new socket or NULL on failure
 * (accept queue overflow, routing failure, or allocation failure).
 */
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *treq;
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct tcp6_sock *newtcp6sk;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
if (skb->protocol == htons(ETH_P_IP)) {
/*
 * v6 mapped
 */
newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
newtp = tcp_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
/* Represent the IPv4 peer as ::ffff:a.b.c.d. */
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
newnp->rcv_saddr = newnp->saddr;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
 * here, tcp_create_openreq_child now does this for us, see the comment in
 * that function for the gory details. -acme
 */
/* It is tricky place. Until this moment IPv4 tcp
   worked with IPv6 icsk.icsk_af_ops.
   Sync it now.
 */
tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
treq = inet6_rsk(req);
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
dst = inet6_csk_route_req(sk, req);
if (!dst)
goto out;
}
newsk = tcp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
 * No need to charge this sock to the relevant IPv6 refcnt debug socks
 * count here, tcp_create_openreq_child now does this for us, see the
 * comment in that function for the gory details. -acme
 */
newsk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(newsk, dst, NULL, NULL);
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
newnp->daddr = treq->rmt_addr;
newnp->saddr = treq->loc_addr;
newnp->rcv_saddr = treq->loc_addr;
newsk->sk_bound_dev_if = treq->iif;
/* Now IPv6 options...
   First: no IPv4 options.
 */
newinet->inet_opt = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (treq->pktopts != NULL) {
newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
kfree_skb(treq->pktopts);
treq->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
   Yes, keeping reference count would be much more clever,
   but we make one more one thing there: reattach optmem
   to newsk.
 */
if (opt) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
/* Seed RTT estimation from the SYN-ACK round trip if known. */
if (tcp_rsk(req)->snt_synack)
tcp_valid_rtt_meas(newsk,
tcp_time_stamp - tcp_rsk(req)->snt_synack);
newtp->total_retrans = req->retrans;
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
/* We're using one, so create a matching key
 * on the newsk structure. If we fail to get
 * memory, then we end up not copying the key
 * across. Shucks.
 */
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
AF_INET6, key->key, key->keylen, GFP_ATOMIC);
}
#endif
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
if (opt && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
}
/*
 * Initialise checksum state for an inbound segment.  Hardware-complete
 * checksums are verified immediately; otherwise the pseudo-header seed
 * is stored and short packets (<= 76 bytes) are verified in full right
 * away.  Returns 0 when acceptable so far, non-zero on a bad checksum.
 */
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
}
}
/* Defer full verification; stash the pseudo-header sum. */
skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0));
if (skb->len <= 76) {
return __skb_checksum_complete(skb);
}
return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
/*
 * Per-socket receive path for TCPv6 (called with the socket spinlock
 * held, possibly from backlog processing).  Dispatches established
 * fast path, listener handshake handling and the general state
 * machine, then latches IPV6_PKTOPTIONS from a clone of the segment.
 *
 * Fix: in the ipv6_pktoptions section the traffic class was read from
 * "skb" via ipv6_hdr(skb), but by that point skb has already been
 * consumed (queued or freed) by tcp_rcv_established() /
 * tcp_rcv_state_process() - a potential use-after-free.  All other
 * reads there correctly use the "opt_skb" clone; rcv_tclass now does
 * too.
 *
 * Always returns 0; resets are sent (and the skb freed) internally on
 * fatal errors.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;
	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);
#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif
	if (sk_filter(sk, skb))
		goto discard;
	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */
	/* Do Stevens' IPV6_PKTOPTIONS.
	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.
	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);
	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}
	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;
	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;
reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
ipv6_pktoptions:
	/* Do you ask, what is it?
	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			/* Must read from opt_skb: "skb" was consumed by
			 * the receive path above (use-after-free fix).
			 */
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}
	kfree_skb(opt_skb);
	return 0;
}
/*
 * Main TCPv6 protocol handler: validate headers and checksum, look up
 * the owning socket, enforce min-hopcount/XFRM/socket-filter policy,
 * then deliver directly, via prequeue, or onto the backlog depending
 * on socket ownership.  TIME_WAIT sockets are handled at do_time_wait,
 * possibly re-dispatching a new SYN to a fresh listener.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
const struct ipv6hdr *hdr;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
/*
 * Count it even if it's bad.
 */
TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
th = tcp_hdr(skb);
/* data offset below the minimum header size is malformed */
if (th->doff < sizeof(struct tcphdr)/4)
goto bad_packet;
if (!pskb_may_pull(skb, th->doff*4))
goto discard_it;
if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
goto bad_packet;
/* Re-read headers: pskb_may_pull may have reallocated. */
th = tcp_hdr(skb);
hdr = ipv6_hdr(skb);
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
if (!sk)
goto no_tcp_socket;
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
/* IP_MINTTL-style protection (RFC 5082 GTSM). */
if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
bh_lock_sock_nested(sk);
ret = 0;
if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = net_dma_find_channel();
if (tp->ucopy.dma_chan)
ret = tcp_v6_do_rcv(sk, skb);
else
#endif
{
if (!tcp_prequeue(sk, skb))
ret = tcp_v6_do_rcv(sk, skb);
}
} else if (unlikely(sk_add_backlog(sk, skb))) {
/* Backlog full: drop and account. */
bh_unlock_sock(sk);
NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
}
bh_unlock_sock(sk);
sock_put(sk);
return ret ? -1 : 0;
no_tcp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
} else {
tcp_v6_send_reset(NULL, skb);
}
discard_it:
/*
 * Discard frame
 */
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
do_time_wait:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN:
{
/* A new SYN may legitimately reopen via a listener. */
struct sock *sk2;
sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
&ipv6_hdr(skb)->daddr,
ntohs(th->dest), inet6_iif(skb));
if (sk2 != NULL) {
struct inet_timewait_sock *tw = inet_twsk(sk);
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
sk = sk2;
goto process;
}
/* Fall through to ACK */
}
case TCP_TW_ACK:
tcp_v6_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
goto no_tcp_socket;
case TCP_TW_SUCCESS:;
}
goto discard_it;
}
/*
 * Return the inet_peer entry for this socket's destination.  If the
 * cached route matches, reuse (and if needed bind) the route's peer
 * and set *release_it = false; otherwise take a fresh reference that
 * the caller must drop (*release_it = true).
 */
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_peer *peer;
if (!rt ||
!ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
peer = inet_getpeer_v6(&np->daddr, 1);
*release_it = true;
} else {
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
peer = rt->rt6i_peer;
*release_it = false;
}
return peer;
}
/*
 * Peer lookup for a TIME_WAIT socket; defers to the IPv4 variant for
 * AF_INET timewait sockets (v4-mapped connections).
 */
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
const struct inet_timewait_sock *tw = inet_twsk(sk);
if (tw->tw_family == AF_INET)
return tcp_v4_tw_get_peer(sk);
return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
/* Timewait-socket operations for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
.twsk_getpeer = tcp_v6_tw_get_peer,
};
/* Address-family operations for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.get_peer = tcp_v6_get_peer,
.net_header_len = sizeof(struct ipv6hdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
.md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
.md5_parse = tcp_v6_parse_md5_keys,
};
#endif
/*
 * TCP over IPv4 via INET6 API: address-family operations installed on
 * a v6 socket once it turns out to carry a v4-mapped connection.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature operations for v4-mapped sockets (IPv4 hashing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
.md5_parse = tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
/*
 * Initialise a freshly allocated TCPv6 socket: timers, queues,
 * congestion-control defaults and the IPv6 operations vector.
 * Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
 * initial SYN frame in their delayed-ACK and congestion control
 * algorithms that we must have the following bandaid to talk
 * efficiently to them. -DaveM
 */
tp->snd_cwnd = 2;
/* See draft-stevens-tcpca-spec-01 for discussion of the
 * initialization of these values.
 */
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
sk->sk_state = TCP_CLOSE;
icsk->icsk_af_ops = &ipv6_specific;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
icsk->icsk_sync_mss = tcp_sync_mss;
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
/* TCP Cookie Transactions */
if (sysctl_tcp_cookie_size > 0) {
/* Default, cookies without s_data_payload. */
tp->cookie_values =
kzalloc(sizeof(*tp->cookie_values),
sk->sk_allocation);
if (tp->cookie_values != NULL)
kref_init(&tp->cookie_values->kref);
}
/* Presumed zeroed, in order of appearance:
 * cookie_in_always, cookie_out_never,
 * s_data_constant, s_data_in, s_data_out
 */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
/* Memory accounting must run with BHs disabled. */
local_bh_disable();
sock_update_memcg(sk);
sk_sockets_allocated_inc(sk);
local_bh_enable();
return 0;
}
/* Tear down a TCPv6 socket: generic TCP teardown, then IPv6 state. */
static void tcp_v6_destroy_sock(struct sock *sk)
{
tcp_v4_destroy_sock(sk);
inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
/*
 * Emit one /proc/net/tcp6 row for an open request (SYN_RECV) socket.
 * The column layout must stay in lockstep with get_tcp6_sock() and
 * get_timewait6_sock() - userspace parses these fields positionally.
 */
static void get_openreq6(struct seq_file *seq,
const struct sock *sk, struct request_sock *req, int i, int uid)
{
/* Time until the SYN-ACK retransmit timer fires (clamped >= 0). */
int ttd = req->expires - jiffies;
const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
if (ttd < 0)
ttd = 0;
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
ntohs(inet_rsk(req)->loc_port),
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3],
ntohs(inet_rsk(req)->rmt_port),
TCP_SYN_RECV,
0,0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
jiffies_to_clock_t(ttd),
req->retrans,
uid,
0, /* non standard timer */
0, /* open_requests have no inode */
0, req);
}
/*
 * Emit one /proc/net/tcp6 row for a full (listening or established)
 * socket, including queue depths, timer state and congestion info.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
const struct in6_addr *dest, *src;
__u16 destp, srcp;
int timer_active;
unsigned long timer_expires;
const struct inet_sock *inet = inet_sk(sp);
const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
const struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
src = &np->rcv_saddr;
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
/* Encode which timer is pending: 1=retransmit, 4=zero-window
 * probe, 2=keepalive, 0=none.
 */
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
}
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
sp->sk_state,
tp->write_seq-tp->snd_una,
(sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
timer_active,
jiffies_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
sock_i_uid(sp),
icsk->icsk_probes_out,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
);
}
/*
 * Emit one /proc/net/tcp6 row for a TIME-WAIT socket.  Timer class is
 * fixed at 3 (TIME-WAIT) and most per-connection counters are printed
 * as 0 since a timewait sock carries no full tcp_sock state.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* remaining time-to-die in jiffies, clamped so a just-expired
	 * timer prints as 0 rather than a huge unsigned value */
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
/*
 * seq_file ->show callback for /proc/net/tcp6: print the header row for
 * the start token, otherwise dispatch on the iterator state to the
 * appropriate per-socket formatter.
 */
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		/* v is a struct sock * in these states */
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		/* v is a struct request_sock * (embryonic connection) */
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		/* v is a struct inet_timewait_sock * */
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
/* File operations backing /proc/net/tcp6. */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

/* AF_INET6 descriptor handed to the shared TCP /proc infrastructure. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
/* Create /proc/net/tcp6 in the given network namespace. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

/* Remove /proc/net/tcp6 from the given network namespace. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
/*
 * Transport protocol descriptor for TCP over IPv6.  Most operations are
 * shared with IPv4 TCP; only connect/rcv/init/destroy and hashing are
 * IPv6-specific.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	/* memory accounting is shared with IPv4 TCP */
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	/* sockets are RCU-freed; lookups must re-check identity */
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
/* IPv6 layer hooks: receive/error handlers plus GSO/GRO offload ops. */
static const struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.gro_receive	= tcp6_gro_receive,
	.gro_complete	= tcp6_gro_complete,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* socket(AF_INET6, SOCK_STREAM, IPPROTO_TCP) switch entry. */
static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};
/* Per-namespace init: create the kernel control socket used to send
 * RSTs/ACKs for connections with no local socket. */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

/* Per-namespace teardown of the control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

/* Batched teardown: flush IPv6 timewait sockets for all dying netns. */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
/*
 * Register TCPv6 with the IPv6 stack: protocol handler, socket switch
 * entry, then per-namespace state.  On failure, undo exactly the steps
 * that succeeded, in reverse order.
 *
 * Fix: the original unwind labels were ordered so that a protosw
 * registration failure fell through and unregistered a protosw that was
 * never registered, while a pernet registration failure never deleted
 * the protocol handler.  Labels now unwind in reverse registration
 * order (unregister protosw, then delete protocol).
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
/* Module teardown: unwind tcpv6_init() in reverse registration order. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
| gpl-2.0 |
hiikezoe/android_kernel_panasonic_p02e | arch/powerpc/platforms/85xx/tqm85xx.c | 4458 | 3547 | /*
* Based on MPC8560 ADS and arch/ppc tqm85xx ports
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2008 Freescale Semiconductor Inc.
*
* Copyright (c) 2005-2006 DENX Software Engineering
* Stefan Roese <sr@denx.de>
*
* Based on original work by
* Kumar Gala <kumar.gala@freescale.com>
* Copyright 2004 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "mpc85xx.h"
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#endif /* CONFIG_CPM2 */
/* Set up the board's MPIC (OpenPIC) and, when CPM2 is configured,
 * cascade the CPM2 interrupt controller behind it. */
static void __init tqm85xx_pic_init(void)
{
	struct mpic *mpic = mpic_alloc(NULL, 0,
			MPIC_BIG_ENDIAN,
			0, 256, " OpenPIC  ");
	BUG_ON(mpic == NULL);
	mpic_init(mpic);
	/* no-op stub unless CONFIG_CPM2 is enabled */
	mpc85xx_cpm2_pic_init();
}
/*
* Setup the architecture
*/
/*
 * Board setup: reset the CPM2 block (if present) and register all
 * Freescale PCI/PCIe host bridges found in the device tree.
 */
static void __init tqm85xx_setup_arch(void)
{
#ifdef CONFIG_PCI
	struct device_node *np;
#endif

	if (ppc_md.progress)
		ppc_md.progress("tqm85xx_setup_arch()", 0);

#ifdef CONFIG_CPM2
	cpm2_reset();
#endif

#ifdef CONFIG_PCI
	for_each_node_by_type(np, "pci") {
		if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
		    of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
			struct resource rsrc;
			if (!of_address_to_resource(np, 0, &rsrc)) {
				/* register offset 0x8000 marks the primary
				 * bridge (second arg of fsl_add_bridge) --
				 * NOTE(review): confirm against fsl_pci.h */
				if ((rsrc.start & 0xfffff) == 0x8000)
					fsl_add_bridge(np, 1);
				else
					fsl_add_bridge(np, 0);
			}
		}
	}
#endif
}
/* /proc/cpuinfo hook: report vendor, PVR/SVR and the CPU PLL ratio. */
static void tqm85xx_show_cpuinfo(struct seq_file *m)
{
	uint pvid, svid, phid1;

	pvid = mfspr(SPRN_PVR);		/* processor version */
	svid = mfspr(SPRN_SVR);		/* system version */

	seq_printf(m, "Vendor\t\t: TQ Components\n");
	seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
	seq_printf(m, "SVR\t\t: 0x%x\n", svid);

	/* Display cpu Pll setting */
	phid1 = mfspr(SPRN_HID1);
	seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
/*
 * PCI header fixup for the TI PCI1520 CardBus controller: enable the
 * P2CCLK output so the card power chip receives its clock.  Guarded so
 * it only runs on TQM85xx boards even though the fixup is registered
 * globally by vendor/device id.
 */
static void __init tqm85xx_ti1520_fixup(struct pci_dev *pdev)
{
	unsigned int val;

	/* Do not do the fixup on other platforms! */
	if (!machine_is(tqm85xx))
		return;

	dev_info(&pdev->dev, "Using TI 1520 fixup on TQM85xx\n");

	/*
	 * Enable P2CCLK bit in system control register
	 * to enable CLOCK output to power chip
	 */
	pci_read_config_dword(pdev, 0x80, &val);
	pci_write_config_dword(pdev, 0x80, val | (1 << 27));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
		tqm85xx_ti1520_fixup);

/* Publish the standard MPC85xx platform devices at device-init time. */
machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices);
/* Device-tree "compatible" strings of the boards this platform supports. */
static const char *board[] __initdata = {
	"tqc,tqm8540",
	"tqc,tqm8541",
	"tqc,tqm8548",
	"tqc,tqm8555",
	"tqc,tqm8560",
	NULL
};
/*
* Called very early, device-tree isn't unflattened
*/
/* Platform probe: match the flattened device tree root against the
 * supported board list (runs before the tree is unflattened). */
static int __init tqm85xx_probe(void)
{
	return of_flat_dt_match(of_get_flat_dt_root(), board);
}

/* Machine description binding probe/setup/IRQ hooks for this platform. */
define_machine(tqm85xx) {
	.name			= "TQM85xx",
	.probe			= tqm85xx_probe,
	.setup_arch		= tqm85xx_setup_arch,
	.init_IRQ		= tqm85xx_pic_init,
	.show_cpuinfo		= tqm85xx_show_cpuinfo,
	.get_irq		= mpic_get_irq,
	.restart		= fsl_rstcr_restart,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= udbg_progress,
};
| gpl-2.0 |
badbear1727/DALI-LINARO | drivers/input/mouse/qci_touchpad.c | 4714 | 7856 | /* Quanta I2C Touchpad Driver
*
* Copyright (C) 2009 Quanta Computer Inc.
* Author: Hsin Wu <hsin.wu@quantatw.com>
* Author: Austin Lai <austin.lai@quantatw.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
*
* The Driver with I/O communications via the I2C Interface for ON2 of AP BU.
* And it is only working on the nuvoTon WPCE775x Embedded Controller.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/keyboard.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#define TOUCHPAD_ID_NAME "qci-i2cpad"
#define TOUCHPAD_NAME "PS2 Touchpad"
#define TOUCHPAD_DEVICE "/i2c/input1"
#define TOUCHPAD_CMD_ENABLE 0xF4
#define TOUCHPAD_INIT_DELAY_MS 100
static int __devinit qcitp_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int __devexit qcitp_remove(struct i2c_client *kbd);
/* General structure to hold the driver data */
/* General structure to hold the driver data */
struct i2ctpad_drv_data {
	struct i2c_client *ti2c_client;	/* bound I2C client */
	struct work_struct work;	/* bottom half for IRQ handling */
	struct input_dev *qcitp_dev;	/* registered input device */
	struct kobject *tp_kobj;	/* /sys/touchpad sysfs node */
	unsigned int qcitp_gpio;	/* GPIO line carrying the IRQ */
	unsigned int qcitp_irq;		/* IRQ mapped from qcitp_gpio */
	char ecdata[8];			/* raw packet read from the EC */
};
/* Pointer-motion scale factor, 1..10 (10 = full sensitivity). */
static int tp_sense_val = 10;

/* sysfs "tp_sensitivity" show: print the current sensitivity. */
static ssize_t tp_sensitive_show(struct kobject *kobj,
	struct kobj_attribute *attr, char * buf)
{
	return sprintf(buf, "%d\n", tp_sense_val);
}

/*
 * sysfs "tp_sensitivity" store: accept a decimal value in 1..10.
 *
 * Fixes vs. the original:
 *  - returned sizeof(buf) -- the size of a *pointer*, not the number of
 *    bytes consumed -- making the sysfs core re-issue writes longer than
 *    sizeof(char *); now consumes the whole input by returning n;
 *  - sscanf result was ignored and "%d" was used for an unsigned int.
 * -ENOSYS is kept for the rejection path to preserve the user-visible
 * errno (-EINVAL would be the conventional choice).
 */
static ssize_t tp_sensitive_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char* buf, size_t n)
{
	unsigned int val = 0;

	if (sscanf(buf, "%u", &val) != 1)
		return -ENOSYS;
	if (val < 1 || val > 10)
		return -ENOSYS;

	tp_sense_val = val;
	return n;
}
/* sysfs attribute: /sys/touchpad/tp_sensitivity (rw for owner, r for all). */
static struct kobj_attribute tp_sensitivity = __ATTR(tp_sensitivity ,
						     0644 ,
						     tp_sensitive_show ,
						     tp_sensitive_store);

static struct attribute *g_tp[] = {
	&tp_sensitivity.attr,
	NULL,	/* sentinel required by sysfs_create_group() */
};

static struct attribute_group attr_group = {
	.attrs = g_tp,
};
/*-----------------------------------------------------------------------------
* Driver functions
*---------------------------------------------------------------------------*/
#ifdef CONFIG_PM
/* PM callbacks are placeholders: the pad needs no state save/restore. */
static int qcitp_suspend(struct device *dev)
{
	return 0;
}
static int qcitp_resume(struct device *dev)
{
	return 0;
}
#endif

/* I2C ids this driver binds to (also exported for module autoload). */
static const struct i2c_device_id qcitp_idtable[] = {
	{ TOUCHPAD_ID_NAME, 0 },
	{ }
};

MODULE_DEVICE_TABLE(i2c, qcitp_idtable);

#ifdef CONFIG_PM
static const struct dev_pm_ops qcitp_pm_ops = {
	.suspend  = qcitp_suspend,
	.resume   = qcitp_resume,
};
#endif

static struct i2c_driver i2ctp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = TOUCHPAD_ID_NAME,
#ifdef CONFIG_PM
		.pm = &qcitp_pm_ops,
#endif
	},
	.probe	  = qcitp_probe,
	.remove	  = __devexit_p(qcitp_remove),
	.id_table = qcitp_idtable,
};
/*
 * Read one 3-byte motion packet from the EC into ec_data.
 *
 * Fix: the i2c_transfer() return value was assigned but silently
 * ignored; a failed read now logs an error (ec_data may hold stale
 * bytes in that case, as before).
 */
static void qcitp_fetch_data(struct i2c_client *tpad_client,
			char *ec_data)
{
	struct i2c_msg tp_msg;
	int ret;

	tp_msg.addr = tpad_client->addr;
	tp_msg.flags = I2C_M_RD;
	tp_msg.len = 3;
	tp_msg.buf = (char *)&ec_data[0];

	ret = i2c_transfer(tpad_client->adapter, &tp_msg, 1);
	if (ret != 1)
		pr_err("[TouchPad] i2c read failed: %d\n", ret);
}
/*
 * Decode one 3-byte packet and push it to the input layer.
 *
 * Packet layout (presumably standard PS/2 mouse format -- TODO confirm
 * against the EC firmware): byte 0 carries the button bits 0..2 plus
 * the X sign in bit 4 and Y sign in bit 5; bytes 1 and 2 are the X and
 * Y movement magnitudes.  Y is negated so positive motion points down.
 */
static void qcitp_report_key(struct input_dev *tpad_dev, char *ec_data)
{
	int dx = 0;
	int dy = 0;

	/* sign-extend via bit 4 of byte 0: (b0 << 4) & 0x100 */
	if (ec_data[1])
		dx = (int) ec_data[1] -
		     (int) ((ec_data[0] << 4) & 0x100);
	/* sign via bit 5 of byte 0, with the axis inverted */
	if (ec_data[2])
		dy = (int) ((ec_data[0] << 3) & 0x100) -
		     (int) ec_data[2];

	/* scale by the sysfs-tunable sensitivity (1..10) */
	dx = (dx * tp_sense_val)/10;
	dy = (dy * tp_sense_val)/10;

	input_report_key(tpad_dev, BTN_LEFT,   ec_data[0] & 0x01);
	input_report_key(tpad_dev, BTN_RIGHT,  ec_data[0] & 0x02);
	input_report_key(tpad_dev, BTN_MIDDLE, ec_data[0] & 0x04);
	input_report_rel(tpad_dev, REL_X, dx);
	input_report_rel(tpad_dev, REL_Y, dy);
	input_sync(tpad_dev);
}
/* Workqueue bottom half: fetch a packet over I2C (sleeps) and report it. */
static void qcitp_work_handler(struct work_struct *_work)
{
	struct i2ctpad_drv_data *itpad_drv_data =
		container_of(_work, struct i2ctpad_drv_data, work);

	struct i2c_client *itpad_client = itpad_drv_data->ti2c_client;
	struct input_dev *itpad_dev = itpad_drv_data->qcitp_dev;

	qcitp_fetch_data(itpad_client, itpad_drv_data->ecdata);
	qcitp_report_key(itpad_dev, itpad_drv_data->ecdata);
}

/* Hard IRQ handler: I2C access cannot happen here, so defer to the work. */
static irqreturn_t qcitp_interrupt(int irq, void *dev_id)
{
	struct i2ctpad_drv_data *itpad_drv_data = dev_id;

	schedule_work(&itpad_drv_data->work);
	return IRQ_HANDLED;
}
/*
 * Bind the driver: wake the pad over SMBus, allocate/register the input
 * device, hook up the GPIO interrupt and expose the sensitivity knob in
 * sysfs.
 *
 * Fixes vs. the original error path:
 *  - after a successful input_register_device(), the gpio/irq failure
 *    path called input_unregister_device() and then fell through to
 *    input_free_device() on the same (already freed) device -- a double
 *    free.  The pointer is now cleared after unregistering so the
 *    fall-through free is a no-op (input_free_device(NULL) is safe);
 *  - kobject_create_and_add() can return NULL; don't hand NULL to
 *    sysfs_create_group().
 */
static int __devinit qcitp_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	int err = -ENOMEM;
	struct i2ctpad_drv_data *context = 0;

	context = kzalloc(sizeof(struct i2ctpad_drv_data), GFP_KERNEL);
	if (!context)
		return err;
	i2c_set_clientdata(client, context);
	context->ti2c_client = client;
	context->qcitp_gpio = client->irq;

	/* Enable mouse */
	i2c_smbus_write_byte(client, TOUCHPAD_CMD_ENABLE);
	msleep(TOUCHPAD_INIT_DELAY_MS);
	i2c_smbus_read_byte(client);	/* drain the EC's acknowledge byte */

	/* allocate and register the input device */
	context->qcitp_dev = input_allocate_device();
	if (!context->qcitp_dev) {
		pr_err("[TouchPad] allocting memory fail\n");
		err = -ENOMEM;
		goto allocate_fail;
	}
	context->qcitp_dev->name        = TOUCHPAD_NAME;
	context->qcitp_dev->phys        = TOUCHPAD_DEVICE;
	context->qcitp_dev->id.bustype  = BUS_I2C;
	context->qcitp_dev->id.vendor   = 0x1050;
	context->qcitp_dev->id.product  = 0x1;
	context->qcitp_dev->id.version  = 0x1;
	context->qcitp_dev->evbit[0]  = BIT_MASK(EV_KEY) |
					BIT_MASK(EV_REL);
	context->qcitp_dev->relbit[0] = BIT_MASK(REL_X) |
					BIT_MASK(REL_Y);
	context->qcitp_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
							 BIT_MASK(BTN_MIDDLE) |
							 BIT_MASK(BTN_RIGHT);
	input_set_drvdata(context->qcitp_dev, context);
	err = input_register_device(context->qcitp_dev);
	if (err) {
		pr_err("[TouchPad] register device fail\n");
		goto register_fail;
	}

	/* request the interrupt, delivered via GPIO */
	INIT_WORK(&context->work, qcitp_work_handler);
	err = gpio_request(context->qcitp_gpio, "qci-pad");
	if (err) {
		pr_err("[TouchPad]err gpio request\n");
		goto gpio_request_fail;
	}
	context->qcitp_irq = gpio_to_irq(context->qcitp_gpio);
	err = request_irq(context->qcitp_irq,
			  qcitp_interrupt,
			  IRQF_TRIGGER_FALLING,
			  TOUCHPAD_ID_NAME,
			  context);
	if (err) {
		pr_err("[TouchPad] unable to get IRQ\n");
		goto request_irq_fail;
	}

	/* create the touchpad kobject for the sensitivity attribute */
	context->tp_kobj = kobject_create_and_add("touchpad", NULL);
	if (!context->tp_kobj) {
		pr_warning("[TouchPad] sysfs create fail\n");
	} else {
		err = sysfs_create_group(context->tp_kobj, &attr_group);
		if (err)
			pr_warning("[TouchPad] sysfs create fail\n");
	}

	tp_sense_val = 10;

	return 0;

request_irq_fail:
	gpio_free(context->qcitp_gpio);

gpio_request_fail:
	/*
	 * input_unregister_device() drops the final reference and frees
	 * the device; clear the pointer so the fall-through call to
	 * input_free_device() below becomes a harmless no-op.
	 */
	input_unregister_device(context->qcitp_dev);
	context->qcitp_dev = NULL;

register_fail:
	input_free_device(context->qcitp_dev);

allocate_fail:
	i2c_set_clientdata(client, NULL);
	kfree(context);
	return err;
}
/*
 * Unbind the driver and release its resources.
 *
 * Fix: the original called input_free_device() *before*
 * input_unregister_device() on a device that had been registered.
 * input_unregister_device() drops the last reference and frees the
 * device itself, so the extra free was a use-after-free/double free.
 * (The sysfs group and tp_kobj created in probe are still leaked here;
 * left as-is since the kobject is shared global state.)
 */
static int __devexit qcitp_remove(struct i2c_client *dev)
{
	struct i2ctpad_drv_data *context = i2c_get_clientdata(dev);

	free_irq(context->qcitp_irq, context);
	gpio_free(context->qcitp_gpio);
	input_unregister_device(context->qcitp_dev);
	kfree(context);

	return 0;
}
/* Module entry: register the I2C driver with the core. */
static int __init qcitp_init(void)
{
	return i2c_add_driver(&i2ctp_driver);
}

/* Module exit: unregister the I2C driver. */
static void __exit qcitp_exit(void)
{
	i2c_del_driver(&i2ctp_driver);
}

module_init(qcitp_init);
module_exit(qcitp_exit);

MODULE_AUTHOR("Quanta Computer Inc.");
MODULE_DESCRIPTION("Quanta Embedded Controller I2C Touch Pad Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
TomEwbank/linux-3.2.35 | sound/drivers/opl3/opl3_midi.c | 4714 | 22751 | /*
* Copyright (c) by Uros Bizjak <uros@kss-loka.si>
*
* Midi synth routines for OPL2/OPL3/OPL4 FM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#undef DEBUG_ALLOC
#undef DEBUG_MIDI
#include "opl3_voice.h"
#include <sound/asoundef.h>
extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
extern int use_internal_drums;
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
struct snd_midi_channel *chan);
/*
* The next table looks magical, but it certainly is not. Its values have
* been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
* for i=0. This log-table converts a linear volume-scaling (0..127) to a
* logarithmic scaling as present in the FM-synthesizer chips. so : Volume
* 64 = 0 db = relative volume 0 and: Volume 32 = -6 db = relative
* volume -8 it was implemented as a table because it is only 128 bytes and
* it saves a lot of log() calculations. (Rob Hooft <hooft@chem.ruu.nl>)
*/
/* MIDI velocity (0..127) -> dB-style correction, 8*log2(i/64); see the
 * derivation in the comment block above. */
static char opl3_volume_table[128] =
{
	-63, -48, -40, -35, -32, -29, -27, -26,
	-24, -23, -21, -20, -19, -18, -18, -17,
	-16, -15, -15, -14, -13, -13, -12, -12,
	-11, -11, -10, -10, -10, -9, -9, -8,
	-8, -8, -7, -7, -7, -6, -6, -6,
	-5, -5, -5, -5, -4, -4, -4, -4,
	-3, -3, -3, -3, -2, -2, -2, -2,
	-2, -1, -1, -1, -1, 0, 0, 0,
	0, 0, 0, 1, 1, 1, 1, 1,
	1, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 4,
	4, 4, 4, 4, 4, 4, 4, 5,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	6, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 8, 8, 8, 8, 8
};
/*
 * Scale an operator's TOTAL_LEVEL (attenuation) byte by the note
 * velocity combined with the channel's GM volume and expression
 * controllers.  Only the TOTAL_LEVEL bits of *volbyte are rewritten;
 * the KSL bits are preserved unchanged.
 */
void snd_opl3_calc_volume(unsigned char *volbyte, int vel,
			  struct snd_midi_channel *chan)
{
	int linear, loudness, corrected, level;

	/* combine velocity with GM volume/expression (each 0..127) */
	linear = (vel * chan->gm_volume * chan->gm_expression) / (127*127);
	if (linear > 127)
		linear = 127;

	/* convert the stored attenuation into a "loudness" value */
	loudness = OPL3_TOTAL_LEVEL_MASK - (*volbyte & OPL3_TOTAL_LEVEL_MASK);

	/* apply the logarithmic correction, clamped to the field range */
	corrected = opl3_volume_table[linear] + loudness;
	if (corrected > OPL3_TOTAL_LEVEL_MASK)
		corrected = OPL3_TOTAL_LEVEL_MASK;
	if (corrected < 0)
		corrected = 0;

	/* back to attenuation and merge with the untouched KSL bits */
	level = OPL3_TOTAL_LEVEL_MASK - (corrected & OPL3_TOTAL_LEVEL_MASK);
	*volbyte = (*volbyte & OPL3_KSL_MASK) | (level & OPL3_TOTAL_LEVEL_MASK);
}
/*
* Converts the note frequency to block and fnum values for the FM chip
*/
/* F-numbers for one octave (12 semitones) with two extra entries at
 * each end so a +/-2 semitone pitch bend can interpolate past the
 * octave boundary. */
static short opl3_note_table[16] =
{
	305, 323,	/* for pitch bending, -2 semitones */
	343, 363, 385, 408, 432, 458, 485, 514, 544, 577, 611, 647,
	686, 726	/* for pitch bending, +2 semitones */
};

/*
 * Convert a MIDI note (plus the channel's pitch-bend state) into the
 * chip's fnum (low 8 bits of the frequency number) and blocknum
 * (fnum high bits | octave block) register values.
 */
static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum,
				int note, struct snd_midi_channel *chan)
{
	int block = ((note / 12) & 0x07) - 1;	/* octave block, 3 bits */
	int idx = (note % 12) + 2;		/* skip the 2 bend entries */
	int freq;

	if (chan->midi_pitchbend) {
		int pitchbend = chan->midi_pitchbend;
		int segment;

		if (pitchbend > 0x1FFF)
			pitchbend = 0x1FFF;
		/* whole semitones, then linear interpolation within one */
		segment = pitchbend / 0x1000;
		freq = opl3_note_table[idx+segment];
		freq += ((opl3_note_table[idx+segment+1] - freq) *
			 (pitchbend % 0x1000)) / 0x1000;
	} else {
		freq = opl3_note_table[idx];
	}

	*fnum = (unsigned char) freq;
	*blocknum = ((freq >> 8) & OPL3_FNUM_HIGH_MASK) |
		    ((block << 2) & OPL3_BLOCKNUM_MASK);
}
#ifdef DEBUG_ALLOC
/* Dump every voice's state as one character ('x' = not avail, '.' = off,
 * '2'/'4' = playing 2op/4op) after an allocation event. */
static void debug_alloc(struct snd_opl3 *opl3, char *s, int voice) {
	int i;
	char *str = "x.24";	/* indexed by state + 1 */

	printk(KERN_DEBUG "time %.5i: %s [%.2i]: ", opl3->use_time, s, voice);
	for (i = 0; i < opl3->max_voices; i++)
		printk("%c", *(str + opl3->voices[i].state + 1));
	printk("\n");
}
#endif
/*
* Get a FM voice (channel) to play a note on.
*/
/*
 * Pick the FM voice (channel) on which to play a new note, preferring
 * the cheapest allocation: a FREE voice beats stealing one (CHEAP),
 * which beats stealing two (EXPENSIVE).  Ties are broken by the oldest
 * use time.  Returns the voice index, or -1 if nothing is allocatable.
 *
 * Fix: when a 4op allocation would also steal the bounded 2op voice,
 * the age comparison read vp->time on both sides -- a provable no-op,
 * since voice_time was just initialized from vp->time.  The intent
 * (per the "kill two voices" comment) is to use the *newer* of the two
 * victims' times, so the bounded voice's vp2->time is compared instead.
 */
static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op,
			  struct snd_midi_channel *chan) {
	int chan_4op_1;		/* first voice for 4op instrument */
	int chan_4op_2;		/* second voice for 4op instrument */

	struct snd_opl3_voice *vp, *vp2;
	unsigned int voice_time;
	int i;

#ifdef DEBUG_ALLOC
	char *alloc_type[3] = { "FREE ", "CHEAP ", "EXPENSIVE" };
#endif

	/* This is our "allocation cost" table */
	enum {
		FREE = 0, CHEAP, EXPENSIVE, END
	};

	/* Keeps track of what we are finding */
	struct best {
		unsigned int time;
		int voice;
	} best[END];
	struct best *bp;

	for (i = 0; i < END; i++) {
		best[i].time = (unsigned int)(-1); /* XXX MAX_?INT really */;
		best[i].voice = -1;
	}

	/* Look through all the channels for the most suitable. */
	for (i = 0; i < opl3->max_voices; i++) {
		vp = &opl3->voices[i];

		if (vp->state == SNDRV_OPL3_ST_NOT_AVAIL)
		/* skip unavailable channels, allocated by
		   drum voices or by bounded 4op voices) */
			continue;

		voice_time = vp->time;
		bp = best;

		/* voices 0-2 and 9-11 can head a 4op pair; 3-5 and
		 * 12-14 are the bounded second halves */
		chan_4op_1 = ((i < 3) || (i > 8 && i < 12));
		chan_4op_2 = ((i > 2 && i < 6) || (i > 11 && i < 15));
		if (instr_4op) {
			/* allocate 4op voice */
			/* skip channels unavailable to 4op instrument */
			if (!chan_4op_1)
				continue;

			if (vp->state)
				/* kill one voice, CHEAP */
				bp++;
			/* get state of bounded 2op channel
			   to be allocated for 4op instrument */
			vp2 = &opl3->voices[i + 3];
			if (vp2->state == SNDRV_OPL3_ST_ON_2OP) {
				/* kill two voices, EXPENSIVE */
				bp++;
				/* age of this slot is the newer victim's */
				voice_time = (voice_time > vp2->time) ?
					voice_time : vp2->time;
			}
		} else {
			/* allocate 2op voice */
			if ((chan_4op_1) || (chan_4op_2))
				/* use bounded channels for 2op, CHEAP */
				bp++;
			else if (vp->state)
				/* kill one voice on 2op channel, CHEAP */
				bp++;
			/* raise kill cost to EXPENSIVE for all channels */
			if (vp->state)
				bp++;
		}
		/* remember the least-recently-used voice in this cost class */
		if (voice_time < bp->time) {
			bp->time = voice_time;
			bp->voice = i;
		}
	}

	/* return the best candidate from the cheapest non-empty class */
	for (i = 0; i < END; i++) {
		if (best[i].voice >= 0) {
#ifdef DEBUG_ALLOC
			printk(KERN_DEBUG "%s %iop allocation on voice %i\n",
			       alloc_type[i], instr_4op ? 4 : 2,
			       best[i].voice);
#endif
			return best[i].voice;
		}
	}

	/* not found */
	return -1;
}
/* ------------------------------ */
/*
* System timer interrupt function
*/
/*
 * System timer callback: turn off any voice whose fixed-duration
 * note_off deadline has arrived, and re-arm the timer for one more
 * jiffy while any deadline is still outstanding.
 */
void snd_opl3_timer_func(unsigned long data)
{
	struct snd_opl3 *opl3 = (struct snd_opl3 *)data;
	unsigned long flags;
	int again = 0;
	int i;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	for (i = 0; i < opl3->max_voices; i++) {
		struct snd_opl3_voice *vp = &opl3->voices[i];
		if (vp->state > 0 && vp->note_off_check) {
			/* NOTE(review): exact equality relies on this
			 * callback running every jiffy; time_after_eq()
			 * would tolerate a late tick -- confirm intent */
			if (vp->note_off == jiffies)
				snd_opl3_note_off_unsafe(opl3, vp->note, 0,
							 vp->chan);
			else
				again++;
		}
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);

	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (again) {
		opl3->tlist.expires = jiffies + 1;	/* invoke again */
		add_timer(&opl3->tlist);
	} else {
		opl3->sys_timer_status = 0;	/* allow a fresh start */
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
}
/*
* Start system timer
*/
/* Arm the per-jiffy system timer if it is not already running;
 * sys_timer_status guards against double add_timer(). */
static void snd_opl3_start_timer(struct snd_opl3 *opl3)
{
	unsigned long flags;

	spin_lock_irqsave(&opl3->sys_timer_lock, flags);
	if (! opl3->sys_timer_status) {
		opl3->tlist.expires = jiffies + 1;
		add_timer(&opl3->tlist);
		opl3->sys_timer_status = 1;
	}
	spin_unlock_irqrestore(&opl3->sys_timer_lock, flags);
}
/* ------------------------------ */
/* OSS channel number -> OPL3 voice index, interleaving the pairable
 * voices so 4op-capable channels are kept together. */
static int snd_opl3_oss_map[MAX_OPL3_VOICES] = {
	0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17, 3, 4 ,5, 12, 13, 14
};
/*
* Start a note.
*/
/*
 * MIDI note-on handler: locate the patch, allocate (or remap) an FM
 * voice, program its operators from the patch data and key the note
 * on.  May loop back once via __extra_prg to layer a second patch.
 */
void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;
	int instr_4op;			/* patch needs a 4-operator voice */

	int voice;
	struct snd_opl3_voice *vp, *vp2;
	unsigned short connect_mask;
	unsigned char connection;
	unsigned char vol_op[4];

	int extra_prg = 0;

	unsigned short reg_side;	/* OPL3_LEFT or OPL3_RIGHT bank */
	unsigned char op_offset;
	unsigned char voice_offset;

	unsigned short opl3_reg;
	unsigned char reg_val;
	unsigned char prg, bank;

	int key = note;			/* original note, kept for bookkeeping */
	unsigned char fnum, blocknum;
	int i;

	struct fm_patch *patch;
	struct fm_instrument *fm;
	unsigned long flags;

	opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Note on, ch %i, inst %i, note %i, vel %i\n",
		   chan->number, chan->midi_program, note, vel);
#endif
	/* in SYNTH mode, application takes care of voices */
	/* in SEQ mode, drum voice numbers are notes on drum channel */
	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		if (chan->drum_channel) {
			/* percussion instruments are located in bank 128 */
			bank = 128;
			prg = note;
		} else {
			bank = chan->gm_bank_select;
			prg = chan->midi_program;
		}
	} else {
		/* Prepare for OSS mode */
		if (chan->number >= MAX_OPL3_VOICES)
			return;

		/* OSS instruments are located in bank 127 */
		bank = 127;
		prg = chan->midi_program;
	}

	spin_lock_irqsave(&opl3->voice_lock, flags);

	if (use_internal_drums) {
		snd_opl3_drum_switch(opl3, note, vel, 1, chan);
		spin_unlock_irqrestore(&opl3->voice_lock, flags);
		return;
	}

 __extra_prg:
	patch = snd_opl3_find_patch(opl3, prg, bank, 0);
	if (!patch) {
		spin_unlock_irqrestore(&opl3->voice_lock, flags);
		return;
	}

	fm = &patch->inst;
	switch (patch->type) {
	case FM_PATCH_OPL2:
		instr_4op = 0;
		break;
	case FM_PATCH_OPL3:
		if (opl3->hardware >= OPL3_HW_OPL3) {
			instr_4op = 1;
			break;
		}
		/* OPL3 patch on OPL2-only hardware: fall through and bail */
	default:
		spin_unlock_irqrestore(&opl3->voice_lock, flags);
		return;
	}
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> OPL%i instrument: %s\n",
		   instr_4op ? 3 : 2, patch->name);
#endif
	/* in SYNTH mode, application takes care of voices */
	/* in SEQ mode, allocate voice on free OPL3 channel */
	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		voice = opl3_get_voice(opl3, instr_4op, chan);
	} else {
		/* remap OSS voice */
		voice = snd_opl3_oss_map[chan->number];
	}

	/* NOTE(review): opl3_get_voice() can return -1 and that is not
	 * checked here before indexing opl3->voices[] -- confirm whether
	 * allocation failure is possible in practice */
	if (voice < MAX_OPL2_VOICES) {
		/* Left register block for voices 0 .. 8 */
		reg_side = OPL3_LEFT;
		voice_offset = voice;
		connect_mask = (OPL3_LEFT_4OP_0 << voice_offset) & 0x07;
	} else {
		/* Right register block for voices 9 .. 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
		connect_mask = (OPL3_RIGHT_4OP_0 << voice_offset) & 0x38;
	}

	/* kill voice on channel */
	vp = &opl3->voices[voice];
	if (vp->state > 0) {
		opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
		reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
		opl3->command(opl3, opl3_reg, reg_val);
	}
	if (instr_4op) {
		/* also key off the bounded second voice */
		vp2 = &opl3->voices[voice + 3];
		/* NOTE(review): this checks vp->state / vp->keyon_reg, not
		 * vp2's -- looks like a copy-paste slip, but it matches the
		 * historical upstream code; confirm before changing */
		if (vp->state > 0) {
			opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
					       voice_offset + 3);
			reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
			opl3->command(opl3, opl3_reg, reg_val);
		}
	}

	/* set connection register */
	if (instr_4op) {
		if ((opl3->connection_reg ^ connect_mask) & connect_mask) {
			opl3->connection_reg |= connect_mask;
			/* set connection bit */
			opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT;
			opl3->command(opl3, opl3_reg, opl3->connection_reg);
		}
	} else {
		if ((opl3->connection_reg ^ ~connect_mask) & connect_mask) {
			opl3->connection_reg &= ~connect_mask;
			/* clear connection bit */
			opl3_reg = OPL3_RIGHT | OPL3_REG_CONNECTION_SELECT;
			opl3->command(opl3, opl3_reg, opl3->connection_reg);
		}
	}

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> setting OPL3 connection: 0x%x\n",
		   opl3->connection_reg);
#endif
	/*
	 * calculate volume depending on connection
	 * between FM operators (see include/opl3.h)
	 */
	for (i = 0; i < (instr_4op ? 4 : 2); i++)
		vol_op[i] = fm->op[i].ksl_level;

	connection = fm->feedback_connection[0] & 0x01;
	if (instr_4op) {
		connection <<= 1;
		connection |= fm->feedback_connection[1] & 0x01;

		/* velocity-scale only the carrier operators for this
		 * connection topology; modulators keep their level */
		snd_opl3_calc_volume(&vol_op[3], vel, chan);
		switch (connection) {
		case 0x03:
			snd_opl3_calc_volume(&vol_op[2], vel, chan);
			/* fallthru */
		case 0x02:
			snd_opl3_calc_volume(&vol_op[0], vel, chan);
			break;
		case 0x01:
			snd_opl3_calc_volume(&vol_op[1], vel, chan);
		}
	} else {
		snd_opl3_calc_volume(&vol_op[1], vel, chan);
		if (connection)
			snd_opl3_calc_volume(&vol_op[0], vel, chan);
	}

	/* Program the FM voice characteristics */
	for (i = 0; i < (instr_4op ? 4 : 2); i++) {
#ifdef DEBUG_MIDI
		snd_printk(KERN_DEBUG " --> programming operator %i\n", i);
#endif
		op_offset = snd_opl3_regmap[voice_offset][i];

		/* Set OPL3 AM_VIB register of requested voice/operator */
		reg_val = fm->op[i].am_vib;
		opl3_reg = reg_side | (OPL3_REG_AM_VIB + op_offset);
		opl3->command(opl3, opl3_reg, reg_val);

		/* Set OPL3 KSL_LEVEL register of requested voice/operator */
		reg_val = vol_op[i];
		opl3_reg = reg_side | (OPL3_REG_KSL_LEVEL + op_offset);
		opl3->command(opl3, opl3_reg, reg_val);

		/* Set OPL3 ATTACK_DECAY register of requested voice/operator */
		reg_val = fm->op[i].attack_decay;
		opl3_reg = reg_side | (OPL3_REG_ATTACK_DECAY + op_offset);
		opl3->command(opl3, opl3_reg, reg_val);

		/* Set OPL3 SUSTAIN_RELEASE register of requested voice/operator */
		reg_val = fm->op[i].sustain_release;
		opl3_reg = reg_side | (OPL3_REG_SUSTAIN_RELEASE + op_offset);
		opl3->command(opl3, opl3_reg, reg_val);

		/* Select waveform */
		reg_val = fm->op[i].wave_select;
		opl3_reg = reg_side | (OPL3_REG_WAVE_SELECT + op_offset);
		opl3->command(opl3, opl3_reg, reg_val);
	}

	/* Set operator feedback and 2op inter-operator connection */
	reg_val = fm->feedback_connection[0];
	/* Set output voice connection */
	reg_val |= OPL3_STEREO_BITS;
	if (chan->gm_pan < 43)		/* panned hard-ish left */
		reg_val &= ~OPL3_VOICE_TO_RIGHT;
	if (chan->gm_pan > 85)		/* panned hard-ish right */
		reg_val &= ~OPL3_VOICE_TO_LEFT;
	opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset);
	opl3->command(opl3, opl3_reg, reg_val);

	if (instr_4op) {
		/* Set 4op inter-operator connection */
		reg_val = fm->feedback_connection[1] & OPL3_CONNECTION_BIT;
		/* Set output voice connection */
		reg_val |= OPL3_STEREO_BITS;
		if (chan->gm_pan < 43)
			reg_val &= ~OPL3_VOICE_TO_RIGHT;
		if (chan->gm_pan > 85)
			reg_val &= ~OPL3_VOICE_TO_LEFT;
		opl3_reg = reg_side | (OPL3_REG_FEEDBACK_CONNECTION +
				       voice_offset + 3);
		opl3->command(opl3, opl3_reg, reg_val);
	}

	/*
	 * Special treatment of percussion notes for fm:
	 * Requested pitch is really program, and pitch for
	 * device is whatever was specified in the patch library.
	 */
	if (fm->fix_key)
		note = fm->fix_key;
	/*
	 * use transpose if defined in patch library
	 */
	if (fm->trnsps)
		note += (fm->trnsps - 64);

	snd_opl3_calc_pitch(&fnum, &blocknum, note, chan);

	/* Set OPL3 FNUM_LOW register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset);
	opl3->command(opl3, opl3_reg, fnum);

	opl3->voices[voice].keyon_reg = blocknum;

	/* Set output sound flag */
	blocknum |= OPL3_KEYON_BIT;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> trigger voice %i\n", voice);
#endif
	/* Set OPL3 KEYON_BLOCK register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	opl3->command(opl3, opl3_reg, blocknum);

	/* kill note after fixed duration (in centiseconds) */
	if (fm->fix_dur) {
		opl3->voices[voice].note_off = jiffies +
			(fm->fix_dur * HZ) / 100;
		snd_opl3_start_timer(opl3);
		opl3->voices[voice].note_off_check = 1;
	} else
		opl3->voices[voice].note_off_check = 0;

	/* get extra pgm, but avoid possible loops */
	extra_prg = (extra_prg) ? 0 : fm->modes;

	/* do the bookkeeping */
	vp->time = opl3->use_time++;
	vp->note = key;
	vp->chan = chan;

	if (instr_4op) {
		vp->state = SNDRV_OPL3_ST_ON_4OP;

		/* the bounded voice is marked unavailable for allocation */
		vp2 = &opl3->voices[voice + 3];
		vp2->time = opl3->use_time++;
		vp2->note = key;
		vp2->chan = chan;
		vp2->state = SNDRV_OPL3_ST_NOT_AVAIL;
	} else {
		if (vp->state == SNDRV_OPL3_ST_ON_4OP) {
			/* 4op killed by 2op, release bounded voice */
			vp2 = &opl3->voices[voice + 3];
			vp2->time = opl3->use_time++;
			vp2->state = SNDRV_OPL3_ST_OFF;
		}
		vp->state = SNDRV_OPL3_ST_ON_2OP;
	}

#ifdef DEBUG_ALLOC
	debug_alloc(opl3, "note on ", voice);
#endif

	/* allocate extra program if specified in patch library */
	if (extra_prg) {
		if (extra_prg > 128) {
			bank = 128;
			/* percussions start at 35 */
			prg = extra_prg - 128 + 35 - 1;
		} else {
			bank = 0;
			prg = extra_prg - 1;
		}
#ifdef DEBUG_MIDI
		snd_printk(KERN_DEBUG " *** allocating extra program\n");
#endif
		goto __extra_prg;
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}
/*
 * Key off one voice (clear its KEYON bit) and update the allocator
 * bookkeeping.  Caller must hold opl3->voice_lock.
 */
static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice)
{
	unsigned short reg_side;
	unsigned char voice_offset;
	unsigned short opl3_reg;

	struct snd_opl3_voice *vp, *vp2;

	if (snd_BUG_ON(voice >= MAX_OPL3_VOICES))
		return;

	vp = &opl3->voices[voice];
	if (voice < MAX_OPL2_VOICES) {
		/* Left register block for voices 0 .. 8 */
		reg_side = OPL3_LEFT;
		voice_offset = voice;
	} else {
		/* Right register block for voices 9 .. 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
	}

	/* kill voice */
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG " --> kill voice %i\n", voice);
#endif
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	/* clear Key ON bit */
	opl3->command(opl3, opl3_reg, vp->keyon_reg);

	/* do the bookkeeping */
	vp->time = opl3->use_time++;

	if (vp->state == SNDRV_OPL3_ST_ON_4OP) {
		/* also release the bounded second voice of a 4op pair */
		vp2 = &opl3->voices[voice + 3];

		vp2->time = opl3->use_time++;
		vp2->state = SNDRV_OPL3_ST_OFF;
	}
	vp->state = SNDRV_OPL3_ST_OFF;
#ifdef DEBUG_ALLOC
	debug_alloc(opl3, "note off", voice);
#endif

}
/*
 * Release a note in response to a midi note off.
 * "unsafe" = caller must already hold opl3->voice_lock; the locking
 * wrapper is snd_opl3_note_off() below.
 */
static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
			 struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;
	int voice;
	struct snd_opl3_voice *vp;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Note off, ch %i, inst %i, note %i\n",
		   chan->number, chan->midi_program, note);
#endif
	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		/* internal drum handling takes over for drum channels */
		if (chan->drum_channel && use_internal_drums) {
			snd_opl3_drum_switch(opl3, note, vel, 0, chan);
			return;
		}
		/* this loop will hopefully kill all extra voices, because
		   they are grouped by the same channel and note values */
		for (voice = 0; voice < opl3->max_voices; voice++) {
			vp = &opl3->voices[voice];
			if (vp->state > 0 && vp->chan == chan && vp->note == note) {
				snd_opl3_kill_voice(opl3, voice);
			}
		}
	} else {
		/* remap OSS voices: OSS mode addresses voices by channel
		 * number through the fixed snd_opl3_oss_map table */
		if (chan->number < MAX_OPL3_VOICES) {
			voice = snd_opl3_oss_map[chan->number];
			snd_opl3_kill_voice(opl3, voice);
		}
	}
}
/*
 * Note-off entry point: serialise voice manipulation by taking the
 * voice lock around the lock-free worker snd_opl3_note_off_unsafe().
 */
void snd_opl3_note_off(void *p, int note, int vel,
		       struct snd_midi_channel *chan)
{
	unsigned long irq_flags;
	struct snd_opl3 *chip = p;

	spin_lock_irqsave(&chip->voice_lock, irq_flags);
	snd_opl3_note_off_unsafe(p, note, vel, chan);
	spin_unlock_irqrestore(&chip->voice_lock, irq_flags);
}
/*
 * Key pressure (polyphonic aftertouch) callback.  Not implemented for
 * the OPL3 synth: apart from optional debug logging this is a no-op.
 */
void snd_opl3_key_press(void *p, int note, int vel, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Key pressure, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * Terminate-note callback.  Not implemented for the OPL3 synth: apart
 * from optional debug logging this is a no-op.
 */
void snd_opl3_terminate_note(void *p, int note, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Terminate note, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * Reprogram the pitch of one sounding voice: recompute FNUM/BLOCK from
 * the channel state (pitch bend etc.), write FNUM_LOW, then rewrite
 * KEYON_BLOCK with the KEY-ON bit kept set so the note keeps sounding.
 * Called with opl3->voice_lock held (see snd_opl3_pitch_ctrl()).
 */
static void snd_opl3_update_pitch(struct snd_opl3 *opl3, int voice)
{
	unsigned short reg_side;
	unsigned char voice_offset;
	unsigned short opl3_reg;
	unsigned char fnum, blocknum;
	struct snd_opl3_voice *vp;

	if (snd_BUG_ON(voice >= MAX_OPL3_VOICES))
		return;
	vp = &opl3->voices[voice];
	if (vp->chan == NULL)
		return; /* not allocated? */
	if (voice < MAX_OPL2_VOICES) {
		/* Left register block for voices 0 .. 8 */
		reg_side = OPL3_LEFT;
		voice_offset = voice;
	} else {
		/* Right register block for voices 9 .. 17 */
		reg_side = OPL3_RIGHT;
		voice_offset = voice - MAX_OPL2_VOICES;
	}
	snd_opl3_calc_pitch(&fnum, &blocknum, vp->note, vp->chan);
	/* Set OPL3 FNUM_LOW register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_FNUM_LOW + voice_offset);
	opl3->command(opl3, opl3_reg, fnum);
	/* remember the key-off value (block bits without KEY-ON) */
	vp->keyon_reg = blocknum;
	/* Set output sound flag */
	blocknum |= OPL3_KEYON_BIT;
	/* Set OPL3 KEYON_BLOCK register of requested voice */
	opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK + voice_offset);
	opl3->command(opl3, opl3_reg, blocknum);
	vp->time = opl3->use_time++;
}
/*
 * Update voice pitch controller: refresh the hardware pitch of every
 * voice currently sounding on the given MIDI channel (sequencer mode),
 * or of the fixed channel-mapped voice in OSS mode.  Takes the voice
 * lock, so it may be called from the normal MIDI callback context.
 */
static void snd_opl3_pitch_ctrl(struct snd_opl3 *opl3, struct snd_midi_channel *chan)
{
	int voice;
	struct snd_opl3_voice *vp;
	unsigned long flags;

	spin_lock_irqsave(&opl3->voice_lock, flags);
	if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
		for (voice = 0; voice < opl3->max_voices; voice++) {
			vp = &opl3->voices[voice];
			if (vp->state > 0 && vp->chan == chan) {
				snd_opl3_update_pitch(opl3, voice);
			}
		}
	} else {
		/* remap OSS voices */
		if (chan->number < MAX_OPL3_VOICES) {
			voice = snd_opl3_oss_map[chan->number];
			snd_opl3_update_pitch(opl3, voice);
		}
	}
	spin_unlock_irqrestore(&opl3->voice_lock, flags);
}
/*
 * Deal with a controller type event. This includes all types of
 * control events, not just the midi controllers.
 *
 * Handled here: mod wheel -> global vibrato depth bit, "tremolo depth"
 * effect control -> global tremolo depth bit (both live in the shared
 * percussion/depth register), and pitch bend -> per-voice pitch update.
 * All other controller types are ignored.
 */
void snd_opl3_control(void *p, int type, struct snd_midi_channel *chan)
{
	struct snd_opl3 *opl3;

	opl3 = p;
#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "Controller, TYPE = %i, ch#: %i, inst#: %i\n",
		   type, chan->number, chan->midi_program);
#endif
	switch (type) {
	case MIDI_CTL_MSB_MODWHEEL:
		/* >63 = "on": OPL3 only has a single on/off depth bit */
		if (chan->control[MIDI_CTL_MSB_MODWHEEL] > 63)
			opl3->drum_reg |= OPL3_VIBRATO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_VIBRATO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
			      opl3->drum_reg);
		break;
	case MIDI_CTL_E2_TREMOLO_DEPTH:
		if (chan->control[MIDI_CTL_E2_TREMOLO_DEPTH] > 63)
			opl3->drum_reg |= OPL3_TREMOLO_DEPTH;
		else
			opl3->drum_reg &= ~OPL3_TREMOLO_DEPTH;
		opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION,
			      opl3->drum_reg);
		break;
	case MIDI_CTL_PITCHBEND:
		snd_opl3_pitch_ctrl(opl3, chan);
		break;
	}
}
/*
 * NRPN (non-registered parameter) callback.  Not implemented for the
 * OPL3 synth: apart from optional debug logging this is a no-op.
 */
void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan,
		   struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "NRPN, ch#: %i, inst#: %i\n",
		   chan->number, chan->midi_program);
#endif
}
/*
 * System-exclusive message callback.  Not implemented for the OPL3
 * synth: apart from optional debug logging this is a no-op.
 */
void snd_opl3_sysex(void *p, unsigned char *buf, int len,
		    int parsed, struct snd_midi_channel_set *chset)
{
	struct snd_opl3 *opl3 = p;

#ifdef DEBUG_MIDI
	snd_printk(KERN_DEBUG "SYSEX\n");
#endif
}
| gpl-2.0 |
gproj-m/lge-kernel-gproj | arch/powerpc/kernel/hw_breakpoint.c | 4714 | 9239 | /*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers. Derived from
* "arch/x86/kernel/hw_breakpoint.c"
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright 2010 IBM Corporation
* Author: K.Prasad <prasad@linux.vnet.ibm.com>
*
*/
#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for every cpu
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
/*
 * Returns total number of data or instruction breakpoints available.
 * This powerpc implementation provides data breakpoints only (DABR);
 * there are no hardware instruction breakpoints.
 */
int hw_breakpoint_slots(int type)
{
	return (type == TYPE_DATA) ? HBP_NUM : 0;
}
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 *
 * Always returns 0; there is a single per-cpu slot, so the "search"
 * is just taking it.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	*slot = bp;
	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		set_dabr(info->address | info->type | DABR_TRANSLATION);
	return 0;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct perf_event **slot = &__get_cpu_var(bp_per_reg);

	/* the single per-cpu slot should contain exactly this event */
	if (*slot != bp) {
		WARN_ONCE(1, "Can't find the breakpoint");
		return;
	}
	*slot = NULL;
	/* writing 0 to DABR disables the hardware breakpoint */
	set_dabr(0);
}
/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * ctx->task is only set for per-task (user) breakpoints.
	 */
	if (bp->ctx->task)
		bp->ctx->task->thread.last_hit_ubp = NULL;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
return is_kernel_addr(info->address);
}
/*
 * Translate a DABR access-type encoding into the generic
 * HW_BREAKPOINT_R/W flags.  Returns 0 on success and -EINVAL for any
 * value that is not exactly one of the three supported encodings.
 */
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	switch (type) {
	case DABR_DATA_READ:
		*gen_bp_type = HW_BREAKPOINT_R;
		return 0;
	case DABR_DATA_WRITE:
		*gen_bp_type = HW_BREAKPOINT_W;
		return 0;
	case (DABR_DATA_WRITE | DABR_DATA_READ):
		*gen_bp_type = (HW_BREAKPOINT_W | HW_BREAKPOINT_R);
		return 0;
	}
	return -EINVAL;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 *
 * Translates the generic perf attributes (bp_type/bp_addr/bp_len) into
 * the DABR encoding stored in the counter's arch_hw_breakpoint, and
 * rejects requests a single DABR match cannot express.
 *
 * Returns 0 on success; -EINVAL for a NULL event, an unsupported
 * access type, or a length that crosses the DABR-covered double-word.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info;

	/*
	 * Check bp before using it: the previous code computed
	 * counter_arch_bp(bp) first, doing pointer arithmetic on a
	 * possibly-NULL pointer before the NULL check.
	 */
	if (!bp)
		return -EINVAL;
	info = counter_arch_bp(bp);

	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = DABR_DATA_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = DABR_DATA_WRITE;
		break;
	case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
		info->type = (DABR_DATA_READ | DABR_DATA_WRITE);
		break;
	default:
		return -EINVAL;
	}

	info->address = bp->attr.bp_addr;
	info->len = bp->attr.bp_len;

	/*
	 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
	 * and breakpoint addresses are aligned to nearest double-word
	 * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
	 * 'symbolsize' should satisfy the check below.
	 */
	if (info->len >
	    (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN)))
		return -EINVAL;

	return 0;
}
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	/* nothing pending: no breakpoint was being single-stepped */
	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	/* drop single-step mode and re-arm the DABR directly */
	regs->msr &= ~MSR_SE;
	set_dabr(info->address | info->type | DABR_TRANSLATION);
	tsk->thread.last_hit_ubp = NULL;
}
/*
 * Handle debug exception notifications (DABR match).
 *
 * Flow: disable the DABR, look up the per-cpu event under RCU, then
 * either (a) hand off to ptrace, (b) arrange user-space single-stepping
 * via MSR_SE, or (c) emulate the kernel instruction and re-arm the
 * DABR, delivering the perf event with trigger-after-execute semantics.
 *
 * Returns NOTIFY_STOP when the exception was fully consumed here,
 * NOTIFY_DONE when ptrace should still see it.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	set_dabr(0);

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __get_cpu_var(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions. If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
			(dar - bp->attr.bp_addr < bp->attr.bp_len));

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		/* remember the event so single_step_dabr_instruction()
		 * can re-arm the DABR after the step completes */
		bp->ctx->task->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute it. We've failed in reliably
	 * handling the hw-breakpoint. Unregister it and throw a warning
	 * message to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	/* kernel instruction handled; re-arm the breakpoint */
	set_dabr(info->address | info->type | DABR_TRANSLATION);
out:
	rcu_read_unlock();
	return rc;
}
/*
 * Handle single-step exceptions following a DABR hit.
 *
 * Completes the deferred work scheduled by hw_breakpoint_handler() for
 * user-space hits: deliver the perf event (trigger-after-execute) and
 * re-arm the DABR now that the faulting instruction has executed.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *bp_info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	bp_info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to confirm to 'trigger-after-execute' semantics
	 */
	if (!bp_info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
/*
 * Notifier entry point for debug exceptions: dispatch DABR matches and
 * single-step completions to their handlers; ignore everything else.
 */
int __kprobes hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	switch (val) {
	case DIE_DABR_MATCH:
		return hw_breakpoint_handler(data);
	case DIE_SSTEP:
		return single_step_dabr_instruction(data);
	default:
		return NOTIFY_DONE;
	}
}
/*
 * Release the user breakpoints used by ptrace.
 * There is a single ptrace breakpoint slot on this architecture
 * (ptrace_bps[0]); unregister it and clear the slot.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}
/* Required PMU callback; intentionally empty on this architecture. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
| gpl-2.0 |
omnirom/android_kernel_google_msm | arch/powerpc/sysdev/mv64x60_pci.c | 4714 | 4340 | /*
* PCI bus setup for Marvell mv64360/mv64460 host bridges (Discovery)
*
* Author: Dale Farnsworth <dale@farnsworth.org>
*
* 2007 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/pci.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#define PCI_HEADER_TYPE_INVALID 0x7f /* Invalid PCI header type */
#ifdef CONFIG_SYSFS
/* 32-bit hex or dec stringified number + '\n' */
#define MV64X60_VAL_LEN_MAX 11
#define MV64X60_PCICFG_CPCI_HOTSWAP 0x68
/*
 * sysfs read handler for the "hs_reg" binary attribute: read the host
 * bridge's cPCI hotswap config-space register and format it as
 * "0x%08x\n".  Only offset 0 is supported; the caller's buffer must
 * have room for the full formatted value.
 */
static ssize_t mv64x60_hs_reg_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t off, size_t count)
{
	struct pci_dev *phb;
	u32 v;

	if (off > 0)
		return 0;
	if (count < MV64X60_VAL_LEN_MAX)
		return -EINVAL;

	/* the host bridge is always device 0:0.0 */
	phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!phb)
		return -ENODEV;
	pci_read_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, &v);
	pci_dev_put(phb);

	return sprintf(buf, "0x%08x\n", v);
}
/*
 * sysfs write handler for the "hs_reg" binary attribute: parse a
 * user-supplied integer (any base accepted by "%i") and write it to
 * the host bridge's cPCI hotswap config-space register.
 *
 * Returns the number of bytes consumed, 0 for a write at a non-zero
 * offset, or a negative errno on failure.
 */
static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
{
	struct pci_dev *phb;
	u32 v;

	if (off > 0)
		return 0;
	/* count is size_t (unsigned): "<= 0" was misleading, 0 is the
	 * only value the old test could catch */
	if (count == 0)
		return -EINVAL;
	if (sscanf(buf, "%i", &v) != 1)
		return -EINVAL;

	/* the host bridge is always device 0:0.0 */
	phb = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!phb)
		return -ENODEV;
	pci_write_config_dword(phb, MV64X60_PCICFG_CPCI_HOTSWAP, v);
	pci_dev_put(phb);

	return count;
}
/*
 * Binary sysfs attribute "hs_reg": exposes the mv64x60 cPCI hotswap
 * config register (offset 0x68), world-readable, root-writable.
 */
static struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */
	.attr = {
		.name = "hs_reg",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = MV64X60_VAL_LEN_MAX,
	.read = mv64x60_hs_reg_read,
	.write = mv64x60_hs_reg_write,
};
/*
 * Register the "hs_reg" sysfs attribute when an mv64360 bridge exists
 * in the device tree.  Creates a minimal platform device to hang the
 * attribute off.
 */
static int __init mv64x60_sysfs_init(void)
{
	struct device_node *np;
	struct platform_device *pdev;
	const unsigned int *prop;

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360");
	if (!np)
		return 0;

	/*
	 * NOTE(review): 'prop' is fetched but never checked or used —
	 * the attribute is registered even when "hs_reg_valid" is
	 * absent.  Looks like a missing "if (!prop) return 0;" —
	 * confirm against the platform's device trees before changing.
	 */
	prop = of_get_property(np, "hs_reg_valid", NULL);
	of_node_put(np);

	pdev = platform_device_register_simple("marvell,mv64360", 0, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return sysfs_create_bin_file(&pdev->dev.kobj, &mv64x60_hs_reg_attr);
}
subsys_initcall(mv64x60_sysfs_init);
#endif /* CONFIG_SYSFS */
/*
 * Early PCI fixup applied to the mv64360/mv64460 host bridge device.
 */
static void __init mv64x60_pci_fixup_early(struct pci_dev *dev)
{
	/*
	 * Set the host bridge hdr_type to an invalid value so that
	 * pci_setup_device() will ignore the host bridge.
	 */
	dev->hdr_type = PCI_HEADER_TYPE_INVALID;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360,
mv64x60_pci_fixup_early);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64460,
mv64x60_pci_fixup_early);
/*
 * Create and set up a PCI controller for one mv64x60 bridge node:
 * locate its register window, read the firmware-assigned bus range,
 * wire up indirect config-space access, and process the node's
 * "ranges" property to map I/O and memory windows.
 *
 * Returns 0 on success, -ENODEV if the node has no usable "reg"
 * property, or -ENOMEM if the controller cannot be allocated.
 */
static int __init mv64x60_add_bridge(struct device_node *dev)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	int primary;

	memset(&rsrc, 0, sizeof(rsrc));

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_ERR "No PCI reg property in device tree\n");
		return -ENODEV;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %s, assume"
		       " bus 0\n", dev->full_name);

	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* config address at rsrc.start, config data at rsrc.start + 4 */
	setup_indirect_pci(hose, rsrc.start, rsrc.start + 4, 0);
	hose->self_busno = hose->first_busno;

	printk(KERN_INFO "Found MV64x60 PCI host bridge at 0x%016llx. "
	       "Firmware bus number: %d->%d\n",
	       (unsigned long long)rsrc.start, hose->first_busno,
	       hose->last_busno);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	primary = (hose->first_busno == 0);
	pci_process_bridge_OF_ranges(hose, dev, primary);

	return 0;
}
/*
 * Walk the device tree and register a PCI controller for every
 * Marvell mv64360 PCI bridge node found.
 */
void __init mv64x60_pci_init(void)
{
	struct device_node *node;

	for_each_compatible_node(node, "pci", "marvell,mv64360-pci")
		mv64x60_add_bridge(node);
}
| gpl-2.0 |
CaptainThrowback/kernel_htc_m8whl_2.16.651.4 | arch/um/kernel/initrd.c | 4970 | 1727 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include "linux/init.h"
#include "linux/bootmem.h"
#include "linux/initrd.h"
#include "asm/types.h"
#include "init.h"
#include "os.h"
/* Changed by uml_initrd_setup, which is a setup */
static char *initrd __initdata = NULL;

static int load_initrd(char *filename, void *buf, int size);

/*
 * Post-setup hook: if "initrd=" was given on the command line, size
 * the file, allocate boot memory for it, load it, and publish the
 * image via initrd_start/initrd_end.  Always returns 0: any failure
 * just means booting continues without an initrd.
 */
static int __init read_initrd(void)
{
	void *area;
	long long size;
	int err;

	if (initrd == NULL)
		return 0;

	err = os_file_size(initrd, &size);
	if (err)
		return 0;

	/*
	 * This is necessary because alloc_bootmem craps out if you
	 * ask for no memory.
	 */
	if (size == 0) {
		printk(KERN_ERR "\"%s\" is a zero-size initrd\n", initrd);
		return 0;
	}

	area = alloc_bootmem(size);
	if (area == NULL)
		return 0;

	/*
	 * NOTE(review): if load_initrd() fails here, the bootmem
	 * allocation is never released.  Harmless at boot, but worth
	 * confirming whether free_bootmem() should be called.
	 */
	if (load_initrd(initrd, area, size) == -1)
		return 0;

	initrd_start = (unsigned long) area;
	initrd_end = initrd_start + size;
	return 0;
}
__uml_postsetup(read_initrd);

/*
 * "initrd=" command-line handler: just remember the filename; the
 * actual load happens later in read_initrd().
 */
static int __init uml_initrd_setup(char *line, int *add)
{
	initrd = line;
	return 0;
}
__uml_setup("initrd=", uml_initrd_setup,
"initrd=<initrd image>\n"
" This is used to boot UML from an initrd image. The argument is the\n"
" name of the file containing the image.\n\n"
);
/*
 * Read exactly 'size' bytes of 'filename' into 'buf' via the host OS.
 * Returns 0 on success, -1 on open or short-read failure.
 *
 * Fix: the previous version returned -1 on a short read without
 * closing the file descriptor, leaking it; the fd is now closed on
 * every path after the read.
 */
static int load_initrd(char *filename, void *buf, int size)
{
	int fd, n;

	fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
	if (fd < 0) {
		printk(KERN_ERR "Opening '%s' failed - err = %d\n", filename,
		       -fd);
		return -1;
	}
	n = os_read_file(fd, buf, size);
	os_close_file(fd);
	if (n != size) {
		printk(KERN_ERR "Read of %d bytes from '%s' failed, "
		       "err = %d\n", size,
		       filename, -n);
		return -1;
	}
	return 0;
}
| gpl-2.0 |
gearslam/JB_LS970ZVC | arch/um/kernel/mem.c | 4970 | 7503 | /*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include "as-layout.h"
#include "init.h"
#include "kern.h"
#include "kern_util.h"
#include "mem_user.h"
#include "os.h"
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);
/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;
/*
* Initialized during boot, and readonly for initializing page tables
* afterwards
*/
pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
int kmalloc_ok = 0;
/* Used during early boot */
static unsigned long brk_end;
#ifdef CONFIG_HIGHMEM
/*
 * Hand the highmem page range over to the page allocator: clear the
 * bootmem "reserved" marking on each page and free it.
 */
static void setup_highmem(unsigned long highmem_start,
			  unsigned long highmem_len)
{
	struct page *page;
	unsigned long highmem_pfn;
	int i;

	highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
	for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
		page = &mem_map[highmem_pfn + i];
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}
}
#endif
#endif
/*
 * Finish memory initialization: zero the shared zero-page, map the
 * region between the current brk and the reserved area, release all
 * bootmem pages to the buddy allocator, account totals, and enable
 * kmalloc use (kmalloc_ok).
 */
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	free_bootmem(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	totalram_pages = free_all_bootmem();
	max_low_pfn = totalram_pages;
#ifdef CONFIG_HIGHMEM
	totalhigh_pages = highmem >> PAGE_SHIFT;
	totalram_pages += totalhigh_pages;
#endif
	num_physpages = totalram_pages;
	max_pfn = totalram_pages;
	printk(KERN_INFO "Memory: %luk available\n",
	       nr_free_pages() << (PAGE_SHIFT-10));
	kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
	setup_highmem(end_iomem, highmem);
#endif
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.  No-op if the pmd is already populated.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_KERNPG_TABLE +
					   (unsigned long) __pa(pte)));
		/* sanity: the entry we wrote must map back to 'pte' */
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
/*
 * Allocate a pmd page and hook it into the given pud.  Only needed
 * with 3-level page tables; with 2 levels the pmd is folded and this
 * is a no-op.
 */
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	/* sanity: the entry we wrote must map back to 'pmd_table' */
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
/*
 * Build the page-table skeleton (pud/pmd/pte pages, no mappings) for
 * the virtual range [start, end) under the given page directory.
 * Used for the fixmap and kmap areas; the actual ptes are filled in
 * later by set_fixmap()/kmap().
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		/* continue from pmd index 0 in the next pgd entry */
		j = 0;
	}
}
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
(vaddr)), (vaddr))
/*
 * Cache the pte pointer and protection bits used by the atomic kmap
 * (FIX_KMAP_*) machinery.
 */
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
/*
 * Set up the permanent-kmap (PKMAP) area: build its page-table
 * skeleton, record the pte page backing it in pkmap_page_table, and
 * initialise the atomic-kmap state.
 */
static void __init init_highmem(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;

	kmap_init();
}
#endif /* CONFIG_HIGHMEM */
/*
 * Populate the user fixmap area with a read-only copy of the host's
 * vsyscall page range, when the arch reuses it.  The host pages are
 * copied into freshly-allocated boot memory and mapped PAGE_READONLY
 * at FIXADDR_USER_START.
 */
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) alloc_bootmem_low_pages(size);
	/* snapshot the host's vsyscall area */
	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
/*
 * Early paging setup: allocate the zero page and bad page, declare the
 * memory zones, and build the fixmap (and, if enabled, highmem)
 * page-table skeletons.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
#endif
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();

#ifdef CONFIG_HIGHMEM
	init_highmem();
#endif
}
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */
void free_initmem(void)
{
}
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the initrd image's pages to the page allocator once the
 * initrd contents are no longer needed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
/* Allocate and free page tables. */

/*
 * Allocate a new page directory: user entries zeroed, kernel entries
 * copied from swapper_pg_dir so kernel mappings are shared.
 * Returns NULL on allocation failure.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
/* Release a page directory allocated by pgd_alloc(). */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
if (pte)
pgtable_page_ctor(pte);
return pte;
}
#ifdef CONFIG_3_LEVEL_PGTABLES
/*
 * Allocate one zeroed page to use as a pmd page (3-level page tables
 * only).  Returns NULL on allocation failure.
 *
 * Uses get_zeroed_page() instead of the previous allocate-then-memset
 * pair: same result, one call, and consistent with the __GFP_ZERO
 * allocations in pte_alloc_one_kernel() above.
 */
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *) get_zeroed_page(GFP_KERNEL);
}
#endif
/* Thin kmalloc wrapper exported for UML's userspace-side code. */
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}
| gpl-2.0 |
NamelessRom/android_kernel_google_msm | drivers/block/paride/pd.c | 5226 | 24220 | /*
pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
This is the high-level driver for parallel port IDE hard
drives based on chips supported by the paride module.
By default, the driver will autoprobe for a single parallel
port IDE drive, but if their individual parameters are
specified, the driver can handle up to 4 drives.
The behaviour of the pd driver can be altered by setting
some parameters from the insmod command line. The following
parameters are adjustable:
drive0 These four arguments can be arrays of
drive1 1-8 integers as follows:
drive2
drive3 <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
Where,
<prt> is the base of the parallel port address for
the corresponding drive. (required)
<pro> is the protocol number for the adapter that
supports this drive. These numbers are
logged by 'paride' when the protocol modules
are initialised. (0 if not given)
<uni> for those adapters that support chained
devices, this is the unit selector for the
chain of devices on the given port. It should
be zero for devices that don't support chaining.
(0 if not given)
<mod> this can be -1 to choose the best mode, or one
of the mode numbers supported by the adapter.
(-1 if not given)
<geo> this defaults to 0 to indicate that the driver
should use the CHS geometry provided by the drive
itself. If set to 1, the driver will provide
a logical geometry with 64 heads and 32 sectors
per track, to be consistent with most SCSI
drivers. (0 if not given)
<sby> set this to zero to disable the power saving
standby mode, if needed. (1 if not given)
<dly> some parallel ports require the driver to
go more slowly. -1 sets a default value that
should work with the chosen protocol. Otherwise,
set this to a small integer, the larger it is
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
<slv> IDE disks can be jumpered to master or slave.
Set this to 0 to choose the master drive, 1 to
choose the slave, -1 (the default) to choose the
first drive found.
major You may use this parameter to overide the
default major number (45) that this driver
will use. Be sure to change the device
name as well.
name This parameter is a character string that
contains the name the kernel will use for this
device (in /proc output, for instance).
(default "pd")
cluster The driver will attempt to aggregate requests
for adjacent blocks into larger multi-block
clusters. The maximum cluster size (in 512
byte sectors) is set with this parameter.
(default 64)
verbose This parameter controls the amount of logging
that the driver will do. Set it to 0 for
normal operation, 1 to see autoprobe progress
messages, or 2 to see additional debugging
output. (default 0)
nice This parameter controls the driver's use of
idle CPU time, at the expense of some speed.
If this driver is built into the kernel, you can use kernel
the following command line parameters, with the same values
as the corresponding module parameters listed above:
pd.drive0
pd.drive1
pd.drive2
pd.drive3
pd.cluster
pd.nice
In addition, you can use the parameter pd.disable to disable
the driver entirely.
*/
/* Changes:
1.01 GRG 1997.01.24 Restored pd_reset()
Added eject ioctl
1.02 GRG 1998.05.06 SMP spinlock changes,
Added slave support
1.03 GRG 1998.06.16 Eliminate an Ugh.
1.04 GRG 1998.08.15 Extra debugging, use HZ in loop timing
1.05 GRG 1998.09.24 Added jumbo support
*/
#define PD_VERSION "1.05"
#define PD_MAJOR 45
#define PD_NAME "pd"
#define PD_UNITS 4
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
#include <linux/types.h>
/* 'verbose' must be an int, not a bool: the documented levels are
 * 0 (quiet), 1 (autoprobe progress) and 2 (extra debug output, selected
 * via DBMSG's "verbose > 1" test).  A bool is clamped to 0/1, so level 2
 * could never be reached. */
static int verbose = 0;
static int major = PD_MAJOR;	/* block device major number */
static char *name = PD_NAME;	/* device name prefix: pda, pdb, ... */
static int cluster = 64;	/* max cluster size in 512-byte sectors */
static int nice = 0;		/* extra idle jiffies between FSM steps */
static int disable = 0;		/* non-zero: do not initialise the driver */
/* Per-unit probe parameters, indexed by the D_* enum below. */
static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };

static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};

/* Indices into driveN[]: port, protocol, unit, mode, alternate geometry,
 * standby, delay, slave select. */
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};

/* end of parameters */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>	/* for the eject ioctl */
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);
#include "paride.h"
#define PD_BITS 4
/* numbers for "SCSI" geometry */
#define PD_LOG_HEADS 64
#define PD_LOG_SECTS 32
#define PD_ID_OFF 54
#define PD_ID_LEN 14
#define PD_MAX_RETRIES 5
#define PD_TMO 800 /* interrupt timeout in jiffies */
#define PD_SPIN_DEL 50 /* spin delay in micro-seconds */
#define PD_SPIN (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
#define STAT_ERR 0x00001
#define STAT_INDEX 0x00002
#define STAT_ECC 0x00004
#define STAT_DRQ 0x00008
#define STAT_SEEK 0x00010
#define STAT_WRERR 0x00020
#define STAT_READY 0x00040
#define STAT_BUSY 0x00080
#define ERR_AMNF 0x00100
#define ERR_TK0NF 0x00200
#define ERR_ABRT 0x00400
#define ERR_MCR 0x00800
#define ERR_IDNF 0x01000
#define ERR_MC 0x02000
#define ERR_UNC 0x04000
#define ERR_TMO 0x10000
#define IDE_READ 0x20
#define IDE_WRITE 0x30
#define IDE_READ_VRFY 0x40
#define IDE_INIT_DEV_PARMS 0x91
#define IDE_STANDBY 0x96
#define IDE_ACKCHANGE 0xdb
#define IDE_DOORLOCK 0xde
#define IDE_DOORUNLOCK 0xdf
#define IDE_IDENTIFY 0xec
#define IDE_EJECT 0xed
#define PD_NAMELEN 8
struct pd_unit {
struct pi_adapter pia; /* interface to paride layer */
struct pi_adapter *pi;
int access; /* count of active opens ... */
int capacity; /* Size of this volume in sectors */
int heads; /* physical geometry */
int sectors;
int cylinders;
int can_lba;
int drive; /* master=0 slave=1 */
int changed; /* Have we seen a disk change ? */
int removable; /* removable media device ? */
int standby;
int alt_geom;
char name[PD_NAMELEN]; /* pda, pdb, etc ... */
struct gendisk *gd;
};
static struct pd_unit pd[PD_UNITS];
static char pd_scratch[512]; /* scratch block buffer */
static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
"READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
"IDNF", "MC", "UNC", "???", "TMO"
};
/* Read the IDE status register (control block, register 6). */
static inline int status_reg(struct pd_unit *disk)
{
	return pi_read_regr(disk->pi, 1, 6);
}

/* Read an IDE task-file register from the command block. */
static inline int read_reg(struct pd_unit *disk, int reg)
{
	return pi_read_regr(disk->pi, 0, reg);
}

/* Write the device-control register (control block, register 6). */
static inline void write_status(struct pd_unit *disk, int val)
{
	pi_write_regr(disk->pi, 1, 6, val);
}

/* Write an IDE task-file register in the command block. */
static inline void write_reg(struct pd_unit *disk, int reg, int val)
{
	pi_write_regr(disk->pi, 0, reg, val);
}

/* Drive/head register value selecting master (0xa0) or slave (0xb0). */
static inline u8 DRIVE(struct pd_unit *disk)
{
	return 0xa0+0x10*disk->drive;
}
/* ide command interface */
/* Print a status word together with the names of all error/status bits
 * that are set in it (see the pd_errs[] table). */
static void pd_print_error(struct pd_unit *disk, char *msg, int status)
{
	int bit;

	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
	for (bit = 0; bit < ARRAY_SIZE(pd_errs); bit++) {
		if (status & (1 << bit))
			printk(" %s", pd_errs[bit]);
	}
	printk("\n");
}
/* Pulse the SRST bit (bit 2) in the device-control register to reset
 * the channel, then give the drives time to recover.  The exact
 * write/delay sequence is what the hardware expects - do not reorder. */
static void pd_reset(struct pd_unit *disk)
{ /* called only for MASTER drive */
	write_status(disk, 4);
	udelay(50);
	write_status(disk, 0);
	udelay(250);
}
#define DBMSG(msg) ((verbose>1)?(msg):NULL)
/* Poll until all bits in 'w' are set in the status register and BSY is
 * clear, or until the PD_SPIN budget is exhausted.  Returns the combined
 * error<<8 | status word, with ERR_TMO folded in on timeout; diagnostics
 * are printed via pd_print_error() when 'msg' is non-NULL. */
static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
{ /* polled wait */
	int spins, status, err;

	for (spins = 0; spins < PD_SPIN; ) {
		status = status_reg(disk);
		spins++;
		if ((status & w) == w && !(status & STAT_BUSY))
			break;
		udelay(PD_SPIN_DEL);
	}
	err = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
	if (spins >= PD_SPIN)
		err |= ERR_TMO;
	if ((err & (STAT_ERR | ERR_TMO)) && msg != NULL)
		pd_print_error(disk, msg, err);
	return err;
}
/* Load the IDE task file and issue a command.  The drive/head register
 * must be written first to select the target drive; the command register
 * (7) is written last because writing it starts execution. */
static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
{
	write_reg(disk, 6, DRIVE(disk) + h);
	write_reg(disk, 1, 0);		/* the IDE task file */
	write_reg(disk, 2, n);		/* sector count */
	write_reg(disk, 3, s);		/* sector number / LBA low */
	write_reg(disk, 4, c0);		/* cylinder low / LBA mid */
	write_reg(disk, 5, c1);		/* cylinder high / LBA high */
	write_reg(disk, 7, func);	/* command - starts execution */
	udelay(1);
}
/* Translate a linear block number into the task-file fields and issue
 * the given IDE command for 'count' sectors. */
static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
{
	int c1, c0, h, s;

	if (disk->can_lba) {
		/* LBA28: bits 0..27 spread over sector/cyl-lo/cyl-hi/head;
		 * 0x40 in the head register selects LBA mode. */
		s = block & 255;
		c0 = (block >> 8) & 255;
		c1 = (block >> 16) & 255;
		h = ((block >> 24) & 15) + 0x40;
	} else {
		/* Classic CHS split; sector numbers are 1-based. */
		int cyl;

		s = (block % disk->sectors) + 1;
		block /= disk->sectors;
		h = block % disk->heads;
		cyl = block / disk->heads;
		c0 = cyl % 256;
		c1 = cyl >> 8;
	}
	pd_send_command(disk, count, s, h, c0, c1, func);
}
/* The i/o request engine */
/* Phase results: Fail/Ok finish the request (with/without error), Hold
 * re-schedules the FSM to run this phase again later, and Wait drops the
 * paride bus claim so the phase can be retried from scratch. */
enum action {Fail = 0, Ok = 1, Hold, Wait};

static struct request *pd_req;	/* current request */
static enum action (*phase)(void);	/* current phase; NULL = start new request */

static void run_fsm(void);

static void ps_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);

/* Kick the FSM work item; honour the 'nice' parameter by delaying the
 * next step when the user asked us to yield CPU time. */
static void schedule_fsm(void)
{
	if (!nice)
		schedule_delayed_work(&fsm_tq, 0);
	else
		schedule_delayed_work(&fsm_tq, nice-1);
}

static void ps_tq_int(struct work_struct *work)
{
	run_fsm();
}

static enum action do_pd_io_start(void);
static enum action pd_special(void);
static enum action do_pd_read_start(void);
static enum action do_pd_write_start(void);
static enum action do_pd_read_drq(void);
static enum action do_pd_write_done(void);

static struct request_queue *pd_queue;
static int pd_claimed;	/* 0 = bus unclaimed, 1 = claim pending, 2 = connected */

static struct pd_unit *pd_current; /* current request's drive */
static PIA *pi_current; /* current request's PIA */
/* The request engine: claim the paride bus, run the current phase, and
 * complete or re-queue the request according to the phase's verdict.
 * Both switch statements rely on deliberate fallthrough. */
static void run_fsm(void)
{
	while (1) {
		enum action res;
		unsigned long saved_flags;
		int stop = 0;

		if (!phase) {
			/* starting a new request: latch its drive/adapter */
			pd_current = pd_req->rq_disk->private_data;
			pi_current = pd_current->pi;
			phase = do_pd_io_start;
		}

		switch (pd_claimed) {
		case 0:
			pd_claimed = 1;
			if (!pi_schedule_claimed(pi_current, run_fsm))
				return;	/* re-run when the bus becomes ours */
			/* fall through - claim granted immediately */
		case 1:
			pd_claimed = 2;
			pi_current->proto->connect(pi_current);
		}

		switch(res = phase()) {
		case Ok: case Fail:
			/* request finished: release the bus and either
			 * fetch the next request or stop the engine */
			pi_disconnect(pi_current);
			pd_claimed = 0;
			phase = NULL;
			spin_lock_irqsave(&pd_lock, saved_flags);
			if (!__blk_end_request_cur(pd_req,
					res == Ok ? 0 : -EIO)) {
				pd_req = blk_fetch_request(pd_queue);
				if (!pd_req)
					stop = 1;
			}
			spin_unlock_irqrestore(&pd_lock, saved_flags);
			if (stop)
				return;
			/* fall through - schedule work on the next request */
		case Hold:
			schedule_fsm();
			return;
		case Wait:
			/* give up the bus; phase will be retried */
			pi_disconnect(pi_current);
			pd_claimed = 0;
		}
	}
}
static int pd_retries = 0;	/* i/o error retry count */
static int pd_block;		/* address of next requested block */
static int pd_count;		/* number of blocks still to do */
static int pd_run;		/* sectors in current cluster */
static int pd_cmd;		/* current command READ/WRITE */
static char *pd_buf;		/* buffer for request in progress */

/* First phase of every request: decode it into the pd_* state above and
 * dispatch to the read/write/special handler. */
static enum action do_pd_io_start(void)
{
	if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
		phase = pd_special;
		return pd_special();
	}

	pd_cmd = rq_data_dir(pd_req);
	if (pd_cmd == READ || pd_cmd == WRITE) {
		pd_block = blk_rq_pos(pd_req);
		pd_count = blk_rq_cur_sectors(pd_req);
		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
			return Fail;	/* beyond end of device */
		pd_run = blk_rq_sectors(pd_req);
		pd_buf = pd_req->buffer;
		pd_retries = 0;
		if (pd_cmd == READ)
			return do_pd_read_start();
		else
			return do_pd_write_start();
	}
	return Fail;
}

/* Run a driver-internal request: the handler function was stashed in
 * rq->special by pd_special_command(). */
static enum action pd_special(void)
{
	enum action (*func)(struct pd_unit *) = pd_req->special;
	return func(pd_current);
}

/* Step to the next 512-byte sector of the transfer.  Returns 1 when the
 * whole cluster (pd_run sectors) is done; otherwise completes the current
 * request segment and refills pd_count/pd_buf when it is exhausted. */
static int pd_next_buf(void)
{
	unsigned long saved_flags;

	pd_count--;
	pd_run--;
	pd_buf += 512;
	pd_block++;
	if (!pd_run)
		return 1;
	if (pd_count)
		return 0;
	spin_lock_irqsave(&pd_lock, saved_flags);
	__blk_end_request_cur(pd_req, 0);
	pd_count = blk_rq_cur_sectors(pd_req);
	pd_buf = pd_req->buffer;
	spin_unlock_irqrestore(&pd_lock, saved_flags);
	return 0;
}
static unsigned long pd_timeout;	/* jiffies deadline for drive polling */

/* Issue an IDE READ for the current cluster.  The data itself is
 * collected later by do_pd_read_drq() once the drive raises DRQ. */
static enum action do_pd_read_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;	/* release bus and retry this phase */
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
	phase = do_pd_read_drq;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

/* Issue an IDE WRITE and push every sector of the cluster to the drive;
 * completion is verified later by do_pd_write_done(). */
static enum action do_pd_write_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
	while (1) {
		/* drive must raise DRQ before each sector is transferred */
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				return Wait;
			}
			return Fail;
		}
		pi_write_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	phase = do_pd_write_done;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}
/* Non-blocking check: has the drive dropped BSY yet? */
static inline int pd_ready(void)
{
	return !(status_reg(pd_current) & STAT_BUSY);
}

/* Collect the data of a previously issued READ.  While the drive is
 * still busy (and the deadline has not passed) just Hold and poll again
 * later instead of spinning. */
static enum action do_pd_read_drq(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				phase = do_pd_read_start;	/* re-issue the READ */
				return Wait;
			}
			return Fail;
		}
		pi_read_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	return Ok;
}

/* Confirm completion of a previously issued WRITE, retrying the whole
 * write phase on error. */
static enum action do_pd_write_done(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			phase = do_pd_write_start;
			return Wait;
		}
		return Fail;
	}
	return Ok;
}
/* special io requests */
/* According to the ATA standard, the default CHS geometry should be
available following a reset. Some Western Digital drives come up
in a mode where only LBA addresses are accepted until the device
parameters are initialised.
*/
/* Program the drive's CHS translation (INITIALIZE DEVICE PARAMETERS) so
 * that drives which come up accepting only LBA addresses also work in
 * CHS mode - see the note above about Western Digital drives. */
static void pd_init_dev_parms(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
			IDE_INIT_DEV_PARMS);
	udelay(300);
	pd_wait_for(disk, 0, "Initialise device parameters");
}
/* Lock the media door of a removable drive.  Always reports Ok so that
 * drives without door-lock support do not fail the request. */
static enum action pd_door_lock(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, "Lock");

	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}
/* Unlock the media door of a removable drive.  Always reports Ok.
 * Fixed the copy-pasted diagnostic labels: this is the unlock path, so
 * the messages must say "Unlock", not "Lock". */
static enum action pd_door_unlock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Unlock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
		pd_wait_for(disk, STAT_READY, "Unlock done");
	}
	return Ok;
}
/* Unlock the door and eject the media.  Errors are only logged at debug
 * verbosity (DBMSG) since not every drive supports these commands. */
static enum action pd_eject(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
	pd_wait_for(disk, 0, DBMSG("before eject"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
	pd_wait_for(disk, 0, DBMSG("after eject"));
	return Ok;
}

/* Probe for a media change via READ VERIFY; sets disk->changed when the
 * drive reports a change (or on any other error, to be safe), and
 * acknowledges the change so it is not reported twice. */
static enum action pd_media_check(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
	} else
		disk->changed = 1;	/* say changed if other error */
	if (r & ERR_MC) {
		disk->changed = 1;
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
	}
	return Ok;
}

/* Disable the drive's automatic standby timer. */
static void pd_standby_off(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
}
/* Issue IDENTIFY DEVICE and fill in the unit's geometry, capacity and
 * media flags from the returned 512-byte block. */
static enum action pd_identify(struct pd_unit *disk)
{
	int j;
	char id[PD_ID_LEN + 1];

/* WARNING: here there may be dragons.  reset() applies to both drives,
   but we call it only on probing the MASTER. This should allow most
   common configurations to work, but be warned that a reset can clear
   settings on the SLAVE drive.
*/

	if (disk->drive == 0)
		pd_reset(disk);

	write_reg(disk, 6, DRIVE(disk));
	pd_wait_for(disk, 0, DBMSG("before IDENT"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);

	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
		return Fail;
	pi_read_block(disk->pi, pd_scratch, 512);
	/* decode the IDENTIFY data (byte offsets into little-endian words) */
	disk->can_lba = pd_scratch[99] & 2;
	disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
	disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
	disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
	if (disk->can_lba)
		disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
	else
		disk->capacity = disk->sectors * disk->heads * disk->cylinders;

	/* the model string is stored byte-swapped; un-swap and right-trim */
	for (j = 0; j < PD_ID_LEN; j++)
		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
	j = PD_ID_LEN - 1;
	while ((j >= 0) && (id[j] <= 0x20))
		j--;
	j++;
	id[j] = 0;

	disk->removable = pd_scratch[0] & 0x80;

	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
	       disk->name, id,
	       disk->drive ? "slave" : "master",
	       disk->capacity, disk->capacity / 2048,
	       disk->cylinders, disk->heads, disk->sectors,
	       disk->removable ? "removable" : "fixed");

	if (disk->capacity)
		pd_init_dev_parms(disk);
	if (!disk->standby)
		pd_standby_off(disk);

	return Ok;
}
/* end of io request engine */
/* Block-layer entry point: if the engine is idle, grab the next request
 * and kick the FSM.  When a request is already in flight the FSM itself
 * will fetch the next one on completion. */
static void do_pd_request(struct request_queue * q)
{
	if (pd_req)
		return;
	pd_req = blk_fetch_request(q);
	if (pd_req)
		schedule_fsm();
}
static int pd_special_command(struct pd_unit *disk,
enum action (*func)(struct pd_unit *disk))
{
struct request *rq;
int err = 0;
rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->special = func;
err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
blk_put_request(rq);
return err;
}
/* kernel glue structures */
/* Open: bump the use count; for removable media also refresh the
 * media-change state and lock the door while the device is open. */
static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	mutex_lock(&pd_mutex);
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	mutex_unlock(&pd_mutex);
	return 0;
}
/* Report drive geometry: either the real CHS values from IDENTIFY or,
 * when alt_geom is set, a synthetic 64-head/32-sector layout derived
 * from the capacity. */
static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	if (!disk->alt_geom) {
		geo->heads = disk->heads;
		geo->sectors = disk->sectors;
		geo->cylinders = disk->cylinders;
	} else {
		geo->heads = PD_LOG_HEADS;
		geo->sectors = PD_LOG_SECTS;
		geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
	}
	return 0;
}
/* Only CDROMEJECT is supported, and only when this opener is the sole
 * user of the device; everything else returns -EINVAL. */
static int pd_ioctl(struct block_device *bdev, fmode_t mode,
	 unsigned int cmd, unsigned long arg)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	switch (cmd) {
	case CDROMEJECT:
		mutex_lock(&pd_mutex);
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		mutex_unlock(&pd_mutex);
		return 0;
	default:
		return -EINVAL;
	}
}

/* Release: drop the use count; the last close unlocks the door of a
 * removable-media drive. */
static int pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	mutex_lock(&pd_mutex);
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	mutex_unlock(&pd_mutex);
	return 0;
}
/* Report (and clear) the media-change state of a removable drive. */
static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
{
	struct pd_unit *disk = p->private_data;
	int was_changed;

	if (!disk->removable)
		return 0;
	pd_special_command(disk, pd_media_check);
	was_changed = disk->changed;
	disk->changed = 0;
	return was_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
/* Re-identify the drive and publish its (possibly changed) capacity;
 * a failed identify results in a zero-sized disk. */
static int pd_revalidate(struct gendisk *p)
{
	struct pd_unit *disk = p->private_data;
	int identified = (pd_special_command(disk, pd_identify) == 0);

	set_capacity(p, identified ? disk->capacity : 0);
	return 0;
}
/* Block-device method table shared by all pd units. */
static const struct block_device_operations pd_fops = {
	.owner		= THIS_MODULE,
	.open		= pd_open,
	.release	= pd_release,
	.ioctl		= pd_ioctl,
	.getgeo		= pd_getgeo,
	.check_events	= pd_check_events,
	.revalidate_disk= pd_revalidate
};
/* probing */
/* Allocate a gendisk for the unit and try to identify a drive behind it
 * (probing both master and slave when drive == -1).  On failure the
 * gendisk is freed again and disk->gd stays NULL. */
static void pd_probe_drive(struct pd_unit *disk)
{
	struct gendisk *p = alloc_disk(1 << PD_BITS);
	if (!p)
		return;
	strcpy(p->disk_name, disk->name);
	p->fops = &pd_fops;
	p->major = major;
	p->first_minor = (disk - pd) << PD_BITS;
	disk->gd = p;
	p->private_data = disk;
	p->queue = pd_queue;

	if (disk->drive == -1) {
		/* autoprobe: try master first, then slave */
		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
			if (pd_special_command(disk, pd_identify) == 0)
				return;
	} else if (pd_special_command(disk, pd_identify) == 0)
		return;

	disk->gd = NULL;
	put_disk(p);
}
/* Scan the configured units, probe for drives (autoprobing a single
 * adapter when nothing was specified) and register every disk found.
 * Returns non-zero when at least one drive was detected.
 * Fix: the first loop used to declare a second 'disk' variable that
 * shadowed the function-scope one (-Wshadow); it now reuses it. */
static int pd_detect(void)
{
	int found = 0, unit, pd_drive_count = 0;
	struct pd_unit *disk;

	/* load per-unit parameters and count explicitly configured ports */
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		int *parm = *drives[unit];

		disk->pi = &disk->pia;
		disk->access = 0;
		disk->changed = 1;
		disk->capacity = 0;
		disk->drive = parm[D_SLV];
		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
		disk->alt_geom = parm[D_GEO];
		disk->standby = parm[D_SBY];
		if (parm[D_PRT])
			pd_drive_count++;
	}

	if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
		disk = pd;
		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
			    PI_PD, verbose, disk->name)) {
			pd_probe_drive(disk);
			if (!disk->gd)
				pi_release(disk->pi);
		}
	} else {
		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
			int *parm = *drives[unit];
			if (!parm[D_PRT])
				continue;
			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
				    parm[D_UNI], parm[D_PRO], parm[D_DLY],
				    pd_scratch, PI_PD, verbose, disk->name)) {
				pd_probe_drive(disk);
				if (!disk->gd)
					pi_release(disk->pi);
			}
		}
	}

	/* publish every successfully probed disk */
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		if (disk->gd) {
			set_capacity(disk->gd, disk->capacity);
			add_disk(disk->gd);
			found = 1;
		}
	}
	if (!found)
		printk("%s: no valid drive found\n", name);
	return found;
}
/* Module init: create the request queue, register the block major and
 * probe for drives.  Every failure unwinds and reports -ENODEV. */
static int __init pd_init(void)
{
	if (disable)
		goto out1;

	pd_queue = blk_init_queue(do_pd_request, &pd_lock);
	if (!pd_queue)
		goto out1;

	blk_queue_max_hw_sectors(pd_queue, cluster);

	if (register_blkdev(major, name))
		goto out2;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PD_VERSION, major, cluster, nice);
	if (!pd_detect())
		goto out3;

	return 0;

out3:
	unregister_blkdev(major, name);
out2:
	blk_cleanup_queue(pd_queue);
out1:
	return -ENODEV;
}
/* Module exit: unregister the major, tear down every registered disk
 * (and its paride adapter), then destroy the queue. */
static void __exit pd_exit(void)
{
	struct pd_unit *disk;
	int unit;
	unregister_blkdev(major, name);
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		struct gendisk *p = disk->gd;
		if (p) {
			disk->gd = NULL;
			del_gendisk(p);
			put_disk(p);
			pi_release(disk->pi);
		}
	}
	blk_cleanup_queue(pd_queue);
}
MODULE_LICENSE("GPL");
module_init(pd_init)
module_exit(pd_exit)
| gpl-2.0 |
AKToronto/Bubba-Zombie | fs/nfsd/nfscache.c | 5738 | 7795 | /*
* Request reply cache. This is currently a global cache, but this may
* change in the future and be a per-client cache.
*
* This code is heavily inspired by the 44BSD implementation, although
* it does things a bit differently.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/slab.h>
#include "nfsd.h"
#include "cache.h"
/* Size of reply cache. Common values are:
* 4.3BSD: 128
* 4.4BSD: 256
* Solaris2: 1024
* DEC Unix: 512-4096
*/
#define CACHESIZE		1024	/* total number of cached replies */
#define HASHSIZE		64	/* hash buckets (power of two) */

static struct hlist_head *	cache_hash;	/* XID hash -> entry chains */
static struct list_head 	lru_head;	/* all entries in LRU order */
static int			cache_disabled = 1; /* set until init succeeds */

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
	u32 h = xid;
	/* fold the top byte in so nearby XIDs spread across buckets */
	h ^= (xid >> 24);
	return h & (HASHSIZE-1);
}
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
/*
* locking for the reply cache:
* A cache entry is "single use" if c_state == RC_INPROG
* Otherwise, when accessing _prev or _next, the lock must be held.
*/
static DEFINE_SPINLOCK(cache_lock);
/* Allocate the reply cache: CACHESIZE empty entries strung on the LRU
 * list plus the XID hash table.  On allocation failure everything is
 * torn down again and -ENOMEM is returned. */
int nfsd_reply_cache_init(void)
{
	struct svc_cacherep *rp;
	int i;

	INIT_LIST_HEAD(&lru_head);
	for (i = CACHESIZE; i > 0; i--) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			goto out_nomem;
		list_add(&rp->c_lru, &lru_head);
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_HLIST_NODE(&rp->c_hash);
	}

	cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	cache_disabled = 0;
	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
/* Free the whole reply cache: every entry on the LRU (including any
 * cached reply buffer) plus the hash table.  Also used as the error
 * unwind path of nfsd_reply_cache_init(). */
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
			kfree(rp->c_replvec.iov_base);
		list_del(&rp->c_lru);
		kfree(rp);
	}

	cache_disabled = 1;

	kfree (cache_hash);
	cache_hash = NULL;
}
/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	/* re-hang the entry on the chain matching its (possibly new) XID */
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
/*
* Try to find an entry matching the current call in the cache. When none
* is found, we grab the oldest unlocked entry off the LRU list.
* Note that no operation within the loop may sleep.
*/
/* Look up the current RPC in the reply cache.  Returns RC_DOIT (process
 * it, a fresh entry was claimed), RC_DROPIT (duplicate in progress or
 * excessive retransmit) or RC_REPLY (a cached reply was resent). */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct hlist_node	*hn;
	struct hlist_head 	*rh;
	struct svc_cacherep	*rp;
	__be32			xid = rqstp->rq_xid;
	u32			proto =  rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn;

	rqstp->rq_cacherep = NULL;
	if (cache_disabled || type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return RC_DOIT;
	}

	spin_lock(&cache_lock);
	rtn = RC_DOIT;

	/* a match needs XID, procedure, protocol, version, address AND a
	 * timestamp younger than 120 seconds */
	rh = &cache_hash[request_hash(xid)];
	hlist_for_each_entry(rp, hn, rh, c_hash) {
		if (rp->c_state != RC_UNUSED &&
		    xid == rp->c_xid && proc == rp->c_proc &&
		    proto == rp->c_prot && vers == rp->c_vers &&
		    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
		    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
			nfsdstats.rchits++;
			goto found_entry;
		}
	}
	nfsdstats.rcmisses++;

	/* Not found: recycle the oldest entry that is not RC_INPROG.
	 * This loop shouldn't take more than a few iterations normally */
	{
	int	safe = 0;
	list_for_each_entry(rp, &lru_head, c_lru) {
		if (rp->c_state != RC_INPROG)
			break;
		if (safe++ > CACHESIZE) {
			printk("nfsd: loop in repcache LRU list\n");
			cache_disabled = 1;
			goto out;
		}
	}
	}

	/* All entries on the LRU are in-progress. This should not happen.
	 * (If the loop above completed without break, rp now points at the
	 * list head container, which this test detects.) */
	if (&rp->c_lru == &lru_head) {
		static int	complaints;

		printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
		if (++complaints > 5) {
			printk(KERN_WARNING "nfsd: disabling repcache.\n");
			cache_disabled = 1;
		}
		goto out;
	}

	/* claim the recycled entry for this request */
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_timestamp = jiffies;

	hash_refile(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	rp->c_timestamp = jiffies;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		rp->c_state = RC_UNUSED;
	}

	goto out;
}
/*
* Update a cache entry. This is called from nfsd_dispatch when
* the procedure has been executed and the complete reply is in
* rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
* of memory for a cache with a max reply size of 100 bytes (diropokres).
*
* If we should start to use different types of cache entries tailored
* specifically for attrstat and fh's, we may save even more space.
*
* Also note that a cachetype of RC_NOCACHE can legally be passed when
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;

	if (!(rp = rqstp->rq_cacherep) || cache_disabled)
		return;

	/* encoded reply length in 32-bit words, measured from the status
	 * word onwards */
	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		rp->c_state = RC_UNUSED;
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
		if (!cachv->iov_base) {
			/* no memory for the copy: drop the cache entry */
			spin_lock(&cache_lock);
			rp->c_state = RC_UNUSED;
			spin_unlock(&cache_lock);
			return;
		}
		cachv->iov_len = len << 2;
		memcpy(cachv->iov_base, statp, len << 2);
		break;
	}
	/* publish the finished entry and refresh its LRU position */
	spin_lock(&cache_lock);
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	rp->c_timestamp = jiffies;
	spin_unlock(&cache_lock);
	return;
}
/*
* Copy cached reply to current reply buffer. Should always fit.
* FIXME as reply is in a page, we should just attach the page, and
* keep a refcount....
*/
/* Append a cached reply to the response head kvec.  Returns 1 on
 * success, 0 when the combined data would exceed one page. */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*resv = &rqstp->rq_res.head[0];

	if (resv->iov_len + data->iov_len <= PAGE_SIZE) {
		memcpy((char*)resv->iov_base + resv->iov_len,
		       data->iov_base, data->iov_len);
		resv->iov_len += data->iov_len;
		return 1;
	}
	printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
	       data->iov_len);
	return 0;
}
| gpl-2.0 |
nikhiljan93/sony_yuga_kernel | net/ipv4/xfrm4_output.c | 10346 | 2354 | /*
* xfrm4_output.c - Common IPsec encapsulation code for IPv4.
* Copyright (c) 2004 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
/* Reject packets that exceed the path MTU when fragmentation is not
 * permitted (DF set and local_df clear), unless the check has already
 * been done for this skb.  The sender is notified via a local error on
 * its own socket or an ICMP FRAG_NEEDED otherwise. */
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
	int mtu, ret = 0;
	struct dst_entry *dst;

	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
		goto out;

	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
		goto out;

	dst = skb_dst(skb);
	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		if (skb->sk)
			ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
				       inet_sk(skb->sk)->inet_dport, mtu);
		else
			icmp_send(skb, ICMP_DEST_UNREACH,
				  ICMP_FRAG_NEEDED, htonl(mtu));
		ret = -EMSGSIZE;
	}
out:
	return ret;
}
/* Validate the packet size for tunnel mode, then record the inner
 * protocol and extract the IPv4 header fields for transformation. */
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err = xfrm4_tunnel_check_size(skb);

	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	return xfrm4_extract_header(skb);
}
/* Prepare an skb for IPsec output: extract inner headers, reset the IP
 * control block (marking it transformed and size-checked) and hand off
 * to the outer mode's second-stage output. */
int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;

	skb->protocol = htons(ETH_P_IP);

	return x->outer_mode->output2(x, skb);
}
EXPORT_SYMBOL(xfrm4_prepare_output);
/* Final output step: if no further transform applies to this dst, mark
 * the skb for rerouting and send it; otherwise flag it transformed and
 * run the xfrm output path.  The netfilter bookkeeping is compiled out
 * when CONFIG_NETFILTER is unset. */
int xfrm4_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (!skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}

	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
#endif

	skb->protocol = htons(ETH_P_IP);
	return xfrm_output(skb);
}
int xfrm4_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
NULL, dst->dev,
x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
| gpl-2.0 |
turuchan/ISW11SC_ICS_Kernel | drivers/media/video/pvrusb2/pvrusb2-debugifc.c | 11882 | 8583 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/string.h>
#include "pvrusb2-debugifc.h"
#include "pvrusb2-hdw.h"
#include "pvrusb2-debug.h"
/* Name/bit pair describing one flag within a debug mask word. */
struct debugifc_mask_item {
	const char *name;
	unsigned long msk;
};
/* Return the number of leading whitespace characters (space, tab,
 * newline) within the first 'count' bytes of buf. */
static unsigned int debugifc_count_whitespace(const char *buf,
					      unsigned int count)
{
	unsigned int idx;

	for (idx = 0; idx < count; idx++) {
		char ch = buf[idx];
		if (ch != ' ' && ch != '\t' && ch != '\n')
			break;
	}
	return idx;
}
/* Return the number of leading non-whitespace characters within the
 * first 'count' bytes of buf. */
static unsigned int debugifc_count_nonwhitespace(const char *buf,
						 unsigned int count)
{
	unsigned int idx;

	for (idx = 0; idx < count; idx++) {
		char ch = buf[idx];
		if (ch == ' ' || ch == '\t' || ch == '\n')
			break;
	}
	return idx;
}
/* Skip leading whitespace and locate the next word in buf.  Stores a
 * pointer to the word (or NULL) and its length via the out parameters,
 * and returns the total number of bytes consumed. */
static unsigned int debugifc_isolate_word(const char *buf,unsigned int count,
					  const char **wstrPtr,
					  unsigned int *wlenPtr)
{
	const char *word = NULL;
	unsigned int wordlen = 0;
	unsigned int used = 0;
	unsigned int span;

	/* consume leading whitespace */
	span = debugifc_count_whitespace(buf, count);
	used += span;
	count -= span;
	buf += span;
	if (count) {
		/* measure the word itself */
		span = debugifc_count_nonwhitespace(buf, count);
		if (span) {
			word = buf;
			wordlen = span;
			used += span;
		}
	}
	*wstrPtr = word;
	*wlenPtr = wordlen;
	return used;
}
/* Parse an unsigned number from a non-NUL-terminated buffer of 'count'
 * characters.  Accepts 0x/0X hex and leading-0 octal, otherwise decimal.
 * (The octal leading '0' is left in place; it parses as a harmless zero
 * digit.)  Stores the value and returns 0, or -EINVAL on a bad digit. */
static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
					  u32 *num_ptr)
{
	u32 result = 0;
	int radix = 10;
	if ((count >= 2) && (buf[0] == '0') &&
	    ((buf[1] == 'x') || (buf[1] == 'X'))) {
		radix = 16;
		count -= 2;
		buf += 2;
	} else if ((count >= 1) && (buf[0] == '0')) {
		radix = 8;
	}

	while (count--) {
		int val = hex_to_bin(*buf++);
		if (val < 0 || val >= radix)
			return -EINVAL;
		result *= radix;
		result += val;
	}
	*num_ptr = result;
	return 0;
}
/* Return non-zero if the 'count' characters at buf exactly equal the
 * NUL-terminated keyword (NULL keyword never matches). */
static int debugifc_match_keyword(const char *buf,unsigned int count,
				  const char *keyword)
{
	size_t kl;

	if (!keyword)
		return 0;
	kl = strlen(keyword);
	return (kl == count) && !memcmp(buf, keyword, kl);
}
/* Render the driver's hardware description and state report into buf
 * (at most acnt bytes); returns the number of bytes written. */
int pvr2_debugifc_print_info(struct pvr2_hdw *hdw,char *buf,unsigned int acnt)
{
	int bcnt = 0;
	int ccnt;
	ccnt = scnprintf(buf, acnt, "Driver hardware description: %s\n",
			 pvr2_hdw_get_desc(hdw));
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = scnprintf(buf,acnt,"Driver state info:\n");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	ccnt = pvr2_hdw_state_report(hdw,buf,acnt);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	return bcnt;
}
/* Render a runtime status report (USB speed, GPIO state, streaming
 * state and URB statistics) into buf; returns bytes written. */
int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
			       char *buf,unsigned int acnt)
{
	int bcnt = 0;
	int ccnt;
	int ret;
	u32 gpio_dir,gpio_in,gpio_out;
	struct pvr2_stream_stats stats;
	struct pvr2_stream *sp;

	ret = pvr2_hdw_is_hsm(hdw);
	ccnt = scnprintf(buf,acnt,"USB link speed: %s\n",
			 (ret < 0 ? "FAIL" : (ret ? "high" : "full")));
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	gpio_dir = 0; gpio_in = 0; gpio_out = 0;
	pvr2_hdw_gpio_get_dir(hdw,&gpio_dir);
	pvr2_hdw_gpio_get_out(hdw,&gpio_out);
	pvr2_hdw_gpio_get_in(hdw,&gpio_in);
	ccnt = scnprintf(buf,acnt,"GPIO state: dir=0x%x in=0x%x out=0x%x\n",
			 gpio_dir,gpio_in,gpio_out);
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	ccnt = scnprintf(buf,acnt,"Streaming is %s\n",
			 pvr2_hdw_get_streaming(hdw) ? "on" : "off");
	bcnt += ccnt; acnt -= ccnt; buf += ccnt;

	/* URB statistics are only available while a stream exists */
	sp = pvr2_hdw_get_video_stream(hdw);
	if (sp) {
		pvr2_stream_get_stats(sp, &stats, 0);
		ccnt = scnprintf(
			buf,acnt,
			"Bytes streamed=%u"
			" URBs: queued=%u idle=%u ready=%u"
			" processed=%u failed=%u\n",
			stats.bytes_processed,
			stats.buffers_in_queue,
			stats.buffers_in_idle,
			stats.buffers_in_ready,
			stats.buffers_processed,
			stats.buffers_failed);
		bcnt += ccnt; acnt -= ccnt; buf += ccnt;
	}

	return bcnt;
}
/*
 * Parse and execute a single debug interface command held in @buf/@count.
 * Commands are whitespace-separated words; the first word selects the
 * command group:
 *   reset cpu|bus|soft|deep|firmware|decoder|worker|usbstats
 *   cpufw fetch [prom|ram8k|ram16k] | cpufw done
 *   gpio dir|out <mask> [<value>]
 * Returns 0 on success or for empty input, a negative errno otherwise.
 */
static int pvr2_debugifc_do1cmd(struct pvr2_hdw *hdw,const char *buf,
				unsigned int count)
{
	const char *wptr;
	unsigned int wlen;
	unsigned int scnt;

	scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
	if (!scnt) return 0;
	count -= scnt; buf += scnt;
	if (!wptr) return 0;

	pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr);
	if (debugifc_match_keyword(wptr,wlen,"reset")) {
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"cpu")) {
			/* pulse the on-board CPU reset line */
			pvr2_hdw_cpureset_assert(hdw,!0);
			pvr2_hdw_cpureset_assert(hdw,0);
			return 0;
		} else if (debugifc_match_keyword(wptr,wlen,"bus")) {
			pvr2_hdw_device_reset(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"soft")) {
			return pvr2_hdw_cmd_powerup(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"deep")) {
			return pvr2_hdw_cmd_deep_reset(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"firmware")) {
			return pvr2_upload_firmware2(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"decoder")) {
			return pvr2_hdw_cmd_decoder_reset(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"worker")) {
			return pvr2_hdw_untrip(hdw);
		} else if (debugifc_match_keyword(wptr,wlen,"usbstats")) {
			/* reset (zero) the USB streaming statistics */
			pvr2_stream_get_stats(pvr2_hdw_get_video_stream(hdw),
					      NULL, !0);
			return 0;
		}
		return -EINVAL;
	} else if (debugifc_match_keyword(wptr,wlen,"cpufw")) {
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"fetch")) {
			scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
			if (scnt && wptr) {
				count -= scnt; buf += scnt;
				if (debugifc_match_keyword(wptr, wlen,
							   "prom")) {
					pvr2_hdw_cpufw_set_enabled(hdw, 2, !0);
				} else if (debugifc_match_keyword(wptr, wlen,
								  "ram8k")) {
					pvr2_hdw_cpufw_set_enabled(hdw, 0, !0);
				} else if (debugifc_match_keyword(wptr, wlen,
								  "ram16k")) {
					pvr2_hdw_cpufw_set_enabled(hdw, 1, !0);
				} else {
					return -EINVAL;
				}
			} else {
				/* BUGFIX: the default fetch target (mode 0,
				 * i.e. ram8k) is now selected only when no
				 * image keyword was supplied.  Previously
				 * this call also ran after an explicit
				 * prom/ram16k selection above and silently
				 * overrode it with mode 0. */
				pvr2_hdw_cpufw_set_enabled(hdw, 0, !0);
			}
			return 0;
		} else if (debugifc_match_keyword(wptr,wlen,"done")) {
			pvr2_hdw_cpufw_set_enabled(hdw,0,0);
			return 0;
		} else {
			return -EINVAL;
		}
	} else if (debugifc_match_keyword(wptr,wlen,"gpio")) {
		int dir_fl = 0;
		int ret;
		u32 msk,val;
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		if (debugifc_match_keyword(wptr,wlen,"dir")) {
			dir_fl = !0;
		} else if (!debugifc_match_keyword(wptr,wlen,"out")) {
			return -EINVAL;
		}
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (!scnt) return -EINVAL;
		count -= scnt; buf += scnt;
		if (!wptr) return -EINVAL;
		ret = debugifc_parse_unsigned_number(wptr,wlen,&msk);
		if (ret) return ret;
		scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
		if (wptr) {
			ret = debugifc_parse_unsigned_number(wptr,wlen,&val);
			if (ret) return ret;
		} else {
			/* only one number given: treat it as the value and
			   apply it to all GPIO bits */
			val = msk;
			msk = 0xffffffff;
		}
		if (dir_fl) {
			ret = pvr2_hdw_gpio_chg_dir(hdw,msk,val);
		} else {
			ret = pvr2_hdw_gpio_chg_out(hdw,msk,val);
		}
		return ret;
	}
	pvr2_trace(PVR2_TRACE_DEBUGIFC,
		   "debugifc failed to recognize cmd: \"%.*s\"",wlen,wptr);
	return -EINVAL;
}
/* Split @buf into newline-terminated command lines and execute each one;
   stops and returns the first negative error, otherwise 0. */
int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf,
			unsigned int count)
{
	unsigned int llen = 0;
	int ret;

	while (count) {
		/* Find the length of the next command line. */
		for (llen = 0; llen < count; llen++) {
			if (buf[llen] == '\n') break;
		}

		ret = pvr2_debugifc_do1cmd(hdw, buf, llen);
		if (ret < 0) return ret;

		/* Advance past the command and its newline, if present. */
		if (llen < count) llen++;
		buf += llen;
		count -= llen;
	}
	return 0;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 75 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
FrancescoCG/CrazySuperKernel-CM13-KLTE | arch/powerpc/sysdev/bestcomm/bcom_fec_rx_task.c | 14186 | 2688 | /*
* Bestcomm FEC RX task microcode
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Automatically created based on BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex
* on Tue Mar 22 11:19:38 2005 GMT
*/
#include <asm/types.h>
/*
* The header consists of the following fields:
* u32 magic;
* u8 desc_size;
* u8 var_size;
* u8 inc_size;
* u8 first_var;
* u8 reserved[8];
*
* The size fields contain the number of 32-bit words.
*/
/*
 * BestComm FEC RX task image, auto-generated from
 * BestCommAPI-2.2/code_dma/image_rtos1/dma_image.hex.  Do not hand-edit
 * the opcode words.  Layout: 4-word header, 24 task-descriptor words,
 * variable initializers VAR[9]-VAR[14], increments INC[0]-INC[6].
 */
u32 bcom_fec_rx_task[] = {
	/* header */
	0x4243544b,
	0x18060709,
	0x00000000,
	0x00000000,

	/* Task descriptors */
	0x808220e3, /* LCD: idx0 = var1, idx1 = var4; idx1 <= var3; idx0 += inc4, idx1 += inc3 */
	0x10601010, /* DRD1A: var4 = var2; FN=0 MORE init=3 WS=0 RS=0 */
	0xb8800264, /* LCD: idx2 = *idx1, idx3 = var0; idx2 < var9; idx2 += inc4, idx3 += inc4 */
	0x10001308, /* DRD1A: var4 = idx1; FN=0 MORE init=0 WS=0 RS=0 */
	0x60140002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=2 RS=2 */
	0x0cccfcca, /* DRD2B1: *idx3 = EU3(); EU3(*idx3,var10) */
	0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
	0xb8c58029, /* LCD: idx3 = *(idx1 + var00000015); idx3 once var0; idx3 += inc5 */
	0x60000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT init=0 WS=0 RS=0 */
	0x088cf8cc, /* DRD2B1: idx2 = EU3(); EU3(idx3,var12) */
	0x991982f2, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var11; idx2 += inc6, idx3 += inc2 */
	0x006acf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=1 RS=1 */
	0x80004000, /* LCDEXT: idx2 = 0x00000000; ; */
	0x9999802d, /* LCD: idx3 = idx3; idx3 once var0; idx3 += inc5 */
	0x70000002, /* DRD2A: EU0=0 EU1=0 EU2=0 EU3=2 EXT MORE init=0 WS=0 RS=0 */
	0x034cfc4e, /* DRD2B1: var13 = EU3(); EU3(*idx1,var14) */
	0x00008868, /* DRD1A: idx2 = var13; FN=0 init=0 WS=0 RS=0 */
	0x99198341, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var13; idx2 += inc0, idx3 += inc1 */
	0x007ecf80, /* DRD1A: *idx3 = *idx0; FN=0 init=3 WS=3 RS=3 */
	0x99198272, /* LCD: idx2 = idx2, idx3 = idx3; idx2 > var9; idx2 += inc6, idx3 += inc2 */
	0x046acf80, /* DRD1A: *idx3 = *idx0; FN=0 INT init=3 WS=1 RS=1 */
	0x9819002d, /* LCD: idx2 = idx0; idx2 once var0; idx2 += inc5 */
	0x0060c790, /* DRD1A: *idx1 = *idx2; FN=0 init=3 WS=0 RS=0 */
	0x000001f8, /* NOP */

	/* VAR[9]-VAR[14] */
	0x40000000,
	0x7fff7fff,
	0x00000000,
	0x00000003,
	0x40000008,
	0x43ffffff,

	/* INC[0]-INC[6] */
	0x40000000,
	0xe0000000,
	0xe0000000,
	0xa0000008,
	0x20000000,
	0x00000000,
	0x4000ffff,
};
| gpl-2.0 |
MIPS/kernel-common | fs/ntfs/usnjrnl.c | 14954 | 2743 | /*
* usnjrnl.h - NTFS kernel transaction log ($UsnJrnl) handling. Part of the
* Linux-NTFS project.
*
* Copyright (c) 2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef NTFS_RW
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include "aops.h"
#include "debug.h"
#include "endian.h"
#include "time.h"
#include "types.h"
#include "usnjrnl.h"
#include "volume.h"
/**
 * ntfs_stamp_usnjrnl - stamp the transaction log ($UsnJrnl) on an ntfs volume
 * @vol:	ntfs volume on which to stamp the transaction log
 *
 * Stamp the transaction log ($UsnJrnl) on the ntfs volume @vol and return
 * 'true' on success and 'false' on error.
 *
 * This function assumes that the transaction log has already been loaded and
 * consistency checked by a call to fs/ntfs/super.c::load_and_init_usnjrnl().
 */
bool ntfs_stamp_usnjrnl(ntfs_volume *vol)
{
	ntfs_debug("Entering.");
	if (likely(!NVolUsnJrnlStamped(vol))) {
		sle64 stamp;
		struct page *page;
		USN_HEADER *uh;

		/* Map the first page of $UsnJrnl/$DATA/$Max, which holds
		   the USN_HEADER to be updated. */
		page = ntfs_map_page(vol->usnjrnl_max_ino->i_mapping, 0);
		if (IS_ERR(page)) {
			ntfs_error(vol->sb, "Failed to read from "
					"$UsnJrnl/$DATA/$Max attribute.");
			return false;
		}
		uh = (USN_HEADER*)page_address(page);
		/* The new journal id is the current time stamp. */
		stamp = get_current_ntfs_time();
		ntfs_debug("Stamping transaction log ($UsnJrnl): old "
				"journal_id 0x%llx, old lowest_valid_usn "
				"0x%llx, new journal_id 0x%llx, new "
				"lowest_valid_usn 0x%llx.",
				(long long)sle64_to_cpu(uh->journal_id),
				(long long)sle64_to_cpu(uh->lowest_valid_usn),
				(long long)sle64_to_cpu(stamp),
				i_size_read(vol->usnjrnl_j_ino));
		/* Raising lowest_valid_usn to the size of the $J data stream
		   invalidates all existing journal entries. */
		uh->lowest_valid_usn =
				cpu_to_sle64(i_size_read(vol->usnjrnl_j_ino));
		uh->journal_id = stamp;
		/* Ensure the update reaches disk via the page cache. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		/* Set the flag so we do not have to do it again on remount. */
		NVolSetUsnJrnlStamped(vol);
	}
	ntfs_debug("Done.");
	return true;
}
#endif /* NTFS_RW */
| gpl-2.0 |
lukier/linux-samsung | drivers/s390/scsi/zfcp_fc.c | 619 | 28608 | /*
* zfcp device driver
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
struct kmem_cache *zfcp_fc_req_cache;
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
[ELS_ADDR_FMT_DOM] = 0xFF0000,
[ELS_ADDR_FMT_FAB] = 0x000000,
};
static bool no_auto_port_rescan;
module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
"no automatic port_rescan (default off)");
static unsigned int port_scan_backoff = 500;
module_param(port_scan_backoff, uint, 0600);
MODULE_PARM_DESC(port_scan_backoff,
"upper limit of port scan random backoff in msecs (default 500)");
static unsigned int port_scan_ratelimit = 60000;
module_param(port_scan_ratelimit, uint, 0600);
MODULE_PARM_DESC(port_scan_ratelimit,
"minimum interval between port scans in msecs (default 60000)");
/* Return a random backoff in [0, port_scan_backoff) milliseconds, or 0
   when the port_scan_backoff module parameter is disabled. */
unsigned int zfcp_fc_port_scan_backoff(void)
{
	return port_scan_backoff ? get_random_int() % port_scan_backoff : 0;
}
/* Record the earliest time the next automatic port scan may run:
   now + rate-limit interval + a random backoff. */
static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
{
	unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff());

	adapter->next_port_scan = jiffies +
				  msecs_to_jiffies(port_scan_ratelimit) +
				  backoff;
}
/* Queue the adapter's port scan work, delayed just long enough to honour
   the configured rate limit. */
static void zfcp_fc_port_scan(struct zfcp_adapter *adapter)
{
	unsigned long now = jiffies;
	unsigned long delay = 0;

	/* a delay is only needed while still inside the waiting period */
	if (time_before(now, adapter->next_port_scan)) {
		unsigned long max = msecs_to_jiffies(port_scan_ratelimit +
						     port_scan_backoff);

		delay = adapter->next_port_scan - now;
		/* paranoia: never ever delay scans longer than specified */
		if (delay > max)
			delay = max;
	}
	queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay);
}
/* Trigger a port scan unless automatic rescans are disabled via the
   no_auto_port_rescan module parameter. */
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (!no_auto_port_rescan)
		zfcp_fc_port_scan(adapter);
}
/* Trigger a port scan only when automatic rescans are otherwise disabled
   (the inverse condition of zfcp_fc_conditional_port_scan()). */
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (no_auto_port_rescan)
		zfcp_fc_port_scan(adapter);
}
/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						events);

	/* move the whole pending list aside under the lock, then deliver
	   the events outside of it */
	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}
}
/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *		scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 *
 * The event is silently dropped when no memory is available (GFP_ATOMIC
 * allocation); actual delivery happens later in zfcp_fc_post_event().
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			   enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}
/* Acquire a reference to a well-known-address (WKA) port, opening it first
 * if it is offline or closing.  Sleeps until the port is either online or
 * definitely offline.  Returns 0 with a reference held, -ERESTARTSYS if
 * interrupted while waiting for the mutex, -EIO if the open failed. */
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		/* if the open request cannot even be started, fall back */
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	/* wait until the open attempt has reached a final state */
	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}
/* Delayed work: close a WKA port again after its grace period; skipped if
 * new users took a reference meanwhile or the port is not online. */
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		/* close request could not be started: declare the port
		   offline right away and wake any waiters */
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}
/* Drop a WKA port reference; on the last one, schedule a lazy close:
   wait 10 milliseconds, other reqs might pop in. */
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) == 0)
		schedule_delayed_work(&wka_port->work, HZ / 100);
}
/* Initialize a WKA port object: offline, refcount 0, bound to @adapter
   and destination id @d_id, with its delayed-close worker armed. */
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	wka_port->adapter = adapter;
	wka_port->d_id = d_id;
	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);

	init_waitqueue_head(&wka_port->completion_wq);
	mutex_init(&wka_port->mutex);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
/* Cancel any pending delayed close and mark the WKA port offline. */
static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}
/* Force all well-known-address ports (management, time, directory, alias)
 * of @gs offline; tolerates a NULL container. */
void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;
	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}
/* Apply one RSCN page: test the link of every known port whose d_id falls
 * into the affected address @range, and retry recovery of ports that
 * still have no d_id at all. */
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1");
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
/* Handle an incoming RSCN (registered state change notification) ELS:
 * process each address page, forward it as an FC host event, then trigger
 * a conditional port rescan. */
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;

	/* see FC-FS */
	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		/* address format selects how many fid bits are significant */
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
/* Force a reopen of the (single) known port that matches the remote
   @wwpn; no-op when no port matches. */
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if (port->wwpn != wwpn)
			continue;
		zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
		break;
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
/* Incoming PLOGI: extract the peer WWPN from the payload and force a
   reopen of the matching port. */
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sbuf =
		(struct fsf_status_read_buffer *) req->data;
	struct fc_els_flogi *plogi =
		(struct fc_els_flogi *) sbuf->payload.data;

	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}
/* Incoming LOGO: extract the peer N_Port WWPN from the payload and force
   a reopen of the matching port. */
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sbuf;
	struct fc_els_logo *logo;

	sbuf = (struct fsf_status_read_buffer *) req->data;
	logo = (struct fc_els_logo *) sbuf->payload.data;
	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}
/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req - request which contains incoming ELS
 *
 * Dispatches on the ELS command code in the first payload byte; only
 * PLOGI, LOGO and RSCN are handled, everything else is ignored.
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_in_els("fciels1", fsf_req);
	switch (els_type) {
	case ELS_PLOGI:
		zfcp_fc_incoming_plogi(fsf_req);
		break;
	case ELS_LOGO:
		zfcp_fc_incoming_logo(fsf_req);
		break;
	case ELS_RSCN:
		zfcp_fc_incoming_rscn(fsf_req);
		break;
	default:
		break;
	}
}
/* Evaluate a GID_PN response: store the resolved d_id in the port, but
   only for a successful request whose CT response was accepted. */
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gid_pn_rsp *rsp = &fc_req->u.gid_pn.rsp;

	if (ct_els->status || rsp->ct_hdr.ct_cmd != FC_FS_ACC)
		return;

	/* looks like a valid d_id */
	ct_els->port->d_id = ntoh24(rsp->gid_pn.fp_fid);
}
/* Generic CT/ELS completion handler: @data is a struct completion to be
 * signalled, waking the synchronous submitter. */
static void zfcp_fc_complete(void *data)
{
	complete(data);
}
/* Fill a CT IU header for a name-server (directory service) request:
 * @cmd is the GS command code, @mr_size the maximum residual size in
 * bytes (stored in the header in units of 4-byte words). */
static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
	ct_hdr->ct_rev = FC_CT_REV;
	ct_hdr->ct_fs_type = FC_FST_DIR;
	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
	ct_hdr->ct_cmd = cmd;
	ct_hdr->ct_mr_size = mr_size / 4;
}
/* Synchronously send a GID_PN (get ID by port name) request for @port to
 * the fabric directory service and, on completion, evaluate the response
 * (which may store the resolved d_id in @port).  Returns the send status. */
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_req *fc_req)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
	int ret;

	/* setup parameters for send generic command */
	fc_req->ct_els.port = port;
	fc_req->ct_els.handler = zfcp_fc_complete;
	fc_req->ct_els.handler_data = &completion;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		/* block until the completion handler has fired */
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(fc_req);
	}
	return ret;
}
/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM on error, 0 otherwise
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;

	/* request memory comes from a mempool to survive memory pressure */
	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	memset(fc_req, 0, sizeof(*fc_req));

	/* the directory service WKA port must be online for the request */
	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(fc_req, adapter->pool.gid_pn);
	return ret;
}
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
int ret;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
goto out;
}
if (!port->d_id) {
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
goto out;
}
zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
put_device(&port->dev);
}
/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	/* drop the reference again if the work item was already queued */
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->dev);
}
/**
* zfcp_fc_plogi_evaluate - evaluate PLOGI playload
* @port: zfcp_port structure
* @plogi: plogi payload
*
* Evaluate PLOGI playload and copy important fields into zfcp_port structure
*/
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
if (plogi->fl_wwpn != port->wwpn) {
port->d_id = 0;
dev_warn(&port->adapter->ccw_device->dev,
"A port opened with WWPN 0x%016Lx returned data that "
"identifies it as WWPN 0x%016Lx\n",
(unsigned long long) port->wwpn,
(unsigned long long) plogi->fl_wwpn);
return;
}
port->wwnn = plogi->fl_wwnn;
port->maxframe_size = plogi->fl_csp.sp_bb_data;
if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS1;
if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS2;
if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS3;
if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
port->supported_classes |= FC_COS_CLASS4;
}
/* Completion handler for an outgoing ADISC: verify the peer identity and
 * either unblock the rport or trigger recovery; always clears the
 * LINK_TEST flag, drops the port device reference and frees the request. */
static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_req *fc_req = data;
	struct zfcp_port *port = fc_req->ct_els.port;
	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

	if (fc_req->ct_els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1");
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = adisc_resp->adisc_wwnn;

	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		/* peer identity changed or port was closed meanwhile */
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2");
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
/* Build and send an ADISC ELS to @port to verify the link; the response
 * is handled asynchronously in zfcp_fc_adisc_handler().  On send failure
 * the request is freed here and the error is returned. */
static int zfcp_fc_adisc(struct zfcp_port *port)
{
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	int ret;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	fc_req->ct_els.port = port;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
		    sizeof(struct fc_els_adisc));
	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
		    sizeof(struct fc_els_adisc));

	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
	fc_req->ct_els.handler_data = fc_req;

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
				ZFCP_FC_CTELS_TMO);
	if (ret)
		kmem_cache_free(zfcp_fc_req_cache, fc_req);
	return ret;
}
/* Work item: test the link to a remote port with ADISC after first
 * tearing down the rport.  Drops the reference taken by
 * zfcp_fc_test_link() unless the ADISC handler will do it. */
void zfcp_fc_link_test_work(struct work_struct *work)
{
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at one time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;
	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		/* the ADISC handler drops the reference taken above */
		return;

	/* send of ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

out:
	put_device(&port->dev);
}
/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	/* drop the reference again if the work item was already queued */
	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
		put_device(&port->dev);
}
/* Allocate a zfcp_fc_req with a @buf_num-buffer response scatterlist and
 * the request scatterlist prepared for a GPN_FT request; returns NULL on
 * allocation failure. */
static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_req *fc_req;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return NULL;

	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
		/* response table could not be set up: undo the allocation */
		kmem_cache_free(zfcp_fc_req_cache, fc_req);
		return NULL;
	}

	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
		    sizeof(struct zfcp_fc_gpn_ft_req));

	return fc_req;
}
/* Synchronously send a GPN_FT (get port names by FC4 type) request for
 * SCSI-FCP ports to the directory service; returns the send status. */
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		/* block until the completion handler has fired */
		wait_for_completion(&completion);
	return ret;
}
/* Clear the NOESC marker set during the fabric scan; a port that still
   has no supported classes and no units was not re-discovered, so move
   it onto @lh for later removal. */
static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if (port->supported_classes == 0 && list_empty(&port->unit_list))
		list_move_tail(&port->list, lh);
}
/* Evaluate a GPN_FT response: attach newly discovered remote ports and
 * remove ports that are no longer reported by the fabric.  Returns 0 on
 * success, -EAGAIN when the name server asked for a retry, -EIO/-E2BIG
 * on failed or truncated responses. */
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_entries)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct scatterlist *sg = &fc_req->sg_rsp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct_els->status)
		return -EIO;
	if (hdr->ct_cmd != FC_FS_ACC) {
		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		/* residual data means the response did not fit our buffer */
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		/* entries are packed per scatterlist buffer; advance to the
		   next buffer after each ENT_PAGE entries */
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1");
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);
	/* collect ports that vanished from the fabric, then unregister them
	   outside the list lock */
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
		device_unregister(&port->dev);
	}
	return ret;
}
/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_req *fc_req;
	int chain, max_entries, buf_num, max_bytes;

	/* record the scan time first so the rate limit covers this scan */
	zfcp_fc_port_scan_time(adapter);

	/* chained SBAL support allows a larger GPN_FT response per request */
	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	/* scanning only makes sense in an N_Port or NPIV topology */
	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	fc_req = zfcp_alloc_sg_env(buf_num);
	if (!fc_req)
		goto out;

	/* retry a few times when the name server is temporarily unable */
	for (i = 0; i < 3; i++) {
		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}
/* Issue a GSPN_ID (get symbolic port name) request to the name server
 * and store the result in the fc_host symbolic_name attribute.  In NPIV
 * mode, a "DEVNO: ... NAME: ..." suffix is appended unless the returned
 * name already carries a DEVNO: tag.  Returns 0 on success, otherwise a
 * negative error or the CT/ELS status code. */
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
			struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	char devno[] = "DEVNO:";
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
	int ret;

	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));

	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (ret)
		return ret;

	wait_for_completion(&completion);
	if (ct_els->status)
		return ct_els->status;

	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
		snprintf(fc_host_symbolic_name(adapter->scsi_host),
			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
			 gspn_rsp->gspn.fp_name, devno,
			 dev_name(&adapter->ccw_device->dev),
			 init_utsname()->nodename);
	else
		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);

	return 0;
}
/* Register the local symbolic port name with the fabric name server via
 * an RSPN_ID request; best-effort, the response status is not checked. */
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
			 struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct Scsi_Host *shost = adapter->scsi_host;
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
	int ret, len;

	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
	/* NOTE(review): strlcpy() returns the length of the untruncated
	   source string, so fr_name_len could exceed the number of bytes
	   actually copied if the name is longer than the buffer -- confirm */
	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
		      FC_SYMBOLIC_NAME_SIZE);
	rspn_req->rspn.fr_name_len = len;

	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
}
/**
 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
 * @work: ns_up_work of the adapter where to update the symbolic port name
 *
 * Retrieve the current symbolic port name that may have been set by
 * the hardware using the GSPN request and update the fc_host
 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
 * the port name is unique for this system), update the symbolic port
 * name to add Linux specific information and update the FC nameserver
 * using the RSPN request.
 */
void zfcp_fc_sym_name_update(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    ns_up_work);
	int ret;
	struct zfcp_fc_req *fc_req;

	/* only meaningful in an N_Port or NPIV topology */
	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return;

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out_free;

	ret = zfcp_fc_gspn(adapter, fc_req);
	/* the nameserver is only updated (RSPN) in NPIV mode */
	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		goto out_ds_put;

	/* reuse the request buffer for the RSPN request */
	memset(fc_req, 0, sizeof(*fc_req));
	zfcp_fc_rspn(adapter, fc_req);

out_ds_put:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
/*
 * zfcp_fc_ct_els_job_handler - completion handler for CT/ELS bsg jobs:
 * mirrors the request outcome into the bsg reply and finishes the job.
 */
static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct fc_bsg_job *bsg_job = data;
	struct zfcp_fsf_ct_els *ct_els = bsg_job->dd_data;
	struct fc_bsg_reply *reply = bsg_job->reply;

	reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
	reply->result = ct_els->status ? -EIO : 0;
	bsg_job->job_done(bsg_job);
}
/*
 * zfcp_fc_job_wka_port - map a bsg CT request to the matching well known
 * address port of the adapter, selected by the GS type encoded in the
 * request preamble.  Returns NULL for unknown GS types.
 *
 * Note: the original code had unreachable "break" statements after two of
 * the "return" statements; they are removed here as dead code.
 */
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	/* GS type lives in the top byte of the first preamble word */
	gs_type = (preamble_word1 & 0xff000000) >> 24;
	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}
/*
 * zfcp_fc_ct_job_handler - completion handler for CT bsg jobs: releases
 * the WKA port reference taken in zfcp_fc_exec_ct_job() and then runs
 * the common CT/ELS completion.
 */
static void zfcp_fc_ct_job_handler(void *data)
{
	struct fc_bsg_job *job = data;

	zfcp_fc_wka_port_put(zfcp_fc_job_wka_port(job));
	zfcp_fc_ct_els_job_handler(data);
}
/*
 * zfcp_fc_exec_els_job - resolve the destination id of an ELS bsg job and
 * hand it to the FSF layer.  Returns 0 or a negative errno.
 */
static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
				struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *els = job->dd_data;
	struct fc_rport *rport = job->rport;
	u32 d_id;

	if (!rport) {
		/* host-initiated ELS: destination id comes from the request */
		d_id = ntoh24(job->request->rqst_data.h_els.port_id);
	} else {
		struct zfcp_port *port;

		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
		if (!port)
			return -EINVAL;
		d_id = port->d_id;
		put_device(&port->dev);
	}

	els->handler = zfcp_fc_ct_els_job_handler;
	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
/*
 * zfcp_fc_exec_ct_job - send a CT bsg job to the matching WKA port.
 * Takes a WKA port reference that the completion handler releases;
 * on submit failure the reference is dropped here.
 */
static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
	struct zfcp_fc_wka_port *wka_port = zfcp_fc_job_wka_port(job);
	int ret;

	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct_els->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct_els, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);
	return ret;
}
/*
 * zfcp_fc_exec_bsg_job - entry point for FC bsg requests: wires the job's
 * scatter-gather lists into the CT/ELS descriptor and dispatches by the
 * bsg message code.  Returns 0 or a negative errno.
 */
int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
	struct Scsi_Host *scsi_host;
	struct zfcp_adapter *adapter;

	scsi_host = job->rport ? rport_to_shost(job->rport) : job->shost;
	adapter = (struct zfcp_adapter *)scsi_host->hostdata[0];

	/* reject requests while the adapter is not open */
	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL;
	}
}
/**
 * zfcp_fc_timeout_bsg_job - handle a bsg request timeout
 * @job: the bsg job whose block-layer timer fired
 *
 * Returns -EAGAIN so that the request is not aborted here; the channel
 * hardware enforces its own request timeout (see comment below).
 */
int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EAGAIN;
}
/**
 * zfcp_fc_gs_setup - allocate and initialize the generic services WKA ports
 * @adapter: adapter to set up the well known address ports for
 *
 * Allocates the WKA port table, attaches it to the adapter and
 * initializes the management, time, directory and alias service ports.
 *
 * Returns: 0 on success, -ENOMEM if the allocation fails.
 */
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *wka_ports;

	/* sizeof(*ptr) keeps the allocation in sync with the pointer type */
	wka_ports = kzalloc(sizeof(*wka_ports), GFP_KERNEL);
	if (!wka_ports)
		return -ENOMEM;
	adapter->gs = wka_ports;
	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
	return 0;
}
/**
 * zfcp_fc_gs_destroy - free the generic services WKA port table
 * @adapter: adapter whose WKA port table is released
 */
void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	kfree(adapter->gs);
	adapter->gs = NULL; /* guard against stale pointer use */
}
| gpl-2.0 |
jogger0703/linux | drivers/scsi/aacraid/aachba.c | 619 | 101580 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <asm/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
/* values for inqd_pdt: Peripheral device type in plain English */
#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
#define INQD_PDT_PROC 0x03 /* Processor device */
#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifier Mask */
/*
 * Sense codes
 */
#define SENCODE_NO_SENSE 0x00
#define SENCODE_END_OF_DATA 0x00
#define SENCODE_BECOMING_READY 0x04
#define SENCODE_INIT_CMD_REQUIRED 0x04
#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
#define SENCODE_INVALID_COMMAND 0x20
#define SENCODE_LBA_OUT_OF_RANGE 0x21
#define SENCODE_INVALID_CDB_FIELD 0x24
#define SENCODE_LUN_NOT_SUPPORTED 0x25
#define SENCODE_INVALID_PARAM_FIELD 0x26
#define SENCODE_PARAM_NOT_SUPPORTED 0x26
#define SENCODE_PARAM_VALUE_INVALID 0x26
#define SENCODE_RESET_OCCURRED 0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
#define SENCODE_DIAGNOSTIC_FAILURE 0x40
#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
#define SENCODE_INVALID_MESSAGE_ERROR 0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
#define SENCODE_OVERLAPPED_COMMAND 0x4E
/*
 * Additional sense codes (qualifiers paired with the SENCODE_* values above)
 */
#define ASENCODE_NO_SENSE 0x00
#define ASENCODE_END_OF_DATA 0x05
#define ASENCODE_BECOMING_READY 0x01
#define ASENCODE_INIT_CMD_REQUIRED 0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
#define ASENCODE_INVALID_COMMAND 0x00
#define ASENCODE_LBA_OUT_OF_RANGE 0x00
#define ASENCODE_INVALID_CDB_FIELD 0x00
#define ASENCODE_LUN_NOT_SUPPORTED 0x00
#define ASENCODE_INVALID_PARAM_FIELD 0x00
#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
#define ASENCODE_PARAM_VALUE_INVALID 0x02
#define ASENCODE_RESET_OCCURRED 0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
#define ASENCODE_OVERLAPPED_COMMAND 0x00
/* Extract byte 0 (least significant) .. byte 3 of a 32-bit value */
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
/* MODE_SENSE data format */
typedef struct {
	struct {
		u8 data_length;
		u8 med_type;
		u8 dev_par;
		u8 bd_length;
	} __attribute__((packed)) hd;
	struct {
		u8 dens_code;
		u8 block_count[3];
		u8 reserved;
		u8 block_length[3];
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];
} __attribute__((packed)) aac_modep_data;
/* MODE_SENSE_10 data format */
typedef struct {
	struct {
		u8 data_length[2];
		u8 med_type;
		u8 dev_par;
		u8 rsrvd[2];
		u8 bd_length[2];
	} __attribute__((packed)) hd;
	struct {
		u8 dens_code;
		u8 block_count[3];
		u8 reserved;
		u8 block_length[3];
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];
} __attribute__((packed)) aac_modep10_data;
/*------------------------------------------------------------------------------
 *              S T R U C T S / T Y P E D E F S
 *----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};
/* Added for VPD 0x83 */
/* T10 vendor identification descriptor (ASCII serial number) */
typedef struct {
	u8 CodeSet:4;	/* VPD_CODE_SET */
	u8 Reserved:4;
	u8 IdentifierType:4;	/* VPD_IDENTIFIER_TYPE */
	u8 Reserved2:4;
	u8 Reserved3;
	u8 IdentifierLength;
	u8 VendId[8];
	u8 ProductId[16];
	u8 SerialNumber[8];	/* SN in ASCII */
} TVPD_ID_Descriptor_Type_1;
/* EUI-64 based identification descriptor (binary serial number) */
typedef struct {
	u8 CodeSet:4;	/* VPD_CODE_SET */
	u8 Reserved:4;
	u8 IdentifierType:4;	/* VPD_IDENTIFIER_TYPE */
	u8 Reserved2:4;
	u8 Reserved3;
	u8 IdentifierLength;
	struct TEU64Id {
		u32 Serial;
		/* The serial number supposed to be 40 bits,
		 * but we only support 32, so make the last byte zero. */
		u8 Reserved;
		u8 VendId[3];
	} EU64Id;
} TVPD_ID_Descriptor_Type_2;
/* Layout of the VPD page 0x83 (Device Identification) response */
typedef struct {
	u8 DeviceType:5;
	u8 DeviceTypeQualifier:3;
	u8 PageCode;
	u8 Reserved;
	u8 PageLength;
	TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
	TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
} TVPD_Page83;
/*
 *              M O D U L E   G L O B A L S
 */
/* Scatter/gather list builders and SRB passthrough, defined later in file */
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
				struct aac_raw_io2 *rio2, int sg_max);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
				int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif
/*
 *	Non dasd selection is handled entirely in aachba now
 */
/* Module load-time tunables; all are also writable via sysfs (S_IWUSR) */
static int nondasd = -1;
static int aac_cache = 2;	/* WCE=0 to avoid performance problems */
static int dacmode = -1;
int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode;		/* Only Sync. transfer - disabled */
int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
	" 0=off, 1=on");
module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
	" 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
	" 0=off, 1=on");
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
	"\tbit 0 - Disable FUA in WRITE SCSI commands\n"
	"\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
	"\tbit 2 - Disable only if Battery is protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
	" 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
	" adapter for foreign arrays.\n"
	"This is typically needed in systems that do not have a BIOS."
	" 0=off, 1=on");
module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
	" adapter to have it's kernel up and\n"
	"running. This is typically adjusted for large systems that do not"
	" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
	" applications to pick up AIFs before\n"
	"deregistering them. This is typically adjusted for heavily burdened"
	" systems.");
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
	" blocks (FIB) allocated. Valid values are 512 and down. Default is"
	" to use suggestion from Firmware.");
int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
	" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
	" suggestion from Firmware.");
int update_interval = 30 * 60;
module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
	" updates issued to adapter.");
int check_interval = 24 * 60 * 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
	" checks.");
int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
	" adapter. a value of -1 forces the reset to adapters programmed to"
	" ignore it.");
int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
	" -1=protect 0=off, 1=on");
int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
	"\t0 - Disable\n"
	"\t1 - Array Meta Data Signature (default)\n"
	"\t2 - Adapter Serial Number");
/*
 * aac_valid_context - validate the SCSI command attached to a completed FIB.
 * Returns 1 when the command and its device are still usable; otherwise
 * releases the FIB and returns 0.  On the success path the command's
 * ownership phase is handed back to the midlevel.
 */
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
		struct fib *fibptr) {
	struct scsi_device *device;
	int ok = 0;

	if (likely(scsicmd && scsicmd->scsi_done)) {
		scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
		device = scsicmd->device;
		if (likely(device && scsi_device_online(device)))
			ok = 1;
		else
			dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
	} else {
		dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
	}

	if (!ok) {
		/* nobody will complete this FIB for us - release it here */
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
	}
	return ok;
}
/**
 * aac_get_config_status - check the adapter configuration
 * @dev: adapter to query
 * @commit_flag: when set, force sending CT_COMMIT_CONFIG on success
 *
 * Query config status, and commit the configuration if needed.
 */
int aac_get_config_status(struct aac_dev *dev, int commit_flag)
{
	int status = 0;
	struct fib * fibptr;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	aac_fib_init(fibptr);
	{
		struct aac_get_config_status *dinfo;
		dinfo = (struct aac_get_config_status *) fib_data(fibptr);
		dinfo->command = cpu_to_le32(VM_ContainerConfig);
		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
	}
	status = aac_fib_send(ContainerCommand,
			    fibptr,
			    sizeof (struct aac_get_config_status),
			    FsaNormal,
			    1, 1,
			    NULL, NULL);
	if (status < 0) {
		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
	} else {
		struct aac_get_config_status_resp *reply
		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
		dprintk((KERN_WARNING
		  "aac_get_config_status: response=%d status=%d action=%d\n",
		  le32_to_cpu(reply->response),
		  le32_to_cpu(reply->status),
		  le32_to_cpu(reply->data.action)));
		/* refuse to commit unless the adapter reports a sane state */
		if ((le32_to_cpu(reply->response) != ST_OK) ||
		     (le32_to_cpu(reply->status) != CT_OK) ||
		     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
			status = -EINVAL;
		}
	}
	/* Do not set XferState to zero unless receives a response from F/W */
	if (status >= 0)
		aac_fib_complete(fibptr);
	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
	if (status >= 0) {
		if ((aac_commit == 1) || commit_flag) {
			struct aac_commit_config * dinfo;
			/* reuse the FIB for the commit request */
			aac_fib_init(fibptr);
			dinfo = (struct aac_commit_config *) fib_data(fibptr);
			dinfo->command = cpu_to_le32(VM_ContainerConfig);
			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
			status = aac_fib_send(ContainerCommand,
				    fibptr,
				    sizeof (struct aac_commit_config),
				    FsaNormal,
				    1, 1,
				    NULL, NULL);
			/* Do not set XferState to zero unless
			 * receives a response from F/W */
			if (status >= 0)
				aac_fib_complete(fibptr);
		} else if (aac_commit == 0) {
			printk(KERN_WARNING
			  "aac_get_config_status: Foreign device configurations are being ignored\n");
		}
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return status;
}
/*
 * aac_expose_phy_device - patch the first INQUIRY byte in the command's
 * data buffer so a disk-type device with qualifier bit 0x20 set becomes
 * visible (the bit is cleared in place).
 */
static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
{
	char inq_data;

	scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
	if ((inq_data & 0x1f) != TYPE_DISK || !(inq_data & 0x20))
		return;
	inq_data &= 0xdf;
	scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
}
/**
 * aac_get_containers - list containers
 * @dev: adapter to probe
 *
 * Make a list of all containers on this controller
 */
int aac_get_containers(struct aac_dev *dev)
{
	struct fsa_dev_info *fsa_dev_ptr;
	u32 index;
	int status = 0;
	struct fib * fibptr;
	struct aac_get_container_count *dinfo;
	struct aac_get_container_count_resp *dresp;
	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;

	/* ask the firmware how many containers it can expose */
	aac_fib_init(fibptr);
	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
	status = aac_fib_send(ContainerCommand,
		    fibptr,
		    sizeof (struct aac_get_container_count),
		    FsaNormal,
		    1, 1,
		    NULL, NULL);
	if (status >= 0) {
		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_SUPPORTED_240_VOLUMES) {
			maximum_num_containers =
				le32_to_cpu(dresp->MaxSimpleVolumes);
		}
		aac_fib_complete(fibptr);
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);
	/* never size the table below the driver's minimum */
	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
	fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
			GFP_KERNEL);
	if (!fsa_dev_ptr)
		return -ENOMEM;
	dev->fsa_dev = fsa_dev_ptr;
	dev->maximum_num_containers = maximum_num_containers;
	/* probe each container; aac_probe_container() returns the count */
	for (index = 0; index < dev->maximum_num_containers; ) {
		fsa_dev_ptr[index].devname[0] = '\0';
		status = aac_probe_container(dev, index);
		if (status < 0) {
			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
			break;
		}
		/*
		 * If there are no more containers, then stop asking.
		 */
		if (++index >= status)
			break;
	}
	return status;
}
/**
 * get_container_name_callback - finish a CT_READ_NAME request
 * @context: the scsi_cmnd that triggered the name query
 * @fibptr: completed FIB carrying the aac_get_name_resp payload
 *
 * On success, splices the container name into the INQUIRY product id
 * field of the command's data buffer; any failure leaves the default
 * inquiry data untouched.  Always completes the SCSI command with GOOD
 * status and releases the FIB.
 */
static void get_container_name_callback(void *context, struct fib * fibptr)
{
	struct aac_get_name_resp * get_name_reply;
	struct scsi_cmnd * scsicmd;

	scsicmd = (struct scsi_cmnd *) context;
	if (!aac_valid_context(scsicmd, fibptr))
		return;
	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
	BUG_ON(fibptr == NULL);

	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
	 && (get_name_reply->data[0] != '\0')) {
		char *sp = get_name_reply->data;
		/*
		 * NUL-terminate inside the buffer: indexing with
		 * sizeof(data) would write one byte past the end of
		 * the firmware-supplied name array.
		 */
		sp[sizeof(((struct aac_get_name_resp *)NULL)->data) - 1] = '\0';
		while (*sp == ' ')
			++sp;
		if (*sp) {
			struct inquiry_data inq;
			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
			int count = sizeof(d);
			char *dp = d;

			/* copy the name, space padding to the field width */
			do {
				*dp++ = (*sp) ? *sp++ : ' ';
			} while (--count > 0);

			/* patch the product id into the inquiry response */
			scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
			memcpy(inq.inqd_pid, d, sizeof(d));
			scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
		}
	}
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	scsicmd->scsi_done(scsicmd);
}
/**
 * aac_get_container_name - get container name, none blocking.
 *
 * Fires a CT_READ_NAME request at the adapter; on success the command is
 * completed asynchronously by get_container_name_callback().
 */
static int aac_get_container_name(struct scsi_cmnd * scsicmd)
{
	struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	struct fib *cmd_fibcontext;
	struct aac_get_name *dinfo;
	int status;

	cmd_fibcontext = aac_fib_alloc(dev);
	if (!cmd_fibcontext)
		return -ENOMEM;

	aac_fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_READ_NAME);
	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));

	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_get_name),
		  FsaNormal,
		  0, 1,
		  (fib_callback)get_container_name_callback,
		  (void *) scsicmd);

	/* queued to the adapter: the callback owns command and FIB now */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return -1;
}
static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
{
struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
return aac_scsi_cmd(scsicmd);
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
/*
 * _aac_probe_container2 - final stage of the container probe chain.
 * Decodes the VM_NameServe* reply into the per-container fsa_dev_info
 * slot, then invokes the continuation callback stashed in SCp.ptr.
 */
static void _aac_probe_container2(void * context, struct fib * fibptr)
{
	struct fsa_dev_info *fsa_dev_ptr;
	int (*callback)(struct scsi_cmnd *);
	struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	scsicmd->SCp.Status = 0;
	fsa_dev_ptr = fibptr->dev->fsa_dev;
	if (fsa_dev_ptr) {
		struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
		fsa_dev_ptr += scmd_id(scsicmd);
		/* only record containers that exist and are not hidden */
		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			/* default to 512-byte sectors when the firmware
			 * cannot report a variable block size */
			if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
			    AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
				dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
				fsa_dev_ptr->block_size = 0x200;
			} else {
				fsa_dev_ptr->block_size =
					le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
			}
			fsa_dev_ptr->valid = 1;
			/* sense_key holds the current state of the spin-up */
			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
			/* capacity is split into two 32-bit halves */
			fsa_dev_ptr->size
			  = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
			fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
		}
		if ((fsa_dev_ptr->valid & 1) == 0)
			fsa_dev_ptr->valid = 0;
		/* report the container count back through the command */
		scsicmd->SCp.Status = le32_to_cpu(dresp->count);
	}
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	/* SCp.ptr carries the continuation set by _aac_probe_container() */
	callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
	scsicmd->SCp.ptr = NULL;
	(*callback)(scsicmd);
	return;
}
/*
 * _aac_probe_container1 - middle stage of the container probe chain.
 * If the initial VM_NameServe query already identified the container,
 * fall straight through to stage 2; otherwise retry with the 64-bit
 * (or variable-block-size) name serve command.
 */
static void _aac_probe_container1(void * context, struct fib * fibptr)
{
	struct scsi_cmnd * scsicmd;
	struct aac_mount * dresp;
	struct aac_query_mount *dinfo;
	int status;

	dresp = (struct aac_mount *) fib_data(fibptr);
	/* firmware without variable block size never sets capacityhigh */
	if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE))
		dresp->mnt[0].capacityhigh = 0;
	if ((le32_to_cpu(dresp->status) != ST_OK) ||
	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
		_aac_probe_container2(context, fibptr);
		return;
	}
	scsicmd = (struct scsi_cmnd *) context;
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	/* reuse the FIB for the follow-up query */
	aac_fib_init(fibptr);
	dinfo = (struct aac_query_mount *)fib_data(fibptr);
	if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
		dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
	else
		dinfo->command = cpu_to_le32(VM_NameServe64);
	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
	dinfo->type = cpu_to_le32(FT_FILESYS);
	status = aac_fib_send(ContainerCommand,
		  fibptr,
		  sizeof(struct aac_query_mount),
		  FsaNormal,
		  0, 1,
		  _aac_probe_container2,
		  (void *) scsicmd);
	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
	else if (status < 0) {
		/* Inherit results from VM_NameServe, if any */
		dresp->status = cpu_to_le32(ST_OK);
		_aac_probe_container2(context, fibptr);
	}
}
/*
 * _aac_probe_container - start the asynchronous container probe chain.
 * Sends the initial VM_NameServe query; @callback is stashed in SCp.ptr
 * and invoked by _aac_probe_container2() when the chain completes.  On
 * submit failure the container slot is invalidated and the callback is
 * run synchronously.
 */
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
	struct fib * fibptr;
	int status = -ENOMEM;

	if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
		struct aac_query_mount *dinfo;

		aac_fib_init(fibptr);
		dinfo = (struct aac_query_mount *)fib_data(fibptr);
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
		else
			dinfo->command = cpu_to_le32(VM_NameServe);
		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
		dinfo->type = cpu_to_le32(FT_FILESYS);
		/* stash the continuation for _aac_probe_container2() */
		scsicmd->SCp.ptr = (char *)callback;
		status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container1,
			  (void *) scsicmd);
		/*
		 * Check that the command queued to the controller
		 */
		if (status == -EINPROGRESS) {
			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
			return 0;
		}
		if (status < 0) {
			scsicmd->SCp.ptr = NULL;
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
	}
	if (status < 0) {
		struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
		if (fsa_dev_ptr) {
			fsa_dev_ptr += scmd_id(scsicmd);
			if ((fsa_dev_ptr->valid & 1) == 0) {
				fsa_dev_ptr->valid = 0;
				return (*callback)(scsicmd);
			}
		}
	}
	return status;
}
/**
 * aac_probe_container - query a logical volume
 * @dev: device to query
 * @cid: container identifier
 *
 * Queries the controller about the given volume. The volume information
 * is updated in the struct fsa_dev_info structure rather than returned.
 */
static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
{
	/* clearing ->device signals aac_probe_container(), which spins
	 * on scsicmd->device, that the probe chain has finished */
	scsicmd->device = NULL;
	return 0;
}
int aac_probe_container(struct aac_dev *dev, int cid)
{
	/* build a throwaway command/device pair solely to drive the
	 * asynchronous _aac_probe_container() state machine */
	struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
	struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
	int status;

	if (!scsicmd || !scsidev) {
		kfree(scsicmd);
		kfree(scsidev);
		return -ENOMEM;
	}
	scsicmd->list.next = NULL;
	scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
	scsicmd->device = scsidev;
	scsidev->sdev_state = 0;
	scsidev->id = cid;
	scsidev->host = dev->scsi_host_ptr;
	/* the callback clears scsicmd->device when the probe completes,
	 * so busy-wait (yielding the CPU) until that happens */
	if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
		while (scsicmd->device == scsidev)
			schedule();
	kfree(scsidev);
	/* SCp.Status was filled in by _aac_probe_container2() */
	status = scsicmd->SCp.Status;
	kfree(scsicmd);
	return status;
}
/* Local Structure to set SCSI inquiry data strings */
/* Fields are fixed width and space padded, not NUL terminated (see setinqstr) */
struct scsi_inq {
	char vid[8];	 /* Vendor ID */
	char pid[16];	 /* Product ID */
	char prl[4];	 /* Product Revision Level */
};
/*
 * inqstrcpy - copy the characters of NUL-terminated @a into @b without
 * writing the terminating NUL (inquiry fields are fixed width and space
 * padded rather than NUL terminated).
 */
static void inqstrcpy(char *a, char *b)
{
	for (; *a != '\0'; ++a, ++b)
		*b = *a;
}
/* Human readable names for the container (volume) types, indexed by type.
 * The final entry doubles as the fallback for unrecognized indices. */
static char *container_types[] = {
	"None",
	"Volume",
	"Mirror",
	"Stripe",
	"RAID5",
	"SSRW",
	"SSRO",
	"Morph",
	"Legacy",
	"RAID4",
	"RAID10",
	"RAID00",
	"V-MIRRORS",
	"PSEUDO R4",
	"RAID50",
	"RAID5D",
	"RAID5D0",
	"RAID1E",
	"RAID6",
	"RAID60",
	"Unknown"
};

char * get_container_type(unsigned tindex)
{
	unsigned last = ARRAY_SIZE(container_types) - 1;

	/* out-of-range indices map to the trailing "Unknown" entry */
	return container_types[tindex < last ? tindex : last];
}
/* Function: setinqstr
 *
 * Arguments: [1] pointer to void [1] int
 *
 * Purpose: Sets SCSI inquiry data strings for vendor, product
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
 */
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
	struct scsi_inq *str;

	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
	/* space-fill all fields; inqstrcpy() never writes a NUL */
	memset(str, ' ', sizeof(*str));
	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
		char * cp = dev->supplement_adapter_info.AdapterTypeText;
		int c;
		/* "AOC..." adapters are rebranded as SMC */
		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
			inqstrcpy("SMC", str->vid);
		else {
			/* copy the first word (up to vid width) as vendor id,
			 * temporarily NUL-terminating it in place */
			c = sizeof(str->vid);
			while (*cp && *cp != ' ' && --c)
				++cp;
			c = *cp;
			*cp = '\0';
			inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
				   str->vid);
			*cp = c;
			while (*cp && *cp != ' ')
				++cp;
		}
		/* skip separator spaces; the remainder is the product id */
		while (*cp == ' ')
			++cp;
		/* last six chars reserved for vol type */
		c = 0;
		/* truncate the product id in place, restoring afterwards */
		if (strlen(cp) > sizeof(str->pid)) {
			c = cp[sizeof(str->pid)];
			cp[sizeof(str->pid)] = '\0';
		}
		inqstrcpy (cp, str->pid);
		if (c)
			cp[sizeof(str->pid)] = c;
	} else {
		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
		inqstrcpy (mp->vname, str->vid);
		/* last six chars reserved for vol type */
		inqstrcpy (mp->model, str->pid);
	}
	if (tindex < ARRAY_SIZE(container_types)){
		char *findit = str->pid;
		/* NOTE(review): the scan relies on pid/prl remaining space
		 * padded from the memset above so a space is always found */
		for ( ; *findit != ' '; findit++); /* walk till we find a space */
		/* RAID is superfluous in the context of a RAID device */
		if (memcmp(findit-4, "RAID", 4) == 0)
			*(findit -= 4) = ' ';
		if (((findit - str->pid) + strlen(container_types[tindex]))
		 < (sizeof(str->pid) + sizeof(str->prl)))
			inqstrcpy (container_types[tindex], findit + 1);
	}
	inqstrcpy ("V1.0", str->prl);
}
/*
 * get_container_serial_callback - finish a CT_CID_TO_32BITS_UID request.
 * Builds either a VPD 0x83 (Device Identification) or VPD 0x80 (Unit
 * Serial Number) response from the 32-bit uid, copies it into the
 * command's data buffer, and completes the command with GOOD status.
 */
static void get_container_serial_callback(void *context, struct fib * fibptr)
{
	struct aac_get_serial_resp * get_serial_reply;
	struct scsi_cmnd * scsicmd;

	BUG_ON(fibptr == NULL);
	scsicmd = (struct scsi_cmnd *) context;
	if (!aac_valid_context(scsicmd, fibptr))
		return;
	get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
		/*Check to see if it's for VPD 0x83 or 0x80 */
		if (scsicmd->cmnd[2] == 0x83) {
			/* vpd page 0x83 - Device Identification Page */
			int i;
			TVPD_Page83 VPDPage83Data;

			memset(((u8 *)&VPDPage83Data), 0,
			       sizeof(VPDPage83Data));
			/* DIRECT_ACCESS_DEVIC */
			VPDPage83Data.DeviceType = 0;
			/* DEVICE_CONNECTED */
			VPDPage83Data.DeviceTypeQualifier = 0;
			/* VPD_DEVICE_IDENTIFIERS */
			VPDPage83Data.PageCode = 0x83;
			VPDPage83Data.Reserved = 0;
			VPDPage83Data.PageLength =
				sizeof(VPDPage83Data.IdDescriptorType1) +
				sizeof(VPDPage83Data.IdDescriptorType2);
			/* T10 Vendor Identifier Field Format */
			/* VpdCodeSetAscii */
			VPDPage83Data.IdDescriptorType1.CodeSet = 2;
			/* VpdIdentifierTypeVendorId */
			VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
			VPDPage83Data.IdDescriptorType1.IdentifierLength =
				sizeof(VPDPage83Data.IdDescriptorType1) - 4;
			/* "ADAPTEC " for adaptec */
			memcpy(VPDPage83Data.IdDescriptorType1.VendId,
			       "ADAPTEC ",
			       sizeof(VPDPage83Data.IdDescriptorType1.VendId));
			memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
			       "ARRAY ",
			       sizeof(
			       VPDPage83Data.IdDescriptorType1.ProductId));
			/* Convert to ascii based serial number.
			 * The LSB is at the end.
			 */
			for (i = 0; i < 8; i++) {
				u8 temp =
					(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
				if (temp > 0x9) {
					VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
						'A' + (temp - 0xA);
				} else {
					VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
						'0' + temp;
				}
			}
			/* VpdCodeSetBinary */
			VPDPage83Data.IdDescriptorType2.CodeSet = 1;
			/* VpdIdentifierTypeEUI64 */
			VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
			VPDPage83Data.IdDescriptorType2.IdentifierLength =
				sizeof(VPDPage83Data.IdDescriptorType2) - 4;
			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
			VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
				get_serial_reply->uid;
			VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
			/* Move the inquiry data to the response buffer. */
			scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
						 sizeof(VPDPage83Data));
		} else {
			/* It must be for VPD 0x80 */
			char sp[13];
			/* EVPD bit set */
			sp[0] = INQD_PDT_DA;
			sp[1] = scsicmd->cmnd[2];
			sp[2] = 0;
			sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
				le32_to_cpu(get_serial_reply->uid));
			scsi_sg_copy_from_buffer(scsicmd, sp,
						 sizeof(sp));
		}
	}
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	scsicmd->scsi_done(scsicmd);
}
/**
 * aac_get_container_serial - get container serial, non-blocking.
 */
static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
{
	struct aac_dev *aac = (struct aac_dev *)scsicmd->device->host->hostdata;
	struct aac_get_serial *request;
	struct fib *fib;
	int rc;

	fib = aac_fib_alloc(aac);
	if (!fib)
		return -ENOMEM;
	aac_fib_init(fib);

	/* Ask firmware to translate this container id to its 32-bit UID. */
	request = (struct aac_get_serial *) fib_data(fib);
	request->command = cpu_to_le32(VM_ContainerConfig);
	request->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
	request->cid = cpu_to_le32(scmd_id(scsicmd));

	rc = aac_fib_send(ContainerCommand,
			  fib,
			  sizeof(struct aac_get_serial),
			  FsaNormal,
			  0, 1,
			  (fib_callback) get_container_serial_callback,
			  (void *) scsicmd);

	/* -EINPROGRESS means the request was queued to the controller. */
	if (rc == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", rc);
	aac_fib_complete(fib);
	aac_fib_free(fib);
	return -1;
}
/* Function: setinqserial
 *
 * Arguments: [1] pointer to the adapter, [2] pointer to the output
 *            buffer, [3] container id
 *
 * Purpose: Sets SCSI Unit Serial number.
 * This is a fake. We should read a proper
 * serial number from the container. <SuSE>But
 * without docs it's quite hard to do it :-)
 * So this will have to do in the meantime.</SuSE>
 */
static int setinqserial(struct aac_dev *dev, void *data, int cid)
{
/*
* This breaks array migration.
*/
return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
/*
 * Fill in a fixed-format (error code 70h) SCSI sense buffer.
 * The bit_pointer/field_pointer arguments populate the sense-key-specific
 * bytes and are only consumed for ILLEGAL_REQUEST.
 */
static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
	u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
	u8 *buf = (u8 *)sense_data;

	buf[0] = 0x70;			/* current error, fixed format */
	buf[1] = 0;			/* segment number, always zero */
	buf[2] = sense_key;		/* sense key */
	buf[12] = sense_code;		/* additional sense code */
	buf[13] = a_sense_code;		/* additional sense code qualifier */

	if (sense_key != ILLEGAL_REQUEST) {
		buf[7] = 6;		/* additional sense length */
		return;
	}

	buf[7] = 10;			/* additional sense length */
	buf[15] = bit_pointer;
	/* Flag the sense-key-specific field as valid for bad CDB fields */
	if (sense_code == SENCODE_INVALID_CDB_FIELD)
		buf[15] |= 0xc0;	/* Std sense key specific field */
	buf[16] = field_pointer >> 8;	/* MSB */
	buf[17] = field_pointer;	/* LSB */
}
/*
 * Bounds check for adapters limited to 32-bit block addresses: if the
 * LBA has any of the upper 32 bits set, complete the command with a
 * hardware-error check condition and return 1; otherwise return 0.
 */
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
	int cid;

	if (!(lba & 0xffffffff00000000LL))
		return 0;

	cid = scmd_id(cmd);
	dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
	cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
		SAM_STAT_CHECK_CONDITION;
	set_sense(&dev->fsa_dev[cid].sense_data,
		HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
	memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			SCSI_SENSE_BUFFERSIZE));
	cmd->scsi_done(cmd);
	return 1;
}
/* 64-bit-capable adapters accept any LBA, so no bounds failure is possible. */
static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
	return 0;
}
static void io_callback(void *context, struct fib * fibptr);
/*
 * aac_read_raw_io - build and send a raw-I/O container read
 * @fib:   FIB to fill in and send
 * @cmd:   SCSI command supplying the scatter/gather data
 * @lba:   starting logical block address (full 64 bits used)
 * @count: transfer length in blocks
 *
 * Uses the RawIo2 format (IEEE1212 SG elements) on TYPE2 message
 * interfaces when not in sync mode, and the original RawIo format
 * otherwise.  Returns the aac_fib_send() status, or a negative errno
 * if the scatter/gather list could not be built.
 */
static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;
	aac_fib_init(fib);
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
		struct aac_raw_io2 *readcmd2;
		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		/* byteCount is in bytes, hence the block_size scaling */
		readcmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
		ret = aac_build_sgraw2(cmd, readcmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		/* one SG element is already part of struct aac_raw_io2 */
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *readcmd;
		readcmd = (struct aac_raw_io *) fib_data(fib);
		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd->cid = cpu_to_le16(scmd_id(cmd));
		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
		readcmd->bpTotal = 0;
		readcmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &readcmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		/* one sgentryraw is already part of struct aac_raw_io */
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
	}
	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 * Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
/*
 * Build and send a VM_CtHostRead64 container read (64-bit SG host
 * addresses, 32-bit LBA, 16-bit sector count).  Returns the
 * aac_fib_send() status, or a negative errno if the scatter/gather
 * list could not be built.
 */
static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	struct aac_read64 *rd;
	u16 fibsize;
	long sg_status;

	aac_fib_init(fib);
	rd = (struct aac_read64 *) fib_data(fib);
	rd->command = cpu_to_le32(VM_CtHostRead64);
	rd->cid = cpu_to_le16(scmd_id(cmd));
	rd->sector_count = cpu_to_le16(count);
	rd->block = cpu_to_le32((u32)(lba&0xffffffff));
	rd->pad = 0;
	rd->flags = 0;

	sg_status = aac_build_sg64(cmd, &rd->sg);
	if (sg_status < 0)
		return sg_status;

	/* One sgentry64 is already part of struct aac_read64. */
	fibsize = sizeof(struct aac_read64) +
		((le32_to_cpu(rd->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON(fibsize > (fib->dev->max_fib_size -
			  sizeof(struct aac_fibhdr)));

	/*
	 * Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64, fib, fibsize, FsaNormal,
			    0, 1,
			    (fib_callback) io_callback,
			    (void *) cmd);
}
/*
 * aac_read_block - build and send a VM_CtBlockRead container read
 * (32-bit SG addresses, 32-bit LBA, byte count).  Returns the
 * aac_fib_send() status, or a negative errno if the scatter/gather
 * list could not be built.
 */
static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	u16 fibsize;
	struct aac_read *readcmd;
	struct aac_dev *dev = fib->dev;
	long ret;
	aac_fib_init(fib);
	readcmd = (struct aac_read *) fib_data(fib);
	readcmd->command = cpu_to_le32(VM_CtBlockRead);
	readcmd->cid = cpu_to_le32(scmd_id(cmd));
	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	/* count is converted from blocks to bytes for this command */
	readcmd->count = cpu_to_le32(count *
		dev->fsa_dev[scmd_id(cmd)].block_size);
	ret = aac_build_sg(cmd, &readcmd->sg);
	if (ret < 0)
		return ret;
	/* one sgentry is already part of struct aac_read */
	fibsize = sizeof(struct aac_read) +
			((le32_to_cpu(readcmd->sg.count) - 1) *
			 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
/*
 * aac_write_raw_io - build and send a raw-I/O container write
 * @fib:   FIB to fill in and send
 * @cmd:   SCSI command supplying the scatter/gather data
 * @lba:   starting logical block address (full 64 bits used)
 * @count: transfer length in blocks
 * @fua:   non-zero when the CDB requested Force Unit Access
 *
 * Uses the RawIo2 format on TYPE2 message interfaces when not in sync
 * mode, and the original RawIo format otherwise.  SUREWRITE is added
 * to the flags only when FUA was requested and the aac_cache module
 * setting / cache_protected state allow it (aac_cache bit semantics
 * are defined with the module parameter — see its declaration).
 * Returns the aac_fib_send() status, or a negative errno if the
 * scatter/gather list could not be built.
 */
static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;
	aac_fib_init(fib);
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
		struct aac_raw_io2 *writecmd2;
		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		/* byteCount is in bytes, hence the block_size scaling */
		writecmd2->byteCount = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
			cpu_to_le16(RIO2_IO_TYPE_WRITE);
		ret = aac_build_sgraw2(cmd, writecmd2,
				dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		/* one SG element is already part of struct aac_raw_io2 */
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *writecmd;
		writecmd = (struct aac_raw_io *) fib_data(fib);
		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd->count = cpu_to_le32(count *
			dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd->cid = cpu_to_le16(scmd_id(cmd));
		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
						 (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
			cpu_to_le16(RIO_TYPE_WRITE);
		writecmd->bpTotal = 0;
		writecmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &writecmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		/* one sgentryraw is already part of struct aac_raw_io */
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
	}
	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
/*
 * Build and send a VM_CtHostWrite64 container write (64-bit SG host
 * addresses, 32-bit LBA, 16-bit sector count).  The @fua argument is
 * not used by this transport (flags are always zero).  Returns the
 * aac_fib_send() status, or a negative errno if the scatter/gather
 * list could not be built.
 */
static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	struct aac_write64 *wr;
	u16 fibsize;
	long sg_status;

	aac_fib_init(fib);
	wr = (struct aac_write64 *) fib_data(fib);
	wr->command = cpu_to_le32(VM_CtHostWrite64);
	wr->cid = cpu_to_le16(scmd_id(cmd));
	wr->sector_count = cpu_to_le16(count);
	wr->block = cpu_to_le32((u32)(lba&0xffffffff));
	wr->pad = 0;
	wr->flags = 0;

	sg_status = aac_build_sg64(cmd, &wr->sg);
	if (sg_status < 0)
		return sg_status;

	/* One sgentry64 is already part of struct aac_write64. */
	fibsize = sizeof(struct aac_write64) +
		((le32_to_cpu(wr->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON(fibsize > (fib->dev->max_fib_size -
			  sizeof(struct aac_fibhdr)));

	/*
	 * Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64, fib, fibsize, FsaNormal,
			    0, 1,
			    (fib_callback) io_callback,
			    (void *) cmd);
}
/*
 * aac_write_block - build and send a VM_CtBlockWrite container write
 * (32-bit SG addresses, 32-bit LBA, byte count).  The @fua argument
 * is not used by this transport.  Returns the aac_fib_send() status,
 * or a negative errno if the scatter/gather list could not be built.
 */
static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write *writecmd;
	struct aac_dev *dev = fib->dev;
	long ret;
	aac_fib_init(fib);
	writecmd = (struct aac_write *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
	writecmd->cid = cpu_to_le32(scmd_id(cmd));
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	/* count is converted from blocks to bytes for this command */
	writecmd->count = cpu_to_le32(count *
		dev->fsa_dev[scmd_id(cmd)].block_size);
	/* placeholder; aac_build_sg() below fills in the real count */
	writecmd->sg.count = cpu_to_le32(1);
	/* ->stable is not used - it did mean which type of write */
	ret = aac_build_sg(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	/* one sgentry is already part of struct aac_write */
	fibsize = sizeof(struct aac_write) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
/*
 * aac_scsi_common - initialise the SRB fields shared by the 32- and
 * 64-bit SRB pass-through paths: data direction flags, addressing
 * (channel/id/lun), timeout and CDB size.  The caller fills in the
 * scatter/gather list, byte count and CDB itself.
 */
static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
{
	struct aac_srb * srbcmd;
	u32 flag;
	u32 timeout;
	aac_fib_init(fib);
	/* map the SCSI data direction onto the SRB flag bits */
	switch(cmd->sc_data_direction){
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
		break;
	case DMA_BIDIRECTIONAL:
		flag = SRB_DataIn | SRB_DataOut;
		break;
	case DMA_FROM_DEVICE:
		flag = SRB_DataIn;
		break;
	case DMA_NONE:
	default:	/* shuts up some versions of gcc */
		flag = SRB_NoDataXfer;
		break;
	}
	srbcmd = (struct aac_srb*) fib_data(fib);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
	srbcmd->id       = cpu_to_le32(scmd_id(cmd));
	srbcmd->lun      = cpu_to_le32(cmd->device->lun);
	srbcmd->flags    = cpu_to_le32(flag);
	/* firmware wants the timeout in seconds, minimum one second */
	timeout = cmd->request->timeout/HZ;
	if (timeout == 0)
		timeout = 1;
	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
	srbcmd->retry_limit = 0; /* Obsolete parameter */
	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
	return srbcmd;
}
static void aac_srb_callback(void *context, struct fib * fibptr);
/*
 * aac_scsi_64 - send a SCSI command via the 64-bit SRB pass-through
 * interface (sgentry64 elements).  Returns the aac_fib_send() status,
 * or a negative errno if the scatter/gather list could not be built.
 */
static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
{
	u16 fibsize;
	struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
	long ret;
	ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
	if (ret < 0)
		return ret;
	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
	/*
	 *	Build Scatter/Gather list
	 */
	/* size the FIB for the SG elements actually filled in above */
	fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
		((le32_to_cpu(srbcmd->sg.count) & 0xff) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ScsiPortCommand64, fib,
				fibsize, FsaNormal, 0, 1,
				  (fib_callback) aac_srb_callback,
				  (void *) cmd);
}
/*
 * aac_scsi_32 - send a SCSI command via the 32-bit SRB pass-through
 * interface (sgentry elements).  Returns the aac_fib_send() status,
 * or a negative errno if the scatter/gather list could not be built.
 */
static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
{
	u16 fibsize;
	struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
	long ret;
	ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
	if (ret < 0)
		return ret;
	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
	/*
	 *	Build Scatter/Gather list
	 */
	/* size the FIB for the SG elements actually filled in above */
	fibsize = sizeof (struct aac_srb) +
		(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
				  (fib_callback) aac_srb_callback, (void *) cmd);
}
static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
{
if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
(fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
return FAILED;
return aac_scsi_32(fib, cmd);
}
/*
 * aac_get_adapter_info - interrogate the firmware and configure the driver
 * @dev: adapter to query
 *
 * Issues RequestAdapterInfo, optionally RequestSupplementAdapterInfo,
 * and a GetBusInfo ioctl, then uses the results (plus the nondasd,
 * dacmode, aac_cache and expose_physicals module parameters) to select
 * DMA masks, bounds-check/read/write function pointers and SG table
 * limits.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on -ERESTARTSYS the FIB is deliberately NOT freed here
 * (firmware still owns it); a replacement FIB is allocated where needed.
 */
int aac_get_adapter_info(struct aac_dev* dev)
{
	struct fib* fibptr;
	int rcode;
	u32 tmp;
	struct aac_adapter_info *info;
	struct aac_bus_info *command;
	struct aac_bus_info_response *bus_info;
	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;
	/*
	 * RequestAdapterInfo: basic firmware/BIOS revision and options
	 */
	aac_fib_init(fibptr);
	info = (struct aac_adapter_info *) fib_data(fibptr);
	memset(info,0,sizeof(*info));
	rcode = aac_fib_send(RequestAdapterInfo,
			 fibptr,
			 sizeof(*info),
			 FsaNormal,
			 -1, 1, /* First `interrupt' command uses special wait */
			 NULL,
			 NULL);
	if (rcode < 0) {
		/* FIB should be freed only after
		 * getting the response from the F/W */
		if (rcode != -ERESTARTSYS) {
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
		return rcode;
	}
	memcpy(&dev->adapter_info, info, sizeof(*info));
	/*
	 * RequestSupplementAdapterInfo: extra VPD/feature data, only when
	 * the firmware advertises support for it
	 */
	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
		struct aac_supplement_adapter_info * sinfo;
		aac_fib_init(fibptr);
		sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
		memset(sinfo,0,sizeof(*sinfo));
		rcode = aac_fib_send(RequestSupplementAdapterInfo,
				 fibptr,
				 sizeof(*sinfo),
				 FsaNormal,
				 1, 1,
				 NULL,
				 NULL);
		if (rcode >= 0)
			memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
		/* firmware kept the old FIB; get a fresh one to continue */
		if (rcode == -ERESTARTSYS) {
			fibptr = aac_fib_alloc(dev);
			if (!fibptr)
				return -ENOMEM;
		}
	}
	/*
	 * GetBusInfo
	 */
	aac_fib_init(fibptr);
	bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
	memset(bus_info, 0, sizeof(*bus_info));
	/* request and response share the FIB data area */
	command = (struct aac_bus_info *)bus_info;
	command->Command = cpu_to_le32(VM_Ioctl);
	command->ObjType = cpu_to_le32(FT_DRIVE);
	command->MethodId = cpu_to_le32(1);
	command->CtlCmd = cpu_to_le32(GetBusInfo);
	rcode = aac_fib_send(ContainerCommand,
			 fibptr,
			 sizeof (*bus_info),
			 FsaNormal,
			 1, 1,
			 NULL, NULL);
	/* reasoned default */
	dev->maximum_num_physicals = 16;
	if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
		dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
	}
	/* log firmware/BIOS revisions and serial, but not during a reset */
	if (!dev->in_reset) {
		char buffer[16];
		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
		printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
			dev->name,
			dev->id,
			tmp>>24,
			(tmp>>16)&0xff,
			tmp&0xff,
			le32_to_cpu(dev->adapter_info.kernelbuild),
			(int)sizeof(dev->supplement_adapter_info.BuildDate),
			dev->supplement_adapter_info.BuildDate);
		tmp = le32_to_cpu(dev->adapter_info.monitorrev);
		printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.monitorbuild));
		tmp = le32_to_cpu(dev->adapter_info.biosrev);
		printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.biosbuild));
		buffer[0] = '\0';
		if (aac_get_serial_number(
			shost_to_class(dev->scsi_host_ptr), buffer))
			printk(KERN_INFO "%s%d: serial %s",
				dev->name, dev->id, buffer);
		if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
			printk(KERN_INFO "%s%d: TSID %.*s\n",
				dev->name, dev->id,
				(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
				dev->supplement_adapter_info.VpdInfo.Tsid);
		}
		if (!aac_check_reset || ((aac_check_reset == 1) &&
		  (dev->supplement_adapter_info.SupportedOptions2 &
		  AAC_OPTION_IGNORE_RESET))) {
			printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
			  dev->name, dev->id);
		}
	}
	dev->cache_protected = 0;
	dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
		AAC_FEATURE_JBOD) != 0);
	dev->nondasd_support = 0;
	dev->raid_scsi_mode = 0;
	if(dev->adapter_info.options & AAC_OPT_NONDASD)
		dev->nondasd_support = 1;
	/*
	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
	 * force nondasd support on. If we decide to allow the non-dasd flag
	 * additional changes changes will have to be made to support
	 * RAID/SCSI.  the function aac_scsi_cmd in this module will have to be
	 * changed to support the new dev->raid_scsi_mode flag instead of
	 * leaching off of the dev->nondasd_support flag. Also in linit.c the
	 * function aac_detect will have to be modified where it sets up the
	 * max number of channels based on the aac->nondasd_support flag only.
	 */
	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
		dev->nondasd_support = 1;
		dev->raid_scsi_mode = 1;
	}
	if (dev->raid_scsi_mode != 0)
		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
				dev->name, dev->id);
	/* the nondasd module parameter overrides the firmware setting */
	if (nondasd != -1)
		dev->nondasd_support = (nondasd!=0);
	if (dev->nondasd_support && !dev->in_reset)
		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
	if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
		dev->needs_dac = 1;
	dev->dac_support = 0;
	if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
		(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
		if (!dev->in_reset)
			printk(KERN_INFO "%s%d: 64bit support enabled.\n",
				dev->name, dev->id);
		dev->dac_support = 1;
	}
	/* the dacmode module parameter overrides the detected setting */
	if(dacmode != -1) {
		dev->dac_support = (dacmode!=0);
	}
	/* avoid problems with AAC_QUIRK_SCSI_32 controllers */
	if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
		& AAC_QUIRK_SCSI_32)) {
		dev->nondasd_support = 0;
		dev->jbod = 0;
		expose_physicals = 0;
	}
	/* pick a DMA mask; fall back from 64-bit to 32-bit on failure */
	if(dev->dac_support != 0) {
		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
			if (!dev->in_reset)
				printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
					dev->name, dev->id);
		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
				dev->name, dev->id);
			dev->dac_support = 0;
		} else {
			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
				dev->name, dev->id);
			rcode = -ENOMEM;
		}
	}
	/*
	 * Deal with configuring for the individualized limits of each packet
	 * interface.
	 */
	dev->a_ops.adapter_scsi = (dev->dac_support)
	  ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
				? aac_scsi_32_64
				: aac_scsi_64)
				: aac_scsi_32;
	if (dev->raw_io_interface) {
		dev->a_ops.adapter_bounds = (dev->raw_io_64)
					? aac_bounds_64
					: aac_bounds_32;
		dev->a_ops.adapter_read = aac_read_raw_io;
		dev->a_ops.adapter_write = aac_write_raw_io;
	} else {
		dev->a_ops.adapter_bounds = aac_bounds_32;
		/* SG table limit derived from the FIB payload capacity */
		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
			sizeof(struct aac_fibhdr) -
			sizeof(struct aac_write) + sizeof(struct sgentry)) /
				sizeof(struct sgentry);
		if (dev->dac_support) {
			dev->a_ops.adapter_read = aac_read_block64;
			dev->a_ops.adapter_write = aac_write_block64;
			/*
			 * 38 scatter gather elements
			 */
			dev->scsi_host_ptr->sg_tablesize =
				(dev->max_fib_size -
				sizeof(struct aac_fibhdr) -
				sizeof(struct aac_write64) +
				sizeof(struct sgentry64)) /
					sizeof(struct sgentry64);
		} else {
			dev->a_ops.adapter_read = aac_read_block;
			dev->a_ops.adapter_write = aac_write_block;
		}
		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
			/*
			 * Worst case size that could cause sg overflow when
			 * we break up SG elements that are larger than 64KB.
			 * Would be nice if we could tell the SCSI layer what
			 * the maximum SG element size can be. Worst case is
			 * (sg_tablesize-1) 4KB elements with one 64KB
			 * element.
			 *	32bit -> 468 or 238KB	64bit -> 424 or 212KB
			 */
			dev->scsi_host_ptr->max_sectors =
			  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
		}
	}
	/* FIB should be freed only after getting the response from the F/W */
	if (rcode != -ERESTARTSYS) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
	}
	return rcode;
}
/*
 * io_callback - completion handler for container read/write FIBs
 * @context: the originating struct scsi_cmnd
 * @fibptr:  the completed FIB
 *
 * Translates the firmware status into a SCSI result (GOOD, or a check
 * condition with NOT_READY / HARDWARE_ERROR sense data), unmaps the
 * command's DMA, releases the FIB and completes the command.
 */
static void io_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_read_reply *readreply;
	struct scsi_cmnd *scsicmd;
	u32 cid;

	scsicmd = (struct scsi_cmnd *) context;
	/* bail out if the command was already timed out / aborted */
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	dev = fibptr->dev;
	cid = scmd_id(scsicmd);

	/* decode the LBA from the CDB purely for debug logging; this whole
	 * branch is compiled out unless dprintk expands to real code */
	if (nblank(dprintk(x))) {
		u64 lba;
		switch (scsicmd->cmnd[0]) {
		case WRITE_6:
		case READ_6:
			lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
			    (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
			break;
		case WRITE_16:
		case READ_16:
			lba = ((u64)scsicmd->cmnd[2] << 56) |
			      ((u64)scsicmd->cmnd[3] << 48) |
			      ((u64)scsicmd->cmnd[4] << 40) |
			      ((u64)scsicmd->cmnd[5] << 32) |
			      ((u64)scsicmd->cmnd[6] << 24) |
			      (scsicmd->cmnd[7] << 16) |
			      (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
			break;
		case WRITE_12:
		case READ_12:
			lba = ((u64)scsicmd->cmnd[2] << 24) |
			      (scsicmd->cmnd[3] << 16) |
			      (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			break;
		default:
			lba = ((u64)scsicmd->cmnd[2] << 24) |
			      (scsicmd->cmnd[3] << 16) |
			      (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			break;
		}
		printk(KERN_DEBUG
		  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
		  smp_processor_id(), (unsigned long long)lba, jiffies);
	}

	BUG_ON(fibptr == NULL);

	scsi_dma_unmap(scsicmd);

	readreply = (struct aac_read_reply *)fib_data(fibptr);
	switch (le32_to_cpu(readreply->status)) {
	case ST_OK:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_GOOD;
		dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
		break;
	case ST_NOT_READY:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
		  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		break;
	default:
#ifdef AAC_DETAILED_STATUS_INFO
		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
		  le32_to_cpu(readreply->status));
#endif
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		break;
	}
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);

	scsicmd->scsi_done(scsicmd);
}
/*
 * aac_read - translate a READ(6/10/12/16) CDB into a container read
 * @scsicmd: the SCSI command to service
 *
 * Decodes LBA and transfer length from the CDB, bounds-checks against
 * the container size, then dispatches via the adapter_read method
 * selected in aac_get_adapter_info().  Returns 0 when queued (or when
 * the command was completed with an error), 1 on an illegal LBA, -1 if
 * no FIB could be allocated.
 */
static int aac_read(struct scsi_cmnd * scsicmd)
{
	u64 lba;
	u32 count;
	int status;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;
	int cid;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 *	Get block address and transfer length
	 */
	switch (scsicmd->cmnd[0]) {
	case READ_6:
		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
			(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];

		/* READ(6): transfer length 0 means 256 blocks */
		if (count == 0)
			count = 256;
		break;
	case READ_16:
		dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));

		lba =	((u64)scsicmd->cmnd[2] << 56) |
			((u64)scsicmd->cmnd[3] << 48) |
			((u64)scsicmd->cmnd[4] << 40) |
			((u64)scsicmd->cmnd[5] << 32) |
			((u64)scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		count = (scsicmd->cmnd[10] << 24) |
			(scsicmd->cmnd[11] << 16) |
			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
		break;
	case READ_12:
		dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) |
			(scsicmd->cmnd[3] << 16) |
			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		break;
	default:
		/* anything else is treated as READ(10) */
		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) |
			(scsicmd->cmnd[3] << 16) |
			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
		break;
	}

	/* reject reads past the end of the container with a check condition */
	if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
		cid = scmd_id(scsicmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		scsicmd->scsi_done(scsicmd);
		return 1;
	}

	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
	  smp_processor_id(), (unsigned long long)lba, jiffies));
	if (aac_adapter_bounds(dev,scsicmd,lba))
		return 0;
	/*
	 *	Alocate and initialize a Fib
	 */
	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
		printk(KERN_WARNING "aac_read: fib allocation failed\n");
		return -1;
	}

	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
	/*
	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
	scsicmd->scsi_done(scsicmd);
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return 0;
}
/*
 * aac_write - translate a WRITE(6/10/12/16) CDB into a container write
 * @scsicmd: the SCSI command to service
 *
 * Decodes LBA, transfer length and the FUA bit from the CDB,
 * bounds-checks against the container size, then dispatches via the
 * adapter_write method selected in aac_get_adapter_info().  Returns 0
 * when queued (or when the command was completed with an error), 1 on
 * an illegal LBA, -1 if no FIB could be allocated.
 */
static int aac_write(struct scsi_cmnd * scsicmd)
{
	u64 lba;
	u32 count;
	int fua;
	int status;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;
	int cid;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 *	Get block address and transfer length
	 */
	if (scsicmd->cmnd[0] == WRITE_6)	/* 6 byte command */
	{
		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];
		/* WRITE(6): transfer length 0 means 256 blocks; no FUA bit */
		if (count == 0)
			count = 256;
		fua = 0;
	} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
		dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));

		lba =	((u64)scsicmd->cmnd[2] << 56) |
			((u64)scsicmd->cmnd[3] << 48) |
			((u64)scsicmd->cmnd[4] << 40) |
			((u64)scsicmd->cmnd[5] << 32) |
			((u64)scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
		fua = scsicmd->cmnd[1] & 0x8;	/* FUA bit */
	} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
		dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
		    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
		      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		fua = scsicmd->cmnd[1] & 0x8;	/* FUA bit */
	} else {
		/* anything else is treated as WRITE(10) */
		dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
		fua = scsicmd->cmnd[1] & 0x8;	/* FUA bit */
	}

	/* reject writes past the end of the container with a check condition */
	if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
		cid = scmd_id(scsicmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		scsicmd->scsi_done(scsicmd);
		return 1;
	}

	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
	  smp_processor_id(), (unsigned long long)lba, jiffies));
	if (aac_adapter_bounds(dev,scsicmd,lba))
		return 0;
	/*
	 *	Allocate and initialize a Fib then setup a BlockWrite command
	 */
	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
		/* FIB temporarily unavailable,not catastrophic failure */

		/* scsicmd->result = DID_ERROR << 16;
		 * scsicmd->scsi_done(scsicmd);
		 * return 0;
		 */
		printk(KERN_WARNING "aac_write: fib allocation failed\n");
		return -1;
	}

	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
	/*
	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
	scsicmd->scsi_done(scsicmd);

	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return 0;
}
/*
 * synchronize_callback - completion handler for CT_FLUSH_CACHE FIBs
 * @context: the originating struct scsi_cmnd
 * @fibptr:  the completed FIB
 *
 * Maps CT_OK to GOOD status; any other status becomes a hardware-error
 * check condition.  Releases the FIB and completes the command.
 */
static void synchronize_callback(void *context, struct fib *fibptr)
{
	struct aac_synchronize_reply *synchronizereply;
	struct scsi_cmnd *cmd;

	cmd = context;
	/* bail out if the command was already timed out / aborted */
	if (!aac_valid_context(cmd, fibptr))
		return;

	dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
				smp_processor_id(), jiffies));
	BUG_ON(fibptr == NULL);

	synchronizereply = fib_data(fibptr);
	if (le32_to_cpu(synchronizereply->status) == CT_OK)
		cmd->result = DID_OK << 16 |
			COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
	else {
		struct scsi_device *sdev = cmd->device;
		struct aac_dev *dev = fibptr->dev;
		u32 cid = sdev_id(sdev);
		printk(KERN_WARNING
		     "synchronize_callback: synchronize failed, status = %d\n",
		     le32_to_cpu(synchronizereply->status));
		cmd->result = DID_OK << 16 |
			COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
	}

	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	cmd->scsi_done(cmd);
}
/*
 * aac_synchronize - emulate SCSI SYNCHRONIZE CACHE (10) for a container
 * @scsicmd: the SYNCHRONIZE CACHE command to service
 *
 * Decodes the LBA/count from the CDB, waits (by requeuing) until no
 * firmware-owned WRITE overlapping that range is still outstanding on
 * this device, then sends a CT_FLUSH_CACHE FIB to the adapter.
 *
 * Returns 0 when queued, SCSI_MLQUEUE_DEVICE_BUSY while overlapping
 * writes are in flight, or SCSI_MLQUEUE_HOST_BUSY on reset/allocation/
 * send failure.
 */
static int aac_synchronize(struct scsi_cmnd *scsicmd)
{
	int status;
	struct fib *cmd_fibcontext;
	struct aac_synchronize *synchronizecmd;
	struct scsi_cmnd *cmd;
	struct scsi_device *sdev = scsicmd->device;
	int active = 0;
	struct aac_dev *aac;
	/* SYNCHRONIZE CACHE (10): 4-byte LBA at bytes 2-5, 2-byte count at 7-8 */
	u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
		(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
	u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
	unsigned long flags;
	/*
	 * Wait for all outstanding queued commands to complete to this
	 * specific target (block).
	 */
	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(cmd, &sdev->cmd_list, list)
		if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
			u64 cmnd_lba;
			u32 cmnd_count;
			/* Decode LBA/length of the in-flight command per its CDB format. */
			if (cmd->cmnd[0] == WRITE_6) {
				cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
					(cmd->cmnd[2] << 8) |
					cmd->cmnd[3];
				cmnd_count = cmd->cmnd[4];
				/* WRITE(6): transfer length 0 means 256 blocks */
				if (cmnd_count == 0)
					cmnd_count = 256;
			} else if (cmd->cmnd[0] == WRITE_16) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
					((u64)cmd->cmnd[3] << 48) |
					((u64)cmd->cmnd[4] << 40) |
					((u64)cmd->cmnd[5] << 32) |
					((u64)cmd->cmnd[6] << 24) |
					(cmd->cmnd[7] << 16) |
					(cmd->cmnd[8] << 8) |
					cmd->cmnd[9];
				cmnd_count = (cmd->cmnd[10] << 24) |
					(cmd->cmnd[11] << 16) |
					(cmd->cmnd[12] << 8) |
					cmd->cmnd[13];
			} else if (cmd->cmnd[0] == WRITE_12) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
					(cmd->cmnd[3] << 16) |
					(cmd->cmnd[4] << 8) |
					cmd->cmnd[5];
				cmnd_count = (cmd->cmnd[6] << 24) |
					(cmd->cmnd[7] << 16) |
					(cmd->cmnd[8] << 8) |
					cmd->cmnd[9];
			} else if (cmd->cmnd[0] == WRITE_10) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
					(cmd->cmnd[3] << 16) |
					(cmd->cmnd[4] << 8) |
					cmd->cmnd[5];
				cmnd_count = (cmd->cmnd[7] << 8) |
					cmd->cmnd[8];
			} else
				/* non-WRITE commands do not dirty the cache range */
				continue;
			/*
			 * Skip writes that do not overlap the flush range;
			 * count == 0 means "flush from lba to end of device",
			 * so only writes entirely below lba can be skipped.
			 */
			if (((cmnd_lba + cmnd_count) < lba) ||
				(count && ((lba + count) < cmnd_lba)))
				continue;
			++active;
			break;
		}
	spin_unlock_irqrestore(&sdev->list_lock, flags);
	/*
	 * Yield the processor (requeue for later)
	 */
	if (active)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	aac = (struct aac_dev *)sdev->host->hostdata;
	if (aac->in_reset)
		return SCSI_MLQUEUE_HOST_BUSY;
	/*
	 * Allocate and initialize a Fib
	 */
	if (!(cmd_fibcontext = aac_fib_alloc(aac)))
		return SCSI_MLQUEUE_HOST_BUSY;
	aac_fib_init(cmd_fibcontext);
	synchronizecmd = fib_data(cmd_fibcontext);
	synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
	synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
	synchronizecmd->count =
	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
	/*
	 * Now send the Fib to the adapter
	 */
	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof(struct aac_synchronize),
		  FsaNormal,
		  0, 1,
		  (fib_callback)synchronize_callback,
		  (void *)scsicmd);
	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		/* Firmware owns the command until synchronize_callback runs. */
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}
	printk(KERN_WARNING
		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return SCSI_MLQUEUE_HOST_BUSY;
}
/*
 * aac_start_stop_callback - completion handler for a power-management FIB
 * @context: the originating struct scsi_cmnd
 * @fibptr:  the completed FIB
 *
 * The reply to a CT_PM_START_UNIT/CT_PM_STOP_UNIT request is always
 * reported back to the midlayer as GOOD status.
 */
static void aac_start_stop_callback(void *context, struct fib *fibptr)
{
	struct scsi_cmnd *cmd = context;

	if (!aac_valid_context(cmd, fibptr))
		return;

	BUG_ON(fibptr == NULL);

	cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	cmd->scsi_done(cmd);
}
/*
 * aac_start_stop - emulate SCSI START STOP UNIT for a container
 * @scsicmd: the command to service
 *
 * When the adapter advertises power management, a CT_POWER_MANAGEMENT
 * FIB (start or stop, optionally immediate) is queued to the firmware;
 * otherwise the command is completed immediately with GOOD status.
 *
 * Returns 0 when the command was queued or completed, or
 * SCSI_MLQUEUE_HOST_BUSY to ask the midlayer to retry.
 */
static int aac_start_stop(struct scsi_cmnd *scsicmd)
{
	struct scsi_device *sdev = scsicmd->device;
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
	struct aac_power_management *pm;
	struct fib *fib;
	int status;

	if (!(aac->supplement_adapter_info.SupportedOptions2 &
	      AAC_OPTION_POWER_MANAGEMENT)) {
		/* No PM support: pretend success and complete right away. */
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
				  SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	if (aac->in_reset)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Build the power-management request in a fresh FIB. */
	fib = aac_fib_alloc(aac);
	if (!fib)
		return SCSI_MLQUEUE_HOST_BUSY;
	aac_fib_init(fib);

	pm = fib_data(fib);
	pm->command = cpu_to_le32(VM_ContainerConfig);
	pm->type = cpu_to_le32(CT_POWER_MANAGEMENT);
	/* Eject bit ignored, not relevant */
	if (scsicmd->cmnd[4] & 1)
		pm->sub = cpu_to_le32(CT_PM_START_UNIT);
	else
		pm->sub = cpu_to_le32(CT_PM_STOP_UNIT);
	pm->cid = cpu_to_le32(sdev_id(sdev));
	if (scsicmd->cmnd[1] & 1)
		pm->parm = cpu_to_le32(CT_PM_UNIT_IMMEDIATE);
	else
		pm->parm = 0;

	/* Hand the FIB to the adapter. */
	status = aac_fib_send(ContainerCommand,
			      fib,
			      sizeof(struct aac_power_management),
			      FsaNormal,
			      0, 1,
			      (fib_callback)aac_start_stop_callback,
			      (void *)scsicmd);

	if (status == -EINPROGRESS) {
		/* Queued: firmware owns the command from here. */
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	aac_fib_complete(fib);
	aac_fib_free(fib);
	return SCSI_MLQUEUE_HOST_BUSY;
}
/**
 * aac_scsi_cmd() - Process SCSI command
 * @scsicmd: SCSI command block
 *
 * Emulate a SCSI command and queue the required request for the
 * aacraid firmware.
 *
 * Commands that can be answered from driver state (INQUIRY, capacity,
 * mode sense, sense, lock) are completed inline; reads/writes and
 * cache flushes are forwarded to the firmware.  Returns 0 when the
 * command has been queued or completed, -1 when the adapter is in
 * reset (midlayer should retry).
 */
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
	u32 cid;
	struct Scsi_Host *host = scsicmd->device->host;
	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;

	if (fsa_dev_ptr == NULL)
		return -1;
	/*
	 * If the bus, id or lun is out of range, return fail
	 * Test does not apply to ID 16, the pseudo id for the controller
	 * itself.
	 */
	cid = scmd_id(scsicmd);
	if (cid != host->this_id) {
		if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
			if((cid >= dev->maximum_num_containers) ||
					(scsicmd->device->lun != 0)) {
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
			/*
			 * If the target container doesn't exist, it may have
			 * been newly created
			 */
			if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
			  (fsa_dev_ptr[cid].sense_data.sense_key ==
			   NOT_READY)) {
				switch (scsicmd->cmnd[0]) {
				case SERVICE_ACTION_IN_16:
					/* only READ CAPACITY(16) on a raw-io-64
					 * adapter triggers a probe */
					if (!(dev->raw_io_interface) ||
					    !(dev->raw_io_64) ||
					    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
						break;
					/* fall through */
				case INQUIRY:
				case READ_CAPACITY:
				case TEST_UNIT_READY:
					if (dev->in_reset)
						return -1;
					/* re-probe: the container may have just
					 * been created or become ready */
					return _aac_probe_container(scsicmd,
							aac_probe_container_callback2);
				default:
					break;
				}
			}
		} else {  /* check for physical non-dasd devices */
			if (dev->nondasd_support || expose_physicals ||
					dev->jbod) {
				if (dev->in_reset)
					return -1;
				/* pass the command through as a raw SRB */
				return aac_send_srb_fib(scsicmd);
			} else {
				scsicmd->result = DID_NO_CONNECT << 16;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
		}
	}
	/*
	 * else Command for the controller itself
	 */
	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
		(scsicmd->cmnd[0] != TEST_UNIT_READY))
	{
		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
		  ASENCODE_INVALID_COMMAND, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	/* Handle commands here that don't really require going out to the adapter */
	switch (scsicmd->cmnd[0]) {
	case INQUIRY:
	{
		struct inquiry_data inq_data;

		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
		memset(&inq_data, 0, sizeof (struct inquiry_data));

		if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
			char *arr = (char *)&inq_data;

			/* EVPD bit set */
			arr[0] = (scmd_id(scsicmd) == host->this_id) ?
				INQD_PDT_PROC : INQD_PDT_DA;
			if (scsicmd->cmnd[2] == 0) {
				/* supported vital product data pages */
				arr[3] = 3;
				arr[4] = 0x0;
				arr[5] = 0x80;
				arr[6] = 0x83;
				arr[1] = scsicmd->cmnd[2];
				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
							 sizeof(inq_data));
				scsicmd->result = DID_OK << 16 |
					COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			} else if (scsicmd->cmnd[2] == 0x80) {
				/* unit serial number page */
				arr[3] = setinqserial(dev, &arr[4],
						      scmd_id(scsicmd));
				arr[1] = scsicmd->cmnd[2];
				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
							 sizeof(inq_data));
				/* aac_wwn == 2: synthesized serial only;
				 * otherwise ask the firmware for the real one */
				if (aac_wwn != 2)
					return aac_get_container_serial(
						scsicmd);
				scsicmd->result = DID_OK << 16 |
					COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			} else if (scsicmd->cmnd[2] == 0x83) {
				/* vpd page 0x83 - Device Identification Page */
				char *sno = (char *)&inq_data;
				sno[3] = setinqserial(dev, &sno[4],
						      scmd_id(scsicmd));
				if (aac_wwn != 2)
					return aac_get_container_serial(
						scsicmd);
				scsicmd->result = DID_OK << 16 |
					COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			} else {
				/* vpd page not implemented */
				scsicmd->result = DID_OK << 16 |
					COMMAND_COMPLETE << 8 |
					SAM_STAT_CHECK_CONDITION;
				set_sense(&dev->fsa_dev[cid].sense_data,
				  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
				  ASENCODE_NO_SENSE, 7, 2);
				memcpy(scsicmd->sense_buffer,
				  &dev->fsa_dev[cid].sense_data,
				  min_t(size_t,
					sizeof(dev->fsa_dev[cid].sense_data),
					SCSI_SENSE_BUFFERSIZE));
			}
			scsicmd->scsi_done(scsicmd);
			return 0;
		}
		inq_data.inqd_ver = 2;	/* claim compliance to SCSI-2 */
		inq_data.inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
		inq_data.inqd_len = 31;
		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
		inq_data.inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
		/*
		 * Set the Vendor, Product, and Revision Level
		 * see: <vendor>.c i.e. aac.c
		 */
		if (cid == host->this_id) {
			/* INQUIRY aimed at the controller pseudo-device */
			setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
			inq_data.inqd_pdt = INQD_PDT_PROC;	/* Processor device */
			scsi_sg_copy_from_buffer(scsicmd, &inq_data,
						 sizeof(inq_data));
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
			scsicmd->scsi_done(scsicmd);
			return 0;
		}
		if (dev->in_reset)
			return -1;
		setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
		inq_data.inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
		/* product-id portion is filled in asynchronously */
		return aac_get_container_name(scsicmd);
	}
	case SERVICE_ACTION_IN_16:
		/* only READ CAPACITY(16) is emulated, and only with 64-bit raw I/O */
		if (!(dev->raw_io_interface) ||
		    !(dev->raw_io_64) ||
		    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
			break;
	{
		u64 capacity;
		char cp[13];
		unsigned int alloc_len;

		dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
		/* returned value is the LBA of the last block */
		capacity = fsa_dev_ptr[cid].size - 1;
		cp[0] = (capacity >> 56) & 0xff;
		cp[1] = (capacity >> 48) & 0xff;
		cp[2] = (capacity >> 40) & 0xff;
		cp[3] = (capacity >> 32) & 0xff;
		cp[4] = (capacity >> 24) & 0xff;
		cp[5] = (capacity >> 16) & 0xff;
		cp[6] = (capacity >> 8) & 0xff;
		cp[7] = (capacity >> 0) & 0xff;
		cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
		cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
		cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
		cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
		cp[12] = 0;

		/* honour the CDB allocation length (bytes 10-13) */
		alloc_len = ((scsicmd->cmnd[10] << 24)
			     + (scsicmd->cmnd[11] << 16)
			     + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);

		alloc_len = min_t(size_t, alloc_len, sizeof(cp));
		scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
		if (alloc_len < scsi_bufflen(scsicmd))
			scsi_set_resid(scsicmd,
				       scsi_bufflen(scsicmd) - alloc_len);

		/* Do not cache partition table for arrays */
		scsicmd->device->removable = 1;

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case READ_CAPACITY:
	{
		u32 capacity;
		char cp[8];

		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
		/* clamp to 0xffffffff when the container needs READ CAPACITY(16) */
		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
			capacity = fsa_dev_ptr[cid].size - 1;
		else
			capacity = (u32)-1;

		cp[0] = (capacity >> 24) & 0xff;
		cp[1] = (capacity >> 16) & 0xff;
		cp[2] = (capacity >> 8) & 0xff;
		cp[3] = (capacity >> 0) & 0xff;
		cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
		cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
		cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
		cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
		scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
		/* Do not cache partition table for arrays */
		scsicmd->device->removable = 1;
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
		  SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case MODE_SENSE:
	{
		int mode_buf_length = 4;
		u32 capacity;
		aac_modep_data mpd;

		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
			capacity = fsa_dev_ptr[cid].size - 1;
		else
			capacity = (u32)-1;

		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
		memset((char *)&mpd, 0, sizeof(aac_modep_data));

		/* Mode data length */
		mpd.hd.data_length = sizeof(mpd.hd) - 1;
		/* Medium type - default */
		mpd.hd.med_type = 0;
		/* Device-specific param,
		   bit 8: 0/1 = write enabled/protected
		   bit 4: 0/1 = FUA enabled */
		mpd.hd.dev_par = 0;

		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
			mpd.hd.dev_par = 0x10;
		if (scsicmd->cmnd[1] & 0x8)
			mpd.hd.bd_length = 0;	/* Block descriptor length */
		else {
			mpd.hd.bd_length = sizeof(mpd.bd);
			mpd.hd.data_length += mpd.hd.bd_length;
			mpd.bd.block_length[0] =
				(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
			mpd.bd.block_length[1] =
				(fsa_dev_ptr[cid].block_size >> 8) &  0xff;
			mpd.bd.block_length[2] =
				fsa_dev_ptr[cid].block_size  & 0xff;

			mpd.mpc_buf[0] = scsicmd->cmnd[2];
			if (scsicmd->cmnd[2] == 0x1C) {
				/* page length */
				mpd.mpc_buf[1] = 0xa;
				/* Mode data length */
				mpd.hd.data_length = 23;
			} else {
				/* Mode data length */
				mpd.hd.data_length = 15;
			}

			if (capacity > 0xffffff) {
				mpd.bd.block_count[0] = 0xff;
				mpd.bd.block_count[1] = 0xff;
				mpd.bd.block_count[2] = 0xff;
			} else {
				mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
				mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
				mpd.bd.block_count[2] = capacity  & 0xff;
			}
		}
		/* caching page (0x08) requested, directly or via all-pages (0x3f) */
		if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
		  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
			mpd.hd.data_length += 3;
			mpd.mpc_buf[0] = 8;
			mpd.mpc_buf[1] = 1;
			mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
				? 0 : 0x04; /* WCE */
			mode_buf_length = sizeof(mpd);
		}

		if (mode_buf_length > scsicmd->cmnd[4])
			mode_buf_length = scsicmd->cmnd[4];
		else
			mode_buf_length = sizeof(mpd);
		scsi_sg_copy_from_buffer(scsicmd,
					 (char *)&mpd,
					 mode_buf_length);
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case MODE_SENSE_10:
	{
		u32 capacity;
		int mode_buf_length = 8;
		aac_modep10_data mpd10;

		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
			capacity = fsa_dev_ptr[cid].size - 1;
		else
			capacity = (u32)-1;

		dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
		memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
		/* Mode data length (MSB) */
		mpd10.hd.data_length[0] = 0;
		/* Mode data length (LSB) */
		mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
		/* Medium type - default */
		mpd10.hd.med_type = 0;
		/* Device-specific param,
		   bit 8: 0/1 = write enabled/protected
		   bit 4: 0/1 = FUA enabled */
		mpd10.hd.dev_par = 0;

		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
			mpd10.hd.dev_par = 0x10;
		mpd10.hd.rsrvd[0] = 0;	/* reserved */
		mpd10.hd.rsrvd[1] = 0;	/* reserved */
		if (scsicmd->cmnd[1] & 0x8) {
			/* Block descriptor length (MSB) */
			mpd10.hd.bd_length[0] = 0;
			/* Block descriptor length (LSB) */
			mpd10.hd.bd_length[1] = 0;
		} else {
			mpd10.hd.bd_length[0] = 0;
			mpd10.hd.bd_length[1] = sizeof(mpd10.bd);

			mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];

			mpd10.bd.block_length[0] =
				(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
			mpd10.bd.block_length[1] =
				(fsa_dev_ptr[cid].block_size >> 8) & 0xff;
			mpd10.bd.block_length[2] =
				fsa_dev_ptr[cid].block_size  & 0xff;

			if (capacity > 0xffffff) {
				mpd10.bd.block_count[0] = 0xff;
				mpd10.bd.block_count[1] = 0xff;
				mpd10.bd.block_count[2] = 0xff;
			} else {
				mpd10.bd.block_count[0] =
					(capacity >> 16) & 0xff;
				mpd10.bd.block_count[1] =
					(capacity >> 8) & 0xff;
				mpd10.bd.block_count[2] =
					capacity  & 0xff;
			}
		}
		/* caching page (0x08) requested, directly or via all-pages (0x3f) */
		if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
		    ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
			mpd10.hd.data_length[1] += 3;
			mpd10.mpc_buf[0] = 8;
			mpd10.mpc_buf[1] = 1;
			mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
				? 0 : 0x04; /* WCE */
			mode_buf_length = sizeof(mpd10);
			if (mode_buf_length > scsicmd->cmnd[8])
				mode_buf_length = scsicmd->cmnd[8];
		}
		scsi_sg_copy_from_buffer(scsicmd,
					 (char *)&mpd10,
					 mode_buf_length);

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);

		return 0;
	}
	case REQUEST_SENSE:
		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
		/* hand back and clear the stored sense for this container */
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
		memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;

	case ALLOW_MEDIUM_REMOVAL:
		dprintk((KERN_DEBUG "LOCK command.\n"));
		if (scsicmd->cmnd[4])
			fsa_dev_ptr[cid].locked = 1;
		else
			fsa_dev_ptr[cid].locked = 0;

		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;
	/*
	 *	These commands are all No-Ops
	 */
	case TEST_UNIT_READY:
		if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
			/* container is still spinning up; report BECOMING READY */
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
				SAM_STAT_CHECK_CONDITION;
			set_sense(&dev->fsa_dev[cid].sense_data,
				  NOT_READY, SENCODE_BECOMING_READY,
				  ASENCODE_BECOMING_READY, 0, 0);
			memcpy(scsicmd->sense_buffer,
			       &dev->fsa_dev[cid].sense_data,
			       min_t(size_t,
				     sizeof(dev->fsa_dev[cid].sense_data),
				     SCSI_SENSE_BUFFERSIZE));
			scsicmd->scsi_done(scsicmd);
			return 0;
		}
		/* FALLTHRU */
	case RESERVE:
	case RELEASE:
	case REZERO_UNIT:
	case REASSIGN_BLOCKS:
	case SEEK_10:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
		scsicmd->scsi_done(scsicmd);
		return 0;

	case START_STOP:
		return aac_start_stop(scsicmd);
	}

	/* Commands that need to go out to the firmware. */
	switch (scsicmd->cmnd[0])
	{
		case READ_6:
		case READ_10:
		case READ_12:
		case READ_16:
			if (dev->in_reset)
				return -1;
			/*
			 *	Hack to keep track of ordinal number of the device that
			 *	corresponds to a container. Needed to convert
			 *	containers to /dev/sd device names
			 */

			if (scsicmd->request->rq_disk)
				strlcpy(fsa_dev_ptr[cid].devname,
				scsicmd->request->rq_disk->disk_name,
				min(sizeof(fsa_dev_ptr[cid].devname),
				sizeof(scsicmd->request->rq_disk->disk_name) + 1));

			return aac_read(scsicmd);

		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case WRITE_16:
			if (dev->in_reset)
				return -1;
			return aac_write(scsicmd);

		case SYNCHRONIZE_CACHE:
			if (((aac_cache & 6) == 6) && dev->cache_protected) {
				/* battery-protected cache: flush is a no-op */
				scsicmd->result = DID_OK << 16 |
					COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
				scsicmd->scsi_done(scsicmd);
				return 0;
			}
			/* Issue FIB to tell Firmware to flush it's cache */
			if ((aac_cache & 6) != 2)
				return aac_synchronize(scsicmd);
			/* FALLTHRU */
		default:
			/*
			 * Unhandled commands
			 */
			dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
			set_sense(&dev->fsa_dev[cid].sense_data,
			  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
			  ASENCODE_INVALID_COMMAND, 0, 0);
			memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
			  min_t(size_t,
				sizeof(dev->fsa_dev[cid].sense_data),
				SCSI_SENSE_BUFFERSIZE));
			scsicmd->scsi_done(scsicmd);
			return 0;
	}
}
/*
 * query_disk - FSACTL_QUERY_DISK ioctl handler
 * @dev: adapter instance
 * @arg: user pointer to a struct aac_query_disk (read and written back)
 *
 * Looks up a container either by container number (cnum) or, when
 * cnum == -1, by SCSI id.  Fills in validity/lock/deletion state and
 * the mapped device name.  Returns 0 on success or a negative errno.
 */
static int query_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_query_disk qd;
	struct fsa_dev_info *fsa_dev_ptr;

	fsa_dev_ptr = dev->fsa_dev;
	if (!fsa_dev_ptr)
		return -EBUSY;
	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
		return -EFAULT;
	if (qd.cnum == -1) {
		/*
		 * BUGFIX: qd.id is user-controlled and was previously used
		 * to index fsa_dev_ptr[] without validation, allowing an
		 * out-of-bounds read via this ioctl.
		 */
		if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
			return -EINVAL;
		qd.cnum = qd.id;
	} else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
			return -EINVAL;
		qd.instance = dev->scsi_host_ptr->host_no;
		qd.bus = 0;
		qd.id = CONTAINER_TO_ID(qd.cnum);
		qd.lun = CONTAINER_TO_LUN(qd.cnum);
	} else
		return -EINVAL;

	qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
	qd.locked = fsa_dev_ptr[qd.cnum].locked;
	qd.deleted = fsa_dev_ptr[qd.cnum].deleted;

	/* unmapped until a block device name has been recorded */
	if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
		qd.unmapped = 1;
	else
		qd.unmapped = 0;

	strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
	  min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));

	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
		return -EFAULT;
	return 0;
}
/*
 * force_delete_disk - FSACTL_FORCE_DELETE_DISK ioctl handler
 * @dev: adapter instance
 * @arg: user pointer to a struct aac_delete_disk
 *
 * Unconditionally marks the given container as deleted and no longer
 * valid (ignores the lock state, unlike delete_disk()).
 */
static int force_delete_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_delete_disk dd;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;

	if (!fsa_dev_ptr)
		return -EBUSY;

	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
		return -EFAULT;

	if (dd.cnum >= dev->maximum_num_containers)
		return -EINVAL;

	/* Flag as deleted and invalidate the container entry. */
	fsa_dev_ptr[dd.cnum].deleted = 1;
	fsa_dev_ptr[dd.cnum].valid = 0;
	return 0;
}
/*
 * delete_disk - FSACTL_DELETE_DISK ioctl handler
 * @dev: adapter instance
 * @arg: user pointer to a struct aac_delete_disk
 *
 * Marks the given container invalid and clears its device name,
 * unless the container is currently locked (-EBUSY in that case).
 */
static int delete_disk(struct aac_dev *dev, void __user *arg)
{
	struct aac_delete_disk dd;
	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;

	if (!fsa_dev_ptr)
		return -EBUSY;

	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
		return -EFAULT;

	if (dd.cnum >= dev->maximum_num_containers)
		return -EINVAL;

	/* Locked containers may not be deleted through this interface. */
	if (fsa_dev_ptr[dd.cnum].locked)
		return -EBUSY;

	/* Invalidate the entry and forget its block device mapping. */
	fsa_dev_ptr[dd.cnum].valid = 0;
	fsa_dev_ptr[dd.cnum].devname[0] = '\0';
	return 0;
}
/*
 * aac_dev_ioctl - dispatch container-management ioctls to their handlers
 * @dev: adapter instance
 * @cmd: FSACTL_* ioctl number
 * @arg: user-space argument pointer
 *
 * Returns the handler's result, or -ENOTTY for unknown commands.
 */
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
{
	if (cmd == FSACTL_QUERY_DISK)
		return query_disk(dev, arg);
	if (cmd == FSACTL_DELETE_DISK)
		return delete_disk(dev, arg);
	if (cmd == FSACTL_FORCE_DELETE_DISK)
		return force_delete_disk(dev, arg);
	if (cmd == FSACTL_GET_CONTAINERS)
		return aac_get_containers(dev);
	return -ENOTTY;
}
/**
 *
 * aac_srb_callback
 * @context: the context set in the fib - here it is scsi cmd
 * @fibptr: pointer to the fib
 *
 * Handles the completion of a scsi command to a non dasd device
 * (pass-through SRB).  Translates the firmware FIB status, SRB status
 * and embedded SCSI status into a midlayer result, copies any sense
 * data, then releases the FIB and completes the command.
 */
static void aac_srb_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_srb_reply *srbreply;
	struct scsi_cmnd *scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	/* Bail out if the command was already aborted/timed out. */
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	BUG_ON(fibptr == NULL);

	/* NOTE(review): dev is assigned here but not referenced below. */
	dev = fibptr->dev;

	srbreply = (struct aac_srb_reply *) fib_data(fibptr);

	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
		/* fast response: firmware omitted the reply payload,
		 * so synthesize a successful one */
		srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
		srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
	} else {
		/*
		 *	Calculate resid for sg
		 */
		scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
				   - le32_to_cpu(srbreply->data_xfer_length));
	}

	scsi_dma_unmap(scsicmd);

	/* expose physical device if expose_physicald flag is on */
	if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
	  && expose_physicals > 0)
		aac_expose_phy_device(scsicmd);

	/*
	 * First check the fib status
	 */

	if (le32_to_cpu(srbreply->status) != ST_OK){
		int len;
		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
			    SCSI_SENSE_BUFFERSIZE);
		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
	}

	/*
	 * Next check the srb status
	 */
	switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
	case SRB_STATUS_ERROR_RECOVERY:
	case SRB_STATUS_PENDING:
	case SRB_STATUS_SUCCESS:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
		break;
	case SRB_STATUS_DATA_OVERRUN:
		switch(scsicmd->cmnd[0]){
		case  READ_6:
		case  WRITE_6:
		case  READ_10:
		case  WRITE_10:
		case  READ_12:
		case  WRITE_12:
		case  READ_16:
		case  WRITE_16:
			/* distinguish underflow from true overrun for logging */
			if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
				printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
			} else {
				printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
			}
			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		case INQUIRY: {
			/* short INQUIRY data is acceptable */
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		}
		default:
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		}
		break;
	case SRB_STATUS_ABORTED:
		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
		break;
	case SRB_STATUS_ABORT_FAILED:
		// Not sure about this one - but assuming the hba was trying to abort for some reason
		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
		break;
	case SRB_STATUS_PARITY_ERROR:
		scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
		break;
	case SRB_STATUS_NO_DEVICE:
	case SRB_STATUS_INVALID_PATH_ID:
	case SRB_STATUS_INVALID_TARGET_ID:
	case SRB_STATUS_INVALID_LUN:
	case SRB_STATUS_SELECTION_TIMEOUT:
		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_COMMAND_TIMEOUT:
	case SRB_STATUS_TIMEOUT:
		scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_BUSY:
		scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_BUS_RESET:
		scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_MESSAGE_REJECTED:
		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
		break;
	case SRB_STATUS_REQUEST_FLUSHED:
	case SRB_STATUS_ERROR:
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_REQUEST_SENSE_FAILED:
	case SRB_STATUS_NO_HBA:
	case SRB_STATUS_UNEXPECTED_BUS_FREE:
	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
	case SRB_STATUS_DELAYED_RETRY:
	case SRB_STATUS_BAD_FUNCTION:
	case SRB_STATUS_NOT_STARTED:
	case SRB_STATUS_NOT_IN_USE:
	case SRB_STATUS_FORCE_ABORT:
	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
	default:
#ifdef AAC_DETAILED_STATUS_INFO
		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
			le32_to_cpu(srbreply->srb_status) & 0x3F,
			aac_get_status_string(
				le32_to_cpu(srbreply->srb_status) & 0x3F),
			scsicmd->cmnd[0],
			le32_to_cpu(srbreply->scsi_status));
#endif
		/* ATA pass-through with CK_COND set still returns data,
		 * so treat it as success */
		if ((scsicmd->cmnd[0] == ATA_12)
		  || (scsicmd->cmnd[0] == ATA_16)) {
			if (scsicmd->cmnd[2] & (0x01 << 5)) {
				scsicmd->result = DID_OK << 16
					| COMMAND_COMPLETE << 8;
				break;
			} else {
				scsicmd->result = DID_ERROR << 16
					| COMMAND_COMPLETE << 8;
				break;
			}
		} else {
			scsicmd->result = DID_ERROR << 16
				| COMMAND_COMPLETE << 8;
			break;
		}
	}
	if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
		int len;
		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
			    SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
		printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
					le32_to_cpu(srbreply->status), len);
#endif
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
	}
	/*
	 * OR in the scsi status (already shifted up a bit)
	 */
	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);

	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	scsicmd->scsi_done(scsicmd);
}
/**
 * aac_send_srb_fib
 * @scsicmd: the scsi command block
 *
 * Forms a FIB for a pass-through SRB request from @scsicmd and hands it
 * to the adapter via aac_adapter_scsi().  Returns 0 when the command
 * was queued (or completed with DID_NO_CONNECT), -1 on failure.
 */
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
{
	struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	struct fib *fib;
	int rc;

	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
	    scsicmd->device->lun > 7) {
		/* Target/LUN is outside the supported physical range. */
		scsicmd->result = DID_NO_CONNECT << 16;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	/*
	 * Allocate a FIB and let the adapter-specific code build and
	 * send the SRB.
	 */
	fib = aac_fib_alloc(dev);
	if (!fib)
		return -1;

	rc = aac_adapter_scsi(fib, scsicmd);
	if (rc == -EINPROGRESS) {
		/* Queued: firmware owns the command from here. */
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", rc);
	aac_fib_complete(fib);
	aac_fib_free(fib);
	return -1;
}
/*
 * aac_build_sg - build a 32-bit scatter/gather map for a command
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @psg:     adapter sgmap to fill in
 *
 * DMA-maps the command's buffer and translates each segment into the
 * adapter's 32-bit s/g entry format, trimming the last entry so the
 * total matches the request size exactly.  Returns the total mapped
 * byte count, or the negative value from scsi_dma_map() on failure.
 *
 * Fix: the original fetched host->hostdata into a local 'dev' that was
 * never used (dead store); removed.
 */
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
	unsigned long byte_count = 0;
	int nseg;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sg;
		int i;

		psg->count = cpu_to_le32(nseg);

		scsi_for_each_sg(scsicmd, sg, nseg, i) {
			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
			byte_count += sg_dma_len(sg);
		}
		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsi_bufflen(scsicmd));
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}
	return byte_count;
}
/*
 * aac_build_sg64 - build a 64-bit scatter/gather map for a command
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @psg:     adapter sgmap64 to fill in
 *
 * Like aac_build_sg() but emits 64-bit DMA addresses split into
 * low/high 32-bit halves.  Returns the total mapped byte count, or
 * the negative value from scsi_dma_map() on failure.
 *
 * Fix: the original fetched host->hostdata into a local 'dev' that was
 * never used (dead store); removed.
 */
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
	unsigned long byte_count = 0;
	u64 addr;
	int nseg;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sg;
		int i;

		scsi_for_each_sg(scsicmd, sg, nseg, i) {
			int count = sg_dma_len(sg);
			addr = sg_dma_address(sg);
			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
			psg->sg[i].count = cpu_to_le32(count);
			byte_count += count;
		}
		psg->count = cpu_to_le32(nseg);
		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsi_bufflen(scsicmd));
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}
	return byte_count;
}
/*
 * aac_build_sgraw - build a raw-I/O scatter/gather map for a command
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @psg:     adapter sgmapraw to fill in
 *
 * DMA-maps the buffer and fills the raw s/g entry format (64-bit
 * address split across addr[1]/addr[0], next/prev/flags zeroed),
 * trimming the last entry so the total matches the request size.
 * Returns the total mapped byte count, or the negative value from
 * scsi_dma_map() on failure.
 */
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
{
	unsigned long total = 0;
	int nseg;

	/* Reset the first entry so an empty map is well defined. */
	psg->count = 0;
	psg->sg[0].next = 0;
	psg->sg[0].prev = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;
	psg->sg[0].flags = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sgl;
		int idx;

		scsi_for_each_sg(scsicmd, sgl, nseg, idx) {
			int len = sg_dma_len(sgl);
			u64 dma = sg_dma_address(sgl);

			psg->sg[idx].next = 0;
			psg->sg[idx].prev = 0;
			psg->sg[idx].addr[1] = cpu_to_le32((u32)(dma>>32));
			psg->sg[idx].addr[0] = cpu_to_le32((u32)(dma & 0xffffffff));
			psg->sg[idx].count = cpu_to_le32(len);
			psg->sg[idx].flags = 0;
			total += len;
		}
		psg->count = cpu_to_le32(nseg);
		/* hba wants the size to be exact */
		if (total > scsi_bufflen(scsicmd)) {
			u32 trimmed = le32_to_cpu(psg->sg[idx-1].count) -
				(total - scsi_bufflen(scsicmd));
			psg->sg[idx-1].count = cpu_to_le32(trimmed);
			total = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if (scsicmd->underflow && (total < scsicmd->underflow)) {
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					total, scsicmd->underflow);
		}
	}
	return total;
}
/*
 * aac_build_sgraw2 - build an IEEE-1212 (raw_io2) scatter/gather list
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @rio2:    raw-I/O-2 request whose sge[] array is filled in
 * @sg_max:  capacity of rio2->sge[]; exceeded count triggers BUG_ON
 *
 * Maps the buffer, fills rio2->sge[], and marks the list CONFORMANT
 * when all middle elements share one nominal size; otherwise it tries
 * to split elements into equal page-multiple chunks via
 * aac_convert_sgraw2().  Returns total mapped bytes or the negative
 * value from scsi_dma_map() on failure.
 *
 * NOTE(review): cur_size holds a cpu_to_le32() value and is compared /
 * divided without le32_to_cpu(); on little-endian this is a no-op, but
 * the big-endian behavior looks suspect — TODO confirm against a BE
 * target before relying on the conformance path there.
 */
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
			struct aac_raw_io2 *rio2, int sg_max)
{
	unsigned long byte_count = 0;
	int nseg;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sg;
		int i, conformable = 0;
		u32 min_size = PAGE_SIZE, cur_size;

		scsi_for_each_sg(scsicmd, sg, nseg, i) {
			int count = sg_dma_len(sg);
			u64 addr = sg_dma_address(sg);

			BUG_ON(i >= sg_max);
			rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
			rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
			cur_size = cpu_to_le32(count);
			rio2->sge[i].length = cur_size;
			rio2->sge[i].flags = 0;
			if (i == 0) {
				conformable = 1;
				rio2->sgeFirstSize = cur_size;
			} else if (i == 1) {
				/* second element defines the nominal size */
				rio2->sgeNominalSize = cur_size;
				min_size = cur_size;
			} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
				/* a middle element deviates: list not conformant */
				conformable = 0;
				if (cur_size < min_size)
					min_size = cur_size;
			}
			byte_count += count;
		}

		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
				(byte_count - scsi_bufflen(scsicmd));
			rio2->sge[i-1].length = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}

		rio2->sgeCnt = cpu_to_le32(nseg);
		rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
		/* not conformable: evaluate required sg elements */
		if (!conformable) {
			int j, nseg_new = nseg, err_found;
			/* find the largest page-multiple chunk size that divides
			 * every middle element evenly */
			for (i = min_size / PAGE_SIZE; i >= 1; --i) {
				err_found = 0;
				nseg_new = 2;
				for (j = 1; j < nseg - 1; ++j) {
					if (rio2->sge[j].length % (i*PAGE_SIZE)) {
						err_found = 1;
						break;
					}
					nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
				}
				if (!err_found)
					break;
			}
			/* NOTE(review): aac_convert_sgraw2()'s return value is
			 * ignored; on failure the list simply stays
			 * non-conformant, which appears to be a valid fallback. */
			if (i > 0 && nseg_new <= sg_max)
				aac_convert_sgraw2(rio2, i, nseg, nseg_new);
		} else
			rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);

		/* Check for command underflow */
		if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}

	return byte_count;
}
/*
 * aac_convert_sgraw2 - re-chunk a non-conformant raw2 SG list
 * @rio2:     request whose sge[] list is rewritten in place
 * @pages:    chunk size in pages; every middle element is a multiple of it
 *            (verified by the caller, aac_build_sgraw2())
 * @nseg:     current number of SG elements
 * @nseg_new: resulting element count (caller verified it fits sg_max)
 *
 * Splits sge[1..nseg-2] into runs of (pages * PAGE_SIZE) chunks so the
 * list becomes conformant; element 0 and the final element are preserved.
 * Conversion is a no-op when the aac_convert_sgl module parameter is 0.
 *
 * Returns 0 on success or when disabled, -1 on allocation failure (the
 * original list is left untouched in that case).
 */
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
{
	struct sge_ieee1212 *sge;
	int i, j, pos;
	u32 addr_low;

	if (aac_convert_sgl == 0)
		return 0;

	/* GFP_ATOMIC: may be called from the I/O submission path */
	sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
	if (sge == NULL)
		return -1;

	for (i = 1, pos = 1; i < nseg-1; ++i) {
		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
			addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
			sge[pos].addrLow = addr_low;
			sge[pos].addrHigh = rio2->sge[i].addrHigh;
			/* propagate the carry when the low word wrapped */
			if (addr_low < rio2->sge[i].addrLow)
				sge[pos].addrHigh++;
			sge[pos].length = pages * PAGE_SIZE;
			sge[pos].flags = 0;
			pos++;
		}
	}
	/* keep the (possibly odd-sized) final element as-is */
	sge[pos] = rio2->sge[nseg-1];
	/* element 0 stays in place; overwrite the tail with the rebuilt list */
	memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
	kfree(sge);
	rio2->sgeCnt = cpu_to_le32(nseg_new);
	rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
	rio2->sgeNominalSize = pages * PAGE_SIZE;
	return 0;
}
#ifdef AAC_DETAILED_STATUS_INFO
/* Maps one adapter SRB status code to a human-readable description. */
struct aac_srb_status_info {
	u32	status;
	char	*str;
};

/*
 * Lookup table for aac_get_status_string().  The trailing 0xff entry is a
 * catch-all; unmatched codes fall through to "Bad Status Code" instead.
 */
static struct aac_srb_status_info srb_status_info[] = {
	{ SRB_STATUS_PENDING,		"Pending Status"},
	{ SRB_STATUS_SUCCESS,		"Success"},
	{ SRB_STATUS_ABORTED,		"Aborted Command"},
	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
	{ SRB_STATUS_ERROR,		"Error Event"},
	{ SRB_STATUS_BUSY,		"Device Busy"},
	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
	{ SRB_STATUS_NO_DEVICE,		"No Device"},
	{ SRB_STATUS_TIMEOUT,		"Timeout"},
	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
	{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
	{ SRB_STATUS_NO_HBA,		"No HBA"},
	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
	{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"},
	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
	{ 0xff,				"Unknown Error"}
};
/*
 * aac_get_status_string - translate an SRB status code to readable text
 * @status: SRB status value from the adapter
 *
 * Scans srb_status_info[] for a matching code; "Bad Status Code" is
 * returned when nothing matches (the table's 0xff catch-all included).
 */
char *aac_get_status_string(u32 status)
{
	struct aac_srb_status_info *entry = srb_status_info;
	struct aac_srb_status_info *end =
		srb_status_info + ARRAY_SIZE(srb_status_info);

	while (entry < end) {
		if (entry->status == status)
			return entry->str;
		++entry;
	}
	return "Bad Status Code";
}
#endif
| gpl-2.0 |
01org/prd | drivers/scsi/aacraid/rx.c | 619 | 17637 | /*
* Adaptec AAC series RAID controller driver
* (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
* Copyright (c) 2000-2010 Adaptec, Inc.
* 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Module Name:
* rx.c
*
* Abstract: Hardware miniport for Drawbridge specific hardware functions.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
/*
 * aac_rx_intr_producer - interrupt handler for producer-comm adapters
 * @irq:    interrupt number (unused beyond the standard ISR signature)
 * @dev_id: struct aac_dev for the interrupting adapter
 *
 * Reads the outbound doorbell register and dispatches exactly one event
 * class per invocation: adapter printf, new command, new response, or a
 * queue-not-full notification.  Each doorbell bit is acknowledged by
 * writing it back to MUnit.ODR.
 */
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);
	/*
	 *	Read mask and invert because drawbridge is reversed.
	 *	This allows us to only service interrupts that have
	 *	been enabled.
	 *	Check to see if this is our interrupt.  If it isn't just return
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			/* adapter posted a diagnostic string; its handle is in
			 * Mailbox[5] */
			aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			/* most frequent case: completed responses queued */
			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			/* NOTE(review): CmdNotFull is acked here as well --
			 * matches the historical driver; confirm upstream. */
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
/*
 * aac_rx_intr_message - interrupt handler for message-comm adapters
 * @irq:    interrupt number (unused beyond the standard ISR signature)
 * @dev_id: struct aac_dev for the interrupting adapter
 *
 * Drains MUnit.OutboundQueue until it reads back empty (0xFFFFFFFF).
 * Each queue entry encodes its meaning in the low bits:
 *   bit 1 set  -> AIF event (and the value 0xFFFFFFFE is a "special"
 *                 entry that is consumed without processing),
 *   bit 0 set  -> fast-response completion,
 *   otherwise  -> the fib index is the value shifted right by 2.
 * Entries aac_intr_normal() rejects are pushed back to the adapter.
 */
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);

	/* one retry: the first read can transiently return empty */
	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
			if (Index & 0x00000002L) {
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev,
						Index, isAif,
						isFastResponse, NULL))) {
					/* handler refused the entry: return it
					 * to the adapter and ring its bell */
					rx_writel(dev,
						MUnit.OutboundQueue,
						Index);
					rx_writel(dev,
						MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
/**
* aac_rx_disable_interrupt - Disable interrupts
* @dev: Adapter
*/
static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	/* Mask every source in the Outbound Interrupt Mask Register and
	 * cache the mask in dev->OIMR for the ISR's enabled-source test. */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}
/**
* aac_rx_enable_interrupt_producer - Enable interrupts
* @dev: Adapter
*/
static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	/* 0xfb: unmask only the producer-mode doorbell source (bit 2 clear);
	 * mask value is cached in dev->OIMR for the ISR. */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}
/**
* aac_rx_enable_interrupt_message - Enable interrupts
* @dev: Adapter
*/
static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	/* 0xf7: unmask only the message-mode queue source (bit 3 clear);
	 * mask value is cached in dev->OIMR for the ISR. */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}
/**
* rx_sync_cmd - send a command and wait
* @dev: Adapter
* @command: Command to execute
* @p1: first parameter
* @ret: adapter status
*
* This routine will send a synchronous command to the adapter and wait
* for its completion.
*/
/*
 * rx_sync_cmd - issue a synchronous mailbox command and poll for completion
 * @dev:     adapter
 * @command: command code written to Mailbox 0
 * @p1-@p4:  parameters written to Mailboxes 1-4
 * @p5, @p6: accepted for interface symmetry with other variants but UNUSED
 * @status:  optional out: Mailbox 0 after completion
 * @r1-@r4:  optional out: Mailboxes 1-4 after completion
 *
 * Interrupts are masked for the duration; polls doorbell 0 for up to
 * 30 seconds.  Returns 0 on success or -ETIMEDOUT.
 */
static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	int ok;
	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 *	(p5 and p6 are never forwarded to the adapter)
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	rx_readb (dev, MUnit.OIMR);
	/*
	 *	Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 *	Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 *	Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 *	Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 *	Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 *	Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 *	Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 *	Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
/**
* aac_rx_interrupt_adapter - interrupt adapter
* @dev: Adapter
*
* Send an interrupt to the i960 and breakpoint it.
*/
static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	/* Fire-and-forget: outputs of the sync command are not needed. */
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
/**
* aac_rx_notify_adapter - send an event to the adapter
* @dev: Adapter
* @event: Event to send
*
* Notify the i960 that something it probably cares about has
* happened.
*/
/*
 * aac_rx_notify_adapter - ring the inbound doorbell matching a queue event
 * @dev:   adapter
 * @event: queue event to signal
 *
 * Each event maps to a fixed inbound doorbell bit; HostShutdown needs no
 * doorbell, and an unknown event is a driver bug (BUG()).
 */
static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		/* no doorbell for shutdown */
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}
/**
* aac_rx_start_adapter - activate adapter
* @dev: Adapter
*
* Start up processing on an i960 based AAC adapter
*/
static void aac_rx_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;

	init = dev->init;
	/* give the firmware the host's notion of elapsed time */
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	// We can only use a 32 bit address here
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
/**
* aac_rx_check_health
* @dev: device to check if healthy
*
* Will attempt to determine if the specified adapter is alive and
* capable of handling requests, returning 0 if alive.
*/
/*
 * aac_rx_check_health - probe adapter liveness via the OMR status register
 * @dev: device to check if healthy
 *
 * Returns 0 when the adapter kernel is up and running, a positive BlinkLED
 * / POST code describing the panic, or a negative value:
 *   -1  self-test failure,
 *   -2  could not allocate DMA buffers for the POST-results query,
 *   -3  adapter kernel not (yet) running.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char * buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} * post;
		dma_addr_t paddr, baddr;
		int ret;

		/* 0xBCxxyyzz encodes the BlinkLED code directly in OMR */
		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		/* otherwise ask the firmware for its POST results string */
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = pci_alloc_consistent(dev->pdev,
		  sizeof(struct POSTSTATUS), &paddr);
		if (unlikely(post == NULL)) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
		  NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
		  post, paddr);
		/* firmware replies "0x<hex code>..." -- parse the two digits */
		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (hex_to_bin(buffer[2]) << 4) +
				hex_to_bin(buffer[3]);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
/**
* aac_rx_deliver_producer
* @fib: fib to issue
*
* Will send a fib, returning 0 if successful.
*/
/*
 * aac_rx_deliver_producer - queue a fib via the producer/consumer ring
 * @fib: fib to issue
 *
 * Places the fib on the AdapNormCmdQueue, advances the producer index,
 * and (unless interrupt moderation suppressed it) notifies the adapter.
 * Always returns 0.
 */
int aac_rx_deliver_producer(struct fib * fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	unsigned long nointr = 0;

	aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

	atomic_inc(&q->numpending);
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}
/**
* aac_rx_deliver_message
* @fib: fib to issue
*
* Will send a fib, returning 0 if successful.
*/
/*
 * aac_rx_deliver_message - queue a fib via the message interface
 * @fib: fib to issue
 *
 * Polls MUnit.InboundQueue for a free slot (up to ~50 seconds), writes
 * the fib's 64-bit DMA address and size at the slot's offset from the
 * register BAR, then posts the slot index back to the queue register.
 * Returns 0 on success, -ETIMEDOUT if no slot became available.
 */
static int aac_rx_deliver_message(struct fib * fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 Index;
	u64 addr;
	volatile void __iomem *device;

	unsigned long count = 10000000L; /* 50 seconds */
	atomic_inc(&q->numpending);
	for(;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		/* one immediate retry before counting a failed poll */
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			atomic_dec(&q->numpending);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	/* slot layout: addr-low, addr-high, size -- three 32-bit writes */
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}
/**
* aac_rx_ioremap
* @size: mapping resize request
*
*/
/*
 * aac_rx_ioremap - (re)map or unmap the adapter register BAR
 * @dev:  adapter
 * @size: mapping resize request; 0 means unmap
 *
 * Returns 0 on success, -1 if ioremap() failed.
 */
static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	/* mailbox/index registers live inside the freshly mapped BAR */
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}
/*
 * aac_rx_restart_adapter - reset a hung or panicked adapter
 * @dev:  adapter
 * @bled: health code from aac_rx_check_health() (0 for a routine reset)
 *
 * Tries IOP_RESET_ALWAYS first, then IOP_RESET.  A firmware reply of
 * 0x3803000F ("use other method") triggers the fallback write to
 * MUnit.reserved2.  On success the global startup_timeout is raised to at
 * least 300s to give the rebooted firmware time to come up.
 *
 * Returns 0 on success, -EINVAL when the reset was rejected, -ENODEV when
 * the adapter still reports KERNEL_PANIC afterwards.
 */
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var = 0;

	if (!(dev->supplement_adapter_info.SupportedOptions2 &
	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			/* 0x00000001 = reset ok, 0x3803000F = use other method */
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (bled && (var != 0x00000001))
		return -EINVAL;
	ssleep(5);
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}
/**
* aac_rx_select_comm - Select communications method
* @dev: Adapter
* @comm: communications method
*/
/*
 * aac_rx_select_comm - Select communications method
 * @dev:  Adapter
 * @comm: communications method (AAC_COMM_PRODUCER or AAC_COMM_MESSAGE)
 *
 * Installs the matching enable-interrupt / ISR / deliver callbacks.
 * Returns 0 on success, 1 for an unrecognized method.
 */
int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	if (comm == AAC_COMM_PRODUCER) {
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		return 0;
	}
	if (comm == AAC_COMM_MESSAGE) {
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		return 0;
	}
	return 1;
}
/**
* aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
*
* Allocate and set up resources for the i960 based AAC variants. The
* device_interface in the commregion will be allocated and linked
* to the comm region.
*/
/*
 * _aac_rx_init - bring up an i960-based AAC adapter
 * @dev: device to configure
 *
 * Maps the register BAR, optionally resets the adapter, waits (up to
 * startup_timeout, restarting once more if it panics mid-boot) for
 * KERNEL_UP_AND_RUNNING, installs the dispatch table, sets up queues and
 * the IRQ, and finally tells the firmware to start accepting requests.
 *
 * Returns 0 on success, -1 on any failure (error_iounmap path).
 */
int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char * name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
	/* reset if the mask register looks wrong or a reset was requested */
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	  !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		/* already restarted once and it died again, or out of time */
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		/* not yet restarted: attempt one restart when it panics or
		 * most of the startup budget is gone */
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;	/* sync. mode not supported */
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 *	Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}
/*
 * aac_rx_init - entry point for Rx-class adapters
 * @dev: device to configure
 *
 * Installs the Rx-specific ioremap and comm-select callbacks, then runs
 * the shared init sequence.  Returns _aac_rx_init()'s result.
 */
int aac_rx_init(struct aac_dev *dev)
{
	/*
	 *	Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}
| gpl-2.0 |
vanloswang/linux | drivers/spi/spi-octeon.c | 619 | 6332 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011, 2012 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mpi-defs.h>
#define OCTEON_SPI_CFG 0
#define OCTEON_SPI_STS 0x08
#define OCTEON_SPI_TX 0x10
#define OCTEON_SPI_DAT0 0x80
#define OCTEON_SPI_MAX_BYTES 9
#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
/* Per-controller state, stored as spi_master driver data. */
struct octeon_spi {
	u64 register_base;	/* ioremapped base of the MPI register block */
	u64 last_cfg;		/* last value written to MPI_CFG (write cache) */
	u64 cs_enax;		/* accumulated chip-select enable bits (12..15) */
};
/* Busy-poll MPI_STS until the controller clears its busy bit; a short
 * __delay() is inserted between polls after the first read. */
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
	union cvmx_mpi_sts mpi_sts;
	unsigned int loops = 0;

	do {
		if (loops++)
			__delay(500);
		mpi_sts.u64 = cvmx_read_csr(p->register_base + OCTEON_SPI_STS);
	} while (mpi_sts.s.busy);
}
/*
 * octeon_spi_do_transfer - execute one spi_transfer on the MPI engine
 * @p:         controller state
 * @msg:       parent message (source of the spi_device settings)
 * @xfer:      transfer to run
 * @last_xfer: true when this is the final transfer of the message
 *
 * Programs MPI_CFG from the device mode/speed (only when it changed since
 * the last transfer), then pumps the data through the 9-byte MPI data
 * window in OCTEON_SPI_MAX_BYTES chunks, polling for completion after
 * each burst.  Chip-select is held between chunks (leavecs=1); for the
 * final chunk the hold depends on cs_change and whether this is the last
 * transfer in the message.
 *
 * Returns the number of bytes transferred (xfer->len).
 */
static int octeon_spi_do_transfer(struct octeon_spi *p,
				  struct spi_message *msg,
				  struct spi_transfer *xfer,
				  bool last_xfer)
{
	struct spi_device *spi = msg->spi;
	union cvmx_mpi_cfg mpi_cfg;
	union cvmx_mpi_tx mpi_tx;
	unsigned int clkdiv;
	unsigned int speed_hz;
	int mode;
	bool cpha, cpol;
	const u8 *tx_buf;
	u8 *rx_buf;
	int len;
	int i;

	mode = spi->mode;
	cpha = mode & SPI_CPHA;
	cpol = mode & SPI_CPOL;

	/* per-transfer speed override falls back to the device maximum */
	speed_hz = xfer->speed_hz ? : spi->max_speed_hz;

	clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);

	mpi_cfg.u64 = 0;

	mpi_cfg.s.clkdiv = clkdiv;
	mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
	mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
	mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
	mpi_cfg.s.idlelo = cpha != cpol;
	mpi_cfg.s.cslate = cpha ? 1 : 0;
	mpi_cfg.s.enable = 1;

	/* chip-select enables accumulate; once on, a CS stays enabled */
	if (spi->chip_select < 4)
		p->cs_enax |= 1ull << (12 + spi->chip_select);
	mpi_cfg.u64 |= p->cs_enax;

	/* avoid a CSR write when the configuration is unchanged */
	if (mpi_cfg.u64 != p->last_cfg) {
		p->last_cfg = mpi_cfg.u64;
		cvmx_write_csr(p->register_base + OCTEON_SPI_CFG, mpi_cfg.u64);
	}
	tx_buf = xfer->tx_buf;
	rx_buf = xfer->rx_buf;
	len = xfer->len;
	while (len > OCTEON_SPI_MAX_BYTES) {
		/* full burst: load the 9 data registers (zeros if rx-only) */
		for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
			u8 d;
			if (tx_buf)
				d = *tx_buf++;
			else
				d = 0;
			cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
		}
		mpi_tx.u64 = 0;
		mpi_tx.s.csid = spi->chip_select;
		mpi_tx.s.leavecs = 1;
		mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
		mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
		cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);

		octeon_spi_wait_ready(p);
		if (rx_buf)
			for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
				u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
				*rx_buf++ = (u8)v;
			}
		len -= OCTEON_SPI_MAX_BYTES;
	}

	/* final (possibly short) burst */
	for (i = 0; i < len; i++) {
		u8 d;
		if (tx_buf)
			d = *tx_buf++;
		else
			d = 0;
		cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
	}

	mpi_tx.u64 = 0;
	mpi_tx.s.csid = spi->chip_select;
	/* cs_change inverts the usual CS handling for the message's end */
	if (last_xfer)
		mpi_tx.s.leavecs = xfer->cs_change;
	else
		mpi_tx.s.leavecs = !xfer->cs_change;
	mpi_tx.s.txnum = tx_buf ? len : 0;
	mpi_tx.s.totnum = len;
	cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);

	octeon_spi_wait_ready(p);
	if (rx_buf)
		for (i = 0; i < len; i++) {
			u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
			*rx_buf++ = (u8)v;
		}

	if (xfer->delay_usecs)
		udelay(xfer->delay_usecs);

	return xfer->len;
}
/*
 * octeon_spi_transfer_one_message - spi_master .transfer_one_message hook
 * @master: SPI master
 * @msg:    message whose transfers are executed in order
 *
 * Runs each transfer via octeon_spi_do_transfer(), stopping at the first
 * failure.  Fills msg->status/actual_length and finalizes the message in
 * all cases.  Returns 0 on success or the first negative error.
 */
static int octeon_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct octeon_spi *p = spi_master_get_devdata(master);
	unsigned int total_len = 0;
	int status = 0;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		bool last_xfer = list_is_last(&xfer->transfer_list,
					      &msg->transfers);
		int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
		if (r < 0) {
			status = r;
			goto err;
		}
		total_len += r;
	}
err:
	msg->status = status;
	msg->actual_length = total_len;
	spi_finalize_current_message(master);
	return status;
}
/*
 * octeon_spi_probe - platform probe for the OCTEON MPI/SPI controller
 * @pdev: platform device carrying the register MEM resource
 *
 * Allocates the spi_master, maps the register block, and registers the
 * master.  All resources are device-managed (devm_*) or released via
 * spi_master_put() on the failure path.
 *
 * Returns 0 on success or a negative errno.
 */
static int octeon_spi_probe(struct platform_device *pdev)
{
	struct resource *res_mem;
	struct spi_master *master;
	struct octeon_spi *p;
	int err = -ENOENT;

	master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
	if (!master)
		return -ENOMEM;
	p = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, master);

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (res_mem == NULL) {
		dev_err(&pdev->dev, "found no memory resource\n");
		err = -ENXIO;
		goto fail;
	}
	if (!devm_request_mem_region(&pdev->dev, res_mem->start,
				     resource_size(res_mem), res_mem->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		goto fail;
	}
	p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
					     resource_size(res_mem));
	/* BUG FIX: devm_ioremap() can return NULL; the original continued
	 * with a NULL register base and would fault on the first CSR
	 * access in octeon_spi_do_transfer(). */
	if (!p->register_base) {
		dev_err(&pdev->dev, "cannot map register block\n");
		err = -ENOMEM;
		goto fail;
	}

	master->num_chipselect = 4;
	master->mode_bits = SPI_CPHA |
			    SPI_CPOL |
			    SPI_CS_HIGH |
			    SPI_LSB_FIRST |
			    SPI_3WIRE;

	master->transfer_one_message = octeon_spi_transfer_one_message;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;

	master->dev.of_node = pdev->dev.of_node;
	err = devm_spi_register_master(&pdev->dev, master);
	if (err) {
		dev_err(&pdev->dev, "register master failed: %d\n", err);
		goto fail;
	}

	dev_info(&pdev->dev, "OCTEON SPI bus driver\n");

	return 0;
fail:
	spi_master_put(master);
	return err;
}
/* Platform remove: quiesce the controller; devm handles the rest. */
static int octeon_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct octeon_spi *p = spi_master_get_devdata(master);
	u64 register_base = p->register_base;

	/* Clear the CSENA* and put everything in a known state. */
	cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);

	return 0;
}
/* Device-tree binding: the OCTEON 3010-style MPI/SPI controller node. */
static const struct of_device_id octeon_spi_match[] = {
	{ .compatible = "cavium,octeon-3010-spi", },
	{},
};
MODULE_DEVICE_TABLE(of, octeon_spi_match);

static struct platform_driver octeon_spi_driver = {
	.driver = {
		.name		= "spi-octeon",
		.of_match_table = octeon_spi_match,
	},
	.probe		= octeon_spi_probe,
	.remove		= octeon_spi_remove,
};
module_platform_driver(octeon_spi_driver);
MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mkborg/linux-olimex-som | arch/x86/um/sys_call_table_64.c | 619 | 1585 | /*
* System call table for UML/x86-64, copied from arch/x86/kernel/syscall_*.c
* with some changes for UML.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <generated/user_constants.h>
#define __NO_STUBS
/*
* Below you can see, in terms of #define's, the differences between the x86-64
* and the UML syscall table.
*/
/* Not going to be implemented by UML, since we have no hardware. */
#define sys_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
/*
* The UML TLS problem. Note that x86_64 does not implement this, so the below
* is needed only for the ia32 compatibility.
*/
/* On UML we call it this way ("old" means it's not mmap2) */
#define sys_mmap old_mmap
#define stub_clone sys_clone
#define stub_fork sys_fork
#define stub_vfork sys_vfork
#define stub_execve sys_execve
#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
/* Entries are typed as no-arg void functions; the real argument passing
 * happens in the syscall entry assembly. */
typedef void (*sys_call_ptr_t)(void);

extern void sys_ni_syscall(void);

/* Default every slot to sys_ni_syscall, then let the generated
 * __SYSCALL_64() lines from syscalls_64.h override implemented entries
 * (designated initializers: later ones win). */
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
	/*
	 * Smells like a compiler bug -- it doesn't work
	 * when the & below is removed.
	 */
	[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};

int syscall_table_size = sizeof(sys_call_table);
| gpl-2.0 |
PerLycke/android_kernel_moto_shamu | arch/s390/mm/hugetlbpage.c | 1131 | 2600 | /*
* IBM System z Huge TLB Page Support for Kernel.
*
* Copyright IBM Corp. 2007
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#include <linux/mm.h>
#include <linux/hugetlb.h>
/*
 * set_huge_pte_at - install a huge "pte" (really a segment-table entry)
 *
 * On s390 a huge-page pte slot actually holds a pmd.  Without hardware
 * huge-page support (EDAT), the entry is rewritten to point at the
 * software PTE table stashed in page[1].index by arch_prepare_hugepage(),
 * preserving the invalid/read-only bits.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/* software emulation: redirect to the per-page pte table */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
	}

	pmd_val(*pmdp) = pte_val(pteval);
}
/*
 * arch_prepare_hugepage - pre-build the shadow PTE table for one huge page
 *
 * Only needed when hardware huge pages are unavailable: allocates a pte
 * table covering the huge page's physical range and stores it in
 * page[1].index for set_huge_pte_at()/arch_release_hugepage().
 * Returns 0 on success, -ENOMEM when the table cannot be allocated.
 */
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	/* fill the table with consecutive 4K mappings over the huge page */
	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}

	page[1].index = (unsigned long) ptep;
	return 0;
}
/*
 * arch_release_hugepage - free the shadow PTE table of a huge page
 *
 * Counterpart of arch_prepare_hugepage(); no-op with hardware huge pages
 * or when no table was attached.
 */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	/* invalidate before handing the table back to the allocator */
	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}
/*
 * huge_pte_alloc - allocate the page-table slot for a huge mapping
 *
 * Huge "ptes" live at the pmd level on s390, so walk/allocate down to a
 * pmd and hand it back as a pte pointer.  Returns NULL on allocation
 * failure at either level.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pud_t *pudp = pud_alloc(mm, pgd_offset(mm, addr), addr);

	if (!pudp)
		return NULL;
	return (pte_t *) pmd_alloc(mm, pudp, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp = NULL;
pgdp = pgd_offset(mm, addr);
if (pgd_present(*pgdp)) {
pudp = pud_offset(pgdp, addr);
if (pud_present(*pudp))
pmdp = pmd_offset(pudp, addr);
}
return (pte_t *) pmdp;
}
/* PMD sharing is not implemented on s390; nothing to unshare. */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
/* Address-based huge-page lookup is unsupported here; callers fall back
 * to the pmd-based path (follow_huge_pmd). */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}
/* True (1) iff hardware huge pages exist and this pmd maps a large
 * segment; normalized to 0/1 like the original's double negation. */
int pmd_huge(pmd_t pmd)
{
	return MACHINE_HAS_HPAGE &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
/* s390 has no huge pages at the PUD level. */
int pud_huge(pud_t pud)
{
	return 0;
}
/*
 * follow_huge_pmd - resolve @address inside a huge segment to its page
 *
 * Returns NULL when hardware huge pages are unavailable; otherwise the
 * segment's head page advanced to the 4K page containing @address.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		/* offset of the 4K page within the huge segment */
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
| gpl-2.0 |
ProjectX-Android/kernel_oneplus_msm8974 | arch/arm/mach-msm/clock-dummy.c | 1899 | 1438 | /* Copyright (c) 2011,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <mach/clk-provider.h>
/* Dummy clock op: pretend the reset succeeded. */
static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
{
	return 0;
}
/* Dummy clock op: record the requested rate so get_rate can echo it. */
static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}
/* Dummy clock op: accept any maximum rate without storing it. */
static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}
/* Dummy clock op: accept any flags without effect. */
static int dummy_clk_set_flags(struct clk *clk, unsigned flags)
{
	return 0;
}
/* Dummy clock op: report whatever rate was last set. */
static unsigned long dummy_clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
/* Dummy clock op: every rate is achievable exactly. */
static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
{
	return rate;
}
/* Op table and instance for a placeholder clock: drivers can hold and
 * manipulate it, but nothing reaches real hardware. */
struct clk_ops clk_ops_dummy = {
	.reset = dummy_clk_reset,
	.set_rate = dummy_clk_set_rate,
	.set_max_rate = dummy_clk_set_max_rate,
	.set_flags = dummy_clk_set_flags,
	.get_rate = dummy_clk_get_rate,
	.round_rate = dummy_clk_round_rate,
};

struct clk dummy_clk = {
	.dbg_name = "dummy_clk",
	.ops = &clk_ops_dummy,
	CLK_INIT(dummy_clk),
};
| gpl-2.0 |
cbolumar/android_kernel_samsung_a3ultexx | drivers/media/usb/cx231xx/cx231xx-dvb.c | 2155 | 20349 | /*
DVB device driver for cx231xx
Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
Based on em28xx driver
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "cx231xx.h"
#include <media/v4l2-common.h>
#include <media/videobuf-vmalloc.h>
#include "xc5000.h"
#include "s5h1432.h"
#include "tda18271.h"
#include "s5h1411.h"
#include "lgdt3305.h"
#include "mb86a20s.h"
MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
MODULE_AUTHOR("Srinivasa Deevi <srinivasa.deevi@conexant.com>");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/*
 * Log a debug message tagged with the device name when the module
 * parameter "debug" is at least @level.  Expects a local "dev" in scope.
 */
#define dprintk(level, fmt, arg...) do { \
    if (debug >= level) \
        printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg); \
} while (0)

/* USB transfer sizing for the digital TS endpoint */
#define CX231XX_DVB_NUM_BUFS 5
#define CX231XX_DVB_MAX_PACKETSIZE 564
#define CX231XX_DVB_MAX_PACKETS 64
/* Per-device DVB state: frontend, demux plumbing and feed refcounting. */
struct cx231xx_dvb {
    struct dvb_frontend *frontend;

    /* feed count management */
    struct mutex lock;      /* serializes nfeeds and stream start/stop */
    int nfeeds;             /* number of active demux feeds */

    /* general boilerplate stuff */
    struct dvb_adapter adapter;
    struct dvb_demux demux;
    struct dmxdev dmxdev;
    struct dmx_frontend fe_hw;      /* hardware (TS) demux input */
    struct dmx_frontend fe_mem;     /* memory-fed demux input */
    struct dvb_net net;
};
/* s5h1432 DVB-T demod setup for the Conexant Carraera/RDE-250 designs. */
static struct s5h1432_config dvico_s5h1432_config = {
    .output_mode = S5H1432_SERIAL_OUTPUT,
    .gpio = S5H1432_GPIO_ON,
    .qam_if = S5H1432_IF_4000,
    .vsb_if = S5H1432_IF_4000,
    .inversion = S5H1432_INVERSION_OFF,
    .status_mode = S5H1432_DEMODLOCKING,
    .mpeg_timing = S5H1432_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};

/* tda18271 IF/AGC settings per DVB-T channel bandwidth (RDE-253S boards). */
static struct tda18271_std_map cnxt_rde253s_tda18271_std_map = {
    .dvbt_6 = { .if_freq = 4000, .agc_mode = 3, .std = 4,
            .if_lvl = 1, .rfagc_top = 0x37, },
    .dvbt_7 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
            .if_lvl = 1, .rfagc_top = 0x37, },
    .dvbt_8 = { .if_freq = 4000, .agc_mode = 3, .std = 6,
            .if_lvl = 1, .rfagc_top = 0x37, },
};

/* tda18271 settings used with the mb86a20s ISDB-T demod. */
static struct tda18271_std_map mb86a20s_tda18271_config = {
    .dvbt_6 = { .if_freq = 4000, .agc_mode = 3, .std = 4,
            .if_lvl = 0, .rfagc_top = 0x37, },
};

static struct tda18271_config cnxt_rde253s_tunerconfig = {
    .std_map = &cnxt_rde253s_tda18271_std_map,
    .gate    = TDA18271_GATE_ANALOG,
};

/* s5h1411 ATSC/QAM demod setup when paired with a tda18271 tuner. */
static struct s5h1411_config tda18271_s5h1411_config = {
    .output_mode   = S5H1411_SERIAL_OUTPUT,
    .gpio          = S5H1411_GPIO_OFF,
    .vsb_if        = S5H1411_IF_3250,
    .qam_if        = S5H1411_IF_4000,
    .inversion     = S5H1411_INVERSION_ON,
    .status_mode   = S5H1411_DEMODLOCKING,
    .mpeg_timing   = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};

/* s5h1411 demod setup when paired with an xc5000 tuner. */
static struct s5h1411_config xc5000_s5h1411_config = {
    .output_mode  = S5H1411_SERIAL_OUTPUT,
    .gpio         = S5H1411_GPIO_OFF,
    .vsb_if       = S5H1411_IF_3250,
    .qam_if       = S5H1411_IF_3250,
    .inversion    = S5H1411_INVERSION_OFF,
    .status_mode  = S5H1411_DEMODLOCKING,
    .mpeg_timing  = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
};

/* lgdt3305 ATSC demod setup for the Hauppauge Exeter. */
static struct lgdt3305_config hcw_lgdt3305_config = {
    .i2c_addr           = 0x0e,
    .mpeg_mode          = LGDT3305_MPEG_SERIAL,
    .tpclk_edge         = LGDT3305_TPCLK_FALLING_EDGE,
    .tpvalid_polarity   = LGDT3305_TP_VALID_HIGH,
    .deny_i2c_rptr      = 1,
    .spectral_inversion = 1,
    .qam_if_khz         = 4000,
    .vsb_if_khz         = 3250,
};

/* tda18271 IF/AGC settings for Hauppauge ATSC/QAM operation. */
static struct tda18271_std_map hauppauge_tda18271_std_map = {
    .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
            .if_lvl = 1, .rfagc_top = 0x58, },
    .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
           .if_lvl = 1, .rfagc_top = 0x58, },
};

static struct tda18271_config hcw_tda18271_config = {
    .std_map = &hauppauge_tda18271_std_map,
    .gate    = TDA18271_GATE_DIGITAL,
};

/* mb86a20s ISDB-T demod setup for PixelView/Kworld hybrids. */
static const struct mb86a20s_config pv_mb86a20s_config = {
    .demod_address = 0x10,
    .is_serial = true,
};

static struct tda18271_config pv_tda18271_config = {
    .std_map = &mb86a20s_tda18271_config,
    .gate    = TDA18271_GATE_DIGITAL,
    .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
switch (status) {
case -ENOENT:
errmsg = "unlinked synchronuously";
break;
case -ECONNRESET:
errmsg = "unlinked asynchronuously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
break;
case -EPIPE:
errmsg = "Stalled (device not responding)";
break;
case -EOVERFLOW:
errmsg = "Babble (bad cable?)";
break;
case -EPROTO:
errmsg = "Bit-stuff error (bad cable?)";
break;
case -EILSEQ:
errmsg = "CRC/Timeout (could be anything)";
break;
case -ETIME:
errmsg = "Device does not respond";
break;
}
if (packet < 0) {
dprintk(1, "URB status %d [%s].\n", status, errmsg);
} else {
dprintk(1, "URB packet %d, status %d [%s].\n",
packet, status, errmsg);
}
}
/*
 * Completion handler for isochronous DVB URBs: feed each usable packet's
 * payload to the software demux.  Always returns 0.
 */
static inline int dvb_isoc_copy(struct cx231xx *dev, struct urb *urb)
{
    int i;

    if (!dev)
        return 0;

    /* device unplugged: silently drop the data */
    if (dev->state & DEV_DISCONNECTED)
        return 0;

    if (urb->status < 0) {
        print_err_status(dev, -1, urb->status);
        if (urb->status == -ENOENT)
            return 0;   /* URB was unlinked; nothing to process */
    }

    for (i = 0; i < urb->number_of_packets; i++) {
        int status = urb->iso_frame_desc[i].status;

        if (status < 0) {
            print_err_status(dev, i, status);
            /* skip bad frames, except -EPROTO whose payload is
             * still handed to the demux below */
            if (urb->iso_frame_desc[i].status != -EPROTO)
                continue;
        }
        dvb_dmx_swfilter(&dev->dvb->demux,
                 urb->transfer_buffer +
                 urb->iso_frame_desc[i].offset,
                 urb->iso_frame_desc[i].actual_length);
    }
    return 0;
}
/*
 * Completion handler for bulk DVB URBs: push the whole transfer buffer
 * into the software demux.  Always returns 0.
 */
static inline int dvb_bulk_copy(struct cx231xx *dev, struct urb *urb)
{
    /* no device, or device already unplugged: drop the data */
    if (!dev || (dev->state & DEV_DISCONNECTED))
        return 0;

    if (urb->status < 0) {
        print_err_status(dev, -1, urb->status);
        if (urb->status == -ENOENT)
            return 0;   /* URB was unlinked */
    }

    /* Feed the transport payload into the kernel demux */
    dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
             urb->actual_length);
    return 0;
}
/*
 * Start the TS1 transport stream in either ISO or BULK mode, depending on
 * the device configuration, and kick off the URB machinery.  Returns 0 on
 * success or a negative errno from the mode switch / init helpers.
 */
static int start_streaming(struct cx231xx_dvb *dvb)
{
    int rc;
    struct cx231xx *dev = dvb->adapter.priv;

    if (dev->USE_ISO) {
        cx231xx_info("DVB transfer mode is ISO.\n");
        /* alt-setting change requires i2c port 3 to be gated off;
         * hold the i2c lock across the toggle */
        mutex_lock(&dev->i2c_lock);
        cx231xx_enable_i2c_port_3(dev, false);
        cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
        cx231xx_enable_i2c_port_3(dev, true);
        mutex_unlock(&dev->i2c_lock);
        rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
        if (rc < 0)
            return rc;
        dev->mode_tv = 1;
        return cx231xx_init_isoc(dev, CX231XX_DVB_MAX_PACKETS,
                    CX231XX_DVB_NUM_BUFS,
                    dev->ts1_mode.max_pkt_size,
                    dvb_isoc_copy);
    } else {
        cx231xx_info("DVB transfer mode is BULK.\n");
        cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
        rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
        if (rc < 0)
            return rc;
        dev->mode_tv = 1;
        return cx231xx_init_bulk(dev, CX231XX_DVB_MAX_PACKETS,
                    CX231XX_DVB_NUM_BUFS,
                    dev->ts1_mode.max_pkt_size,
                    dvb_bulk_copy);
    }
}
/*
 * Tear down the active transfer (ISO or BULK) and suspend the device.
 * Counterpart of start_streaming(); always returns 0.
 */
static int stop_streaming(struct cx231xx_dvb *dvb)
{
    struct cx231xx *dev = dvb->adapter.priv;

    if (dev->USE_ISO)
        cx231xx_uninit_isoc(dev);
    else
        cx231xx_uninit_bulk(dev);

    cx231xx_set_mode(dev, CX231XX_SUSPEND);

    return 0;
}
/*
 * Demux callback: a filter was opened.  Bump the feed refcount under the
 * lock and start streaming on the first feed.  Returns the feed count on
 * success, or the negative error from start_streaming().
 */
static int start_feed(struct dvb_demux_feed *feed)
{
    struct cx231xx_dvb *dvb = feed->demux->priv;
    int result;

    if (!feed->demux->dmx.frontend)
        return -EINVAL;

    mutex_lock(&dvb->lock);
    result = ++dvb->nfeeds;
    if (dvb->nfeeds == 1) {
        int err = start_streaming(dvb);

        if (err < 0)
            result = err;
    }
    mutex_unlock(&dvb->lock);
    return result;
}
/*
 * Demux callback: a filter was closed.  Drop the feed refcount and stop
 * streaming once the last feed goes away.
 */
static int stop_feed(struct dvb_demux_feed *feed)
{
    struct cx231xx_dvb *dvb = feed->demux->priv;
    int ret = 0;

    mutex_lock(&dvb->lock);
    if (--dvb->nfeeds == 0)
        ret = stop_streaming(dvb);
    mutex_unlock(&dvb->lock);

    return ret;
}
/* ------------------------------------------------------------------ */
/*
 * Frontend ts_bus_ctrl hook: switch the bridge into digital mode while a
 * frontend holds the bus, back to suspend when it releases it.
 */
static int cx231xx_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
    struct cx231xx *dev = fe->dvb->priv;

    return cx231xx_set_mode(dev, acquire ? CX231XX_DIGITAL_MODE
                         : CX231XX_SUSPEND);
}
/* ------------------------------------------------------------------ */
/* xc5000 tuner setups: same i2c address, board-specific IF frequency. */
static struct xc5000_config cnxt_rde250_tunerconfig = {
    .i2c_address = 0x61,
    .if_khz = 4000,
};
static struct xc5000_config cnxt_rdu250_tunerconfig = {
    .i2c_address = 0x61,
    .if_khz = 3250,
};
/* ------------------------------------------------------------------ */
#if 0
/*
 * Disabled alternative xc5000 attach helper; kept for reference only.
 * Would attach an xc5000 tuner at i2c address @addr to the already
 * attached frontend, detaching the frontend on failure.
 */
static int attach_xc5000(u8 addr, struct cx231xx *dev)
{
    struct dvb_frontend *fe;
    struct xc5000_config cfg;

    memset(&cfg, 0, sizeof(cfg));
    cfg.i2c_adap = &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap;
    cfg.i2c_addr = addr;

    if (!dev->dvb->frontend) {
        printk(KERN_ERR "%s/2: dvb frontend not attached. "
               "Can't attach xc5000\n", dev->name);
        return -EINVAL;
    }

    fe = dvb_attach(xc5000_attach, dev->dvb->frontend, &cfg);
    if (!fe) {
        printk(KERN_ERR "%s/2: xc5000 attach failed\n", dev->name);
        dvb_frontend_detach(dev->dvb->frontend);
        dev->dvb->frontend = NULL;
        return -EINVAL;
    }

    printk(KERN_INFO "%s/2: xc5000 attached\n", dev->name);

    return 0;
}
#endif
/*
 * cx231xx_set_analog_freq() - tune the shared hybrid tuner for analog TV.
 * @dev:  bridge device
 * @freq: target frequency (units as expected by set_analog_params)
 *
 * Called by the analog side via dev->cx231xx_set_analog_freq.  Forwards
 * the frequency and current TV norm to the DVB frontend's analog tuner
 * callback when one is attached; a no-op otherwise.  Returns 0.
 *
 * Fix: the argument to set_analog_params read "¶ms" (a mangled HTML
 * entity for "&params"), which is not valid C; restored to &params.
 */
int cx231xx_set_analog_freq(struct cx231xx *dev, u32 freq)
{
    int status = 0;

    if ((dev->dvb != NULL) && (dev->dvb->frontend != NULL)) {
        struct dvb_tuner_ops *dops = &dev->dvb->frontend->ops.tuner_ops;

        if (dops->set_analog_params != NULL) {
            struct analog_parameters params;

            params.frequency = freq;
            params.std = dev->norm;
            params.mode = 0;    /* 0- Air; 1 - cable */
            /*params.audmode = ; */

            /* Set the analog parameters to set the frequency */
            dops->set_analog_params(dev->dvb->frontend, &params);
        }
    }
    return status;
}
/*
 * cx231xx_reset_analog_tuner() - (re)load the tuner firmware if needed.
 * Invokes the frontend tuner's init op once, remembering success in
 * dev->xc_fw_load_done so the firmware is not downloaded repeatedly.
 * Returns the init op's status (0 if nothing was done).
 */
int cx231xx_reset_analog_tuner(struct cx231xx *dev)
{
    int status = 0;

    if ((dev->dvb != NULL) && (dev->dvb->frontend != NULL)) {
        struct dvb_tuner_ops *dops = &dev->dvb->frontend->ops.tuner_ops;

        if (dops->init != NULL && !dev->xc_fw_load_done) {

            cx231xx_info("Reloading firmware for XC5000\n");
            status = dops->init(dev->dvb->frontend);
            if (status == 0) {
                dev->xc_fw_load_done = 1;
                cx231xx_info
                    ("XC5000 firmware download completed\n");
            } else {
                dev->xc_fw_load_done = 0;
                cx231xx_info
                    ("XC5000 firmware download failed !!!\n");
            }
        }
    }
    return status;
}
/* ------------------------------------------------------------------ */
/*
 * register_dvb() - register the full DVB stack for an attached frontend:
 * adapter -> frontend -> demux -> dmxdev -> demux frontends -> dvb_net.
 * On failure everything registered so far is unwound in reverse order via
 * the goto chain.  Returns 0 or a negative errno.
 */
static int register_dvb(struct cx231xx_dvb *dvb,
            struct module *module,
            struct cx231xx *dev, struct device *device)
{
    int result;

    mutex_init(&dvb->lock);

    /* register adapter */
    result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
                      adapter_nr);
    if (result < 0) {
        printk(KERN_WARNING
               "%s: dvb_register_adapter failed (errno = %d)\n",
               dev->name, result);
        goto fail_adapter;
    }

    /* Ensure all frontends negotiate bus access */
    dvb->frontend->ops.ts_bus_ctrl = cx231xx_dvb_bus_ctrl;

    dvb->adapter.priv = dev;

    /* register frontend */
    result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
    if (result < 0) {
        printk(KERN_WARNING
               "%s: dvb_register_frontend failed (errno = %d)\n",
               dev->name, result);
        goto fail_frontend;
    }

    /* register demux stuff */
    dvb->demux.dmx.capabilities =
        DMX_TS_FILTERING | DMX_SECTION_FILTERING |
        DMX_MEMORY_BASED_FILTERING;
    dvb->demux.priv = dvb;
    dvb->demux.filternum = 256;
    dvb->demux.feednum = 256;
    dvb->demux.start_feed = start_feed;
    dvb->demux.stop_feed = stop_feed;

    result = dvb_dmx_init(&dvb->demux);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
               dev->name, result);
        goto fail_dmx;
    }

    dvb->dmxdev.filternum = 256;
    dvb->dmxdev.demux = &dvb->demux.dmx;
    dvb->dmxdev.capabilities = 0;
    result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
               dev->name, result);
        goto fail_dmxdev;
    }

    /* hardware TS input */
    dvb->fe_hw.source = DMX_FRONTEND_0;
    result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
    if (result < 0) {
        printk(KERN_WARNING
               "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
               dev->name, result);
        goto fail_fe_hw;
    }

    /* memory-fed input (for userspace injection) */
    dvb->fe_mem.source = DMX_MEMORY_FE;
    result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
    if (result < 0) {
        printk(KERN_WARNING
               "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
               dev->name, result);
        goto fail_fe_mem;
    }

    result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
    if (result < 0) {
        printk(KERN_WARNING
               "%s: connect_frontend failed (errno = %d)\n", dev->name,
               result);
        goto fail_fe_conn;
    }

    /* register network adapter */
    dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
    return 0;

    /* teardown in strict reverse order of setup */
fail_fe_conn:
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
    dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
    dvb_dmx_release(&dvb->demux);
fail_dmx:
    dvb_unregister_frontend(dvb->frontend);
fail_frontend:
    dvb_frontend_detach(dvb->frontend);
    dvb_unregister_adapter(&dvb->adapter);
fail_adapter:
    return result;
}
/*
 * unregister_dvb() - tear down everything register_dvb() set up, in
 * reverse order of registration.  Does not free @dvb itself.
 */
static void unregister_dvb(struct cx231xx_dvb *dvb)
{
    dvb_net_release(&dvb->net);
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
    dvb_dmxdev_release(&dvb->dmxdev);
    dvb_dmx_release(&dvb->demux);
    dvb_unregister_frontend(dvb->frontend);
    dvb_frontend_detach(dvb->frontend);
    dvb_unregister_adapter(&dvb->adapter);
}
/*
 * dvb_init() - cx231xx extension entry: allocate the DVB state, attach
 * the board-specific demod/tuner pair and register the DVB stack.
 * Returns 0 on success (including boards without DVB support) or a
 * negative errno; on failure the allocated state is freed again.
 */
static int dvb_init(struct cx231xx *dev)
{
    int result = 0;
    struct cx231xx_dvb *dvb;

    if (!dev->board.has_dvb) {
        /* This device does not support the extension */
        return 0;
    }

    dvb = kzalloc(sizeof(struct cx231xx_dvb), GFP_KERNEL);

    if (dvb == NULL) {
        printk(KERN_INFO "cx231xx_dvb: memory allocation failed\n");
        return -ENOMEM;
    }
    dev->dvb = dvb;
    /* hooks the analog side uses to drive the shared hybrid tuner */
    dev->cx231xx_set_analog_freq = cx231xx_set_analog_freq;
    dev->cx231xx_reset_analog_tuner = cx231xx_reset_analog_tuner;

    mutex_lock(&dev->lock);
    cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
    cx231xx_demod_reset(dev);

    /* init frontend */
    switch (dev->model) {
    case CX231XX_BOARD_CNXT_CARRAERA:
    case CX231XX_BOARD_CNXT_RDE_250:
        dev->dvb->frontend = dvb_attach(s5h1432_attach,
                    &dvico_s5h1432_config,
                    &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach s5h1432 front end\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
                   &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
                   &cnxt_rde250_tunerconfig)) {
            result = -EINVAL;
            goto out_free;
        }

        break;
    case CX231XX_BOARD_CNXT_SHELBY:
    case CX231XX_BOARD_CNXT_RDU_250:
        dev->dvb->frontend = dvb_attach(s5h1411_attach,
                           &xc5000_s5h1411_config,
                           &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach s5h1411 front end\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        if (!dvb_attach(xc5000_attach, dev->dvb->frontend,
                   &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
                   &cnxt_rdu250_tunerconfig)) {
            result = -EINVAL;
            goto out_free;
        }
        break;
    case CX231XX_BOARD_CNXT_RDE_253S:

        dev->dvb->frontend = dvb_attach(s5h1432_attach,
                    &dvico_s5h1432_config,
                    &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach s5h1432 front end\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
                   0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
                   &cnxt_rde253s_tunerconfig)) {
            result = -EINVAL;
            goto out_free;
        }
        break;
    case CX231XX_BOARD_CNXT_RDU_253S:

        dev->dvb->frontend = dvb_attach(s5h1411_attach,
                           &tda18271_s5h1411_config,
                           &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach s5h1411 front end\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        if (!dvb_attach(tda18271_attach, dev->dvb->frontend,
                   0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
                   &cnxt_rde253s_tunerconfig)) {
            result = -EINVAL;
            goto out_free;
        }
        break;
    case CX231XX_BOARD_HAUPPAUGE_EXETER:

        printk(KERN_INFO "%s: looking for tuner / demod on i2c bus: %d\n",
               __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));

        dev->dvb->frontend = dvb_attach(lgdt3305_attach,
                        &hcw_lgdt3305_config,
                        &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach LG3305 front end\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        dvb_attach(tda18271_attach, dev->dvb->frontend,
               0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
               &hcw_tda18271_config);
        break;

    case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
    case CX231XX_BOARD_KWORLD_UB430_USB_HYBRID:

        printk(KERN_INFO "%s: looking for demod on i2c bus: %d\n",
               __func__, i2c_adapter_id(&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap));

        dev->dvb->frontend = dvb_attach(mb86a20s_attach,
                        &pv_mb86a20s_config,
                        &dev->i2c_bus[dev->board.demod_i2c_master].i2c_adap);

        if (dev->dvb->frontend == NULL) {
            printk(DRIVER_NAME
                   ": Failed to attach mb86a20s demod\n");
            result = -EINVAL;
            goto out_free;
        }

        /* define general-purpose callback pointer */
        dvb->frontend->callback = cx231xx_tuner_callback;

        dvb_attach(tda18271_attach, dev->dvb->frontend,
               0x60, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
               &pv_tda18271_config);
        break;

    default:
        printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card"
               " isn't supported yet\n", dev->name);
        break;
    }
    if (NULL == dvb->frontend) {
        printk(KERN_ERR
               "%s/2: frontend initialization failed\n", dev->name);
        result = -EINVAL;
        goto out_free;
    }

    /* register everything */
    result = register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);

    if (result < 0)
        goto out_free;

    printk(KERN_INFO "Successfully loaded cx231xx-dvb\n");

ret:
    /* common exit: return the bridge to suspend and drop the lock */
    cx231xx_set_mode(dev, CX231XX_SUSPEND);
    mutex_unlock(&dev->lock);
    return result;

out_free:
    kfree(dvb);
    dev->dvb = NULL;
    goto ret;
}
/*
 * dvb_fini() - cx231xx extension teardown: unregister and free the DVB
 * state created by dvb_init().  Always returns 0.
 *
 * Fix: the cx231xx_dvb struct kzalloc'd in dvb_init() was never freed on
 * the teardown path (unregister_dvb() only removes registrations), leaking
 * it on every unplug/module unload.  Free it before clearing the pointer.
 */
static int dvb_fini(struct cx231xx *dev)
{
    if (!dev->board.has_dvb) {
        /* This device does not support the extension */
        return 0;
    }

    if (dev->dvb) {
        unregister_dvb(dev->dvb);
        kfree(dev->dvb);
        dev->dvb = NULL;
    }

    return 0;
}
/* Extension descriptor hooking dvb_init/dvb_fini into the cx231xx core. */
static struct cx231xx_ops dvb_ops = {
    .id = CX231XX_DVB,
    .name = "Cx231xx dvb Extension",
    .init = dvb_init,
    .fini = dvb_fini,
};
/* Module init: register the DVB extension with the cx231xx core. */
static int __init cx231xx_dvb_register(void)
{
    return cx231xx_register_extension(&dvb_ops);
}
/* Module exit: unhook the DVB extension from the cx231xx core. */
static void __exit cx231xx_dvb_unregister(void)
{
    cx231xx_unregister_extension(&dvb_ops);
}
module_init(cx231xx_dvb_register);
module_exit(cx231xx_dvb_unregister);
| gpl-2.0 |
CyanogenMod/android_kernel_lge_g3 | drivers/net/wireless/bcmdhd/siutils.c | 2155 | 56361 | /*
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
* Copyright (C) 1999-2012, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: siutils.c 328733 2012-04-20 14:49:55Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>
#include <sbsocram.h>
#include <bcmsdh.h>
#include <sdio.h>
#include <sbsdio.h>
#include <sbhnddma.h>
#include <sbsdpcmdev.h>
#include <bcmsdpcm.h>
#include <hndpmu.h>
#include "siutils_priv.h"
/* local prototypes */
static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
                              uint bustype, void *sdh, char **vars, uint *varsz);
static bool si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh);
static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
                             uint *origidx, void *regs);

/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;

/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
int do_4360_pcie2_war = 0;
/*
* Allocate a si handle.
* devid - pci device id (used to determine chip#)
* osh - opaque OS handle
* regs - virtual address of initial core registers
* bustype - pci/pcmcia/sb/sdio/etc
* vars - pointer to a pointer area for "environment" variables
* varsz - pointer to int to return the size of the vars
*/
/*
 * Allocate and initialize a si handle (see the parameter notes in the
 * comment above).  Returns NULL on allocation or attach failure; the
 * handle must be released with si_detach().
 */
si_t *
si_attach(uint devid, osl_t *osh, void *regs,
          uint bustype, void *sdh, char **vars, uint *varsz)
{
    si_info_t *sii;

    /* alloc si_info_t */
    if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
        SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
        return (NULL);
    }

    if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
        MFREE(osh, sii, sizeof(si_info_t));
        return (NULL);
    }
    /* note: si_doattach() zeroes *sii, so cache vars/varsz afterwards */
    sii->vars = vars ? *vars : NULL;
    sii->varsz = varsz ? *varsz : 0;

    return (si_t *)sii;
}
/* global kernel resource */
static si_info_t ksii;

static uint32 wd_msticks;       /* watchdog timer ticks normalized to ms */

/*
 * Generic kernel variant of si_attach(): attaches once to the singleton
 * ksii using a temporary register mapping of the enumeration space, and
 * returns the shared handle on every subsequent call.
 */
si_t *
si_kattach(osl_t *osh)
{
    static bool ksii_attached = FALSE;

    if (!ksii_attached) {
        void *regs;
        regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);

        if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
                        SI_BUS, NULL,
                        osh != SI_OSH ? &ksii.vars : NULL,
                        osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
            SI_ERROR(("si_kattach: si_doattach failed\n"));
            REG_UNMAP(regs);
            return NULL;
        }
        REG_UNMAP(regs);

        /* save ticks normalized to ms for si_watchdog_ms() */
        if (PMUCTL_ENAB(&ksii.pub)) {
            /* based on 32KHz ILP clock */
            wd_msticks = 32;
        } else {
            wd_msticks = ALP_CLOCK / 1000;
        }

        ksii_attached = TRUE;
        SI_MSG(("si_kattach done. ccrev = %d, wd_msticks = %d\n",
                ksii.pub.ccrev, wd_msticks));
    }

    return &ksii.pub;
}
/*
 * Pre-attach bus preparation: for PCMCIA set the memseg flag before any
 * backplane register access; for SDIO force the HT-off/ALP clock request
 * handshake so the backplane is clocked, then disable the extra SDIO
 * pull-ups.  Returns FALSE only if the ALP-available wait times out.
 */
static bool
si_buscore_prep(si_info_t *sii, uint bustype, uint devid, void *sdh)
{
    /* need to set memseg flag for CF card first before any sb registers access */
    if (BUSTYPE(bustype) == PCMCIA_BUS)
        sii->memseg = TRUE;

    if (BUSTYPE(bustype) == SDIO_BUS) {
        int err;
        uint8 clkset;

        /* Try forcing SDIO core to do ALPAvail request only */
        clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
        bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
        if (!err) {
            uint8 clkval;

            /* If register supported, wait for ALPAvail and then force ALP */
            clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
            if ((clkval & ~SBSDIO_AVBITS) == clkset) {
                SPINWAIT(((clkval = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
                    SBSDIO_FUNC1_CHIPCLKCSR, NULL)), !SBSDIO_ALPAV(clkval)),
                    PMU_MAX_TRANSITION_DLY);
                if (!SBSDIO_ALPAV(clkval)) {
                    SI_ERROR(("timeout on ALPAV wait, clkval 0x%02x\n",
                        clkval));
                    return FALSE;
                }
                clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
                bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
                    clkset, &err);
                OSL_DELAY(65);
            }
        }

        /* Also, disable the extra SDIO pull-ups */
        bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
    }

    return TRUE;
}
/*
 * Post-scan bus/core bookkeeping: read chipcommon rev/caps/PMU info, walk
 * all discovered cores to locate the bus core (PCI/PCIe/PCMCIA/SDIO) and
 * the caller's original core index, apply a 4712 crystal workaround, and
 * make sure any on-chip ARM is disabled on SDIO/SPI buses.  Leaves the
 * current core restored to *origidx.  Always returns TRUE.
 */
static bool
si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
                 uint *origidx, void *regs)
{
    bool pci, pcie, pcie_gen2 = FALSE;
    uint i;
    uint pciidx, pcieidx, pcirev, pcierev;

    cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
    ASSERT((uintptr)cc);

    /* get chipcommon rev */
    sii->pub.ccrev = (int)si_corerev(&sii->pub);

    /* get chipcommon chipstatus */
    if (sii->pub.ccrev >= 11)
        sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus);

    /* get chipcommon capabilites */
    sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities);
    /* get chipcommon extended capabilities */

    if (sii->pub.ccrev >= 35)
        sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext);

    /* get pmu rev and caps */
    if (sii->pub.cccaps & CC_CAP_PMU) {
        sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
        sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
    }

    SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n",
        sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev,
        sii->pub.pmucaps));

    /* figure out bus/orignal core idx */
    sii->pub.buscoretype = NODEV_CORE_ID;
    sii->pub.buscorerev = (uint)NOREV;
    sii->pub.buscoreidx = BADIDX;

    pci = pcie = FALSE;
    pcirev = pcierev = (uint)NOREV;
    pciidx = pcieidx = BADIDX;

    for (i = 0; i < sii->numcores; i++) {
        uint cid, crev;

        si_setcoreidx(&sii->pub, i);
        cid = si_coreid(&sii->pub);
        crev = si_corerev(&sii->pub);

        /* Display cores found */
        SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
                i, cid, crev, sii->coresba[i], sii->regs[i]));

        if (BUSTYPE(bustype) == PCI_BUS) {
            if (cid == PCI_CORE_ID) {
                pciidx = i;
                pcirev = crev;
                pci = TRUE;
            } else if ((cid == PCIE_CORE_ID) || (cid == PCIE2_CORE_ID)) {
                pcieidx = i;
                pcierev = crev;
                pcie = TRUE;
                if (cid == PCIE2_CORE_ID)
                    pcie_gen2 = TRUE;
            }
        } else if ((BUSTYPE(bustype) == PCMCIA_BUS) &&
                   (cid == PCMCIA_CORE_ID)) {
            sii->pub.buscorerev = crev;
            sii->pub.buscoretype = cid;
            sii->pub.buscoreidx = i;
        }
        else if (((BUSTYPE(bustype) == SDIO_BUS) ||
                  (BUSTYPE(bustype) == SPI_BUS)) &&
                 ((cid == PCMCIA_CORE_ID) ||
                  (cid == SDIOD_CORE_ID))) {
            sii->pub.buscorerev = crev;
            sii->pub.buscoretype = cid;
            sii->pub.buscoreidx = i;
        }

        /* find the core idx before entering this func. */
        if ((savewin && (savewin == sii->coresba[i])) ||
            (regs == sii->regs[i]))
            *origidx = i;
    }

    /* on PCI buses prefer PCI over PCIe if both cores are present */
    if (pci) {
        sii->pub.buscoretype = PCI_CORE_ID;
        sii->pub.buscorerev = pcirev;
        sii->pub.buscoreidx = pciidx;
    } else if (pcie) {
        if (pcie_gen2)
            sii->pub.buscoretype = PCIE2_CORE_ID;
        else
            sii->pub.buscoretype = PCIE_CORE_ID;
        sii->pub.buscorerev = pcierev;
        sii->pub.buscoreidx = pcieidx;
    }

    SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
             sii->pub.buscorerev));

    /* 4712: force slow-clock source to crystal on early, small packages */
    if (BUSTYPE(sii->pub.bustype) == SI_BUS && (CHIPID(sii->pub.chip) == BCM4712_CHIP_ID) &&
        (sii->pub.chippkg != BCM4712LARGE_PKG_ID) && (CHIPREV(sii->pub.chiprev) <= 3))
        OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);

    /* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
     * already running.
     */
    if ((BUSTYPE(bustype) == SDIO_BUS) || (BUSTYPE(bustype) == SPI_BUS)) {
        if (si_setcore(&sii->pub, ARM7S_CORE_ID, 0) ||
            si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
            si_core_disable(&sii->pub, 0);
    }

    /* return to the original core */
    si_setcoreidx(&sii->pub, *origidx);

    return TRUE;
}
/*
 * Core attach worker shared by si_attach()/si_kattach(): maps chipcommon,
 * performs the bus prep handshake, reads the chip id, scans the cores for
 * the detected interconnect type (SB/AI/UBUS) and finishes bus/core setup.
 * Returns @sii on success, NULL on any failure (caller frees @sii).
 */
static si_info_t *
si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
            uint bustype, void *sdh, char **vars, uint *varsz)
{
    struct si_pub *sih = &sii->pub;
    uint32 w, savewin;
    chipcregs_t *cc;
    char *pvars = NULL;
    uint origidx;

    ASSERT(GOODREGS(regs));

    bzero((uchar*)sii, sizeof(si_info_t));

    savewin = 0;

    sih->buscoreidx = BADIDX;

    sii->curmap = regs;
    sii->sdh = sdh;
    sii->osh = osh;

    /* find Chipcommon address */
    if (bustype == PCI_BUS) {
        /* remember the current BAR0 window so it can be located later */
        savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
        if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
            savewin = SI_ENUM_BASE;
        OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
        if (!regs)
            return NULL;
        cc = (chipcregs_t *)regs;
    } else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
        cc = (chipcregs_t *)sii->curmap;
    } else {
        cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
    }

    sih->bustype = bustype;
    if (bustype != BUSTYPE(bustype)) {
        SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n",
                  bustype, BUSTYPE(bustype)));
        return NULL;
    }

    /* bus/core/clk setup for register access */
    if (!si_buscore_prep(sii, bustype, devid, sdh)) {
        SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n", bustype));
        return NULL;
    }

    /* ChipID recognition.
     *   We assume we can read chipid at offset 0 from the regs arg.
     *   If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
     *   some way of recognizing them needs to be added here.
     */
    if (!cc) {
        SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
        return NULL;
    }
    w = R_REG(osh, &cc->chipid);
    sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
    /* Might as wll fill in chip id rev & pkg */
    sih->chip = w & CID_ID_MASK;
    sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
    sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;

    /* 4329 rev0: package id strap can be wrong, normalize it */
    if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
        (sih->chippkg != BCM4329_289PIN_PKG_ID)) {
        sih->chippkg = BCM4329_182PIN_PKG_ID;
    }
    sih->issim = IS_SIM(sih->chippkg);

    /* scan for cores */
    if (CHIPTYPE(sii->pub.socitype) == SOCI_SB) {
        SI_MSG(("Found chip type SB (0x%08x)\n", w));
        sb_scan(&sii->pub, regs, devid);
    } else if (CHIPTYPE(sii->pub.socitype) == SOCI_AI) {
        SI_MSG(("Found chip type AI (0x%08x)\n", w));
        /* pass chipc address instead of original core base */
        ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
    } else if (CHIPTYPE(sii->pub.socitype) == SOCI_UBUS) {
        SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip));
        /* pass chipc address instead of original core base */
        ub_scan(&sii->pub, (void *)(uintptr)cc, devid);
    } else {
        SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
        return NULL;
    }
    /* no cores found, bail out */
    if (sii->numcores == 0) {
        SI_ERROR(("si_doattach: could not find any cores\n"));
        return NULL;
    }
    /* bus/core/clk setup */
    origidx = SI_CC_IDX;
    if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) {
        SI_ERROR(("si_doattach: si_buscore_setup failed\n"));
        goto exit;
    }

    /* 4322: SPROM and OTP cannot both be strapped present */
    if (CHIPID(sih->chip) == BCM4322_CHIP_ID && (((sih->chipst & CST4322_SPROM_OTP_SEL_MASK)
        >> CST4322_SPROM_OTP_SEL_SHIFT) == (CST4322_OTP_PRESENT |
        CST4322_SPROM_PRESENT))) {
        SI_ERROR(("%s: Invalid setting: both SPROM and OTP strapped.\n", __FUNCTION__));
        return NULL;
    }

    /* assume current core is CC */
    if ((sii->pub.ccrev == 0x25) && ((CHIPID(sih->chip) == BCM43236_CHIP_ID ||
                                      CHIPID(sih->chip) == BCM43235_CHIP_ID ||
                                      CHIPID(sih->chip) == BCM43234_CHIP_ID ||
                                      CHIPID(sih->chip) == BCM43238_CHIP_ID) &&
                                     (CHIPREV(sii->pub.chiprev) <= 2))) {

        if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
            uint clkdiv;
            clkdiv = R_REG(osh, &cc->clkdiv);
            /* otp_clk_div is even number, 120/14 < 9mhz */
            clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
            W_REG(osh, &cc->clkdiv, clkdiv);
            SI_ERROR(("%s: set clkdiv to %x\n", __FUNCTION__, clkdiv));
        }
        OSL_DELAY(10);
    }

    if (bustype == PCI_BUS) {

    }

    pvars = NULL;
    BCM_REFERENCE(pvars);

    if (sii->pub.ccrev >= 20) {
        uint32 gpiopullup = 0, gpiopulldown = 0;

        cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
        ASSERT(cc != NULL);

        /* 4314/43142 has pin muxing, don't clear gpio bits */
        if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
            (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
            gpiopullup |= 0x402e0;
            gpiopulldown |= 0x20500;
        }

        W_REG(osh, &cc->gpiopullup, gpiopullup);
        W_REG(osh, &cc->gpiopulldown, gpiopulldown);
        si_setcoreidx(sih, origidx);
    }

    /* clear any previous epidiag-induced target abort */
    ASSERT(!si_taclear(sih, FALSE));

    return (sii);

exit:

    return NULL;
}
/* may be called with core in reset */
/*
 * Release a handle from si_attach(): unmap any per-core register windows
 * (SI_BUS only) and free the si_info_t unless it is the static kernel
 * singleton ksii.
 */
void
si_detach(si_t *sih)
{
    si_info_t *sii;
    uint idx;

    sii = SI_INFO(sih);

    if (sii == NULL)
        return;

    if (BUSTYPE(sih->bustype) == SI_BUS)
        for (idx = 0; idx < SI_MAXCORES; idx++)
            if (sii->regs[idx]) {
                REG_UNMAP(sii->regs[idx]);
                sii->regs[idx] = NULL;
            }

#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
    if (sii != &ksii)
#endif  /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
        MFREE(sii->osh, sii, sizeof(si_info_t));
}
/* Return the OS handle the si handle was attached with. */
void *
si_osh(si_t *sih)
{
    return SI_INFO(sih)->osh;
}
/*
 * Install the OS handle on an attached si handle.  Logs and asserts if a
 * handle is already set (but still overwrites it).
 */
void
si_setosh(si_t *sih, osl_t *osh)
{
    si_info_t *sii;

    sii = SI_INFO(sih);
    if (sii->osh != NULL) {
        SI_ERROR(("osh is already set....\n"));
        ASSERT(!sii->osh);
    }
    sii->osh = osh;
}
/* register driver interrupt disabling and restoring callback functions */
void
si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
                          void *intrsenabled_fn, void *intr_arg)
{
    si_info_t *sii;

    sii = SI_INFO(sih);
    sii->intr_arg = intr_arg;
    sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
    sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
    sii->intrsenabled_fn = (si_intrsenabled_t)intrsenabled_fn;
    /* save current core id.  when this function called, the current core
     * must be the core which provides driver functions(il, et, wl, etc.)
     */
    sii->dev_coreid = sii->coreid[sii->curidx];
}
/* Drop the driver interrupt callbacks; clearing intrsoff_fn disables the
 * whole INTR_OFF/INTR_RESTORE mechanism for this context.
 */
void
si_deregister_intr_callback(si_t *sih)
{
	SI_INFO(sih)->intrsoff_fn = NULL;
}
/* Return the raw backplane interrupt status for the current core. */
uint
si_intflag(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);

	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_intflag(sih);
	case SOCI_AI:
		/* AI backplane: read the OOB router STATUSA register */
		return R_REG(sii->osh, ((uint32 *)(uintptr)
		             (sii->oob_router + OOB_STATUSA)));
	default:
		ASSERT(0);
		return 0;
	}
}
/* Return the current core's backplane flag (dispatch on backplane type). */
uint
si_flag(si_t *sih)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_flag(sih);
	case SOCI_AI:
		return ai_flag(sih);
	case SOCI_UBUS:
		return ub_flag(sih);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Route the given backplane flag to the interrupt line (backplane dispatch). */
void
si_setint(si_t *sih, int siflag)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		sb_setint(sih, siflag);
		break;
	case SOCI_AI:
		ai_setint(sih, siflag);
		break;
	case SOCI_UBUS:
		ub_setint(sih, siflag);
		break;
	default:
		ASSERT(0);
		break;
	}
}
/* Return the core id of the currently-focused core. */
uint
si_coreid(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);

	return sii->coreid[sii->curidx];
}
/* Return the index of the currently-focused core. */
uint
si_coreidx(si_t *sih)
{
	return SI_INFO(sih)->curidx;
}
/* return the core-type instantiation # of the current core */
uint
si_coreunit(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint unit = 0;
	uint target;
	uint i;

	ASSERT(GOODREGS(sii->curmap));

	target = si_coreid(sih);

	/* the unit number is the count of earlier cores sharing our core id */
	for (i = 0; i < sii->curidx; i++)
		if (sii->coreid[i] == target)
			unit++;

	return unit;
}
/* Return the vendor id of the current core (backplane dispatch). */
uint
si_corevendor(si_t *sih)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_corevendor(sih);
	case SOCI_AI:
		return ai_corevendor(sih);
	case SOCI_UBUS:
		return ub_corevendor(sih);
	default:
		ASSERT(0);
		return 0;
	}
}
/* TRUE iff the chipcommon capabilities advertise a 64-bit backplane. */
bool
si_backplane64(si_t *sih)
{
	return (sih->cccaps & CC_CAP_BKPLN64) ? TRUE : FALSE;
}
/* Return the revision of the current core (backplane dispatch). */
uint
si_corerev(si_t *sih)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_corerev(sih);
	case SOCI_AI:
		return ai_corerev(sih);
	case SOCI_UBUS:
		return ub_corerev(sih);
	default:
		ASSERT(0);
		return 0;
	}
}
/* return index of coreid or BADIDX if not found */
uint
si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
{
	si_info_t *sii = SI_INFO(sih);
	uint match = 0;
	uint i;

	/* the 'coreunit'-th occurrence of 'coreid' is the one we want */
	for (i = 0; i < sii->numcores; i++) {
		if (sii->coreid[i] != coreid)
			continue;
		if (match == coreunit)
			return i;
		match++;
	}

	return BADIDX;
}
/* return list of found cores */
uint
si_corelist(si_t *sih, uint coreid[])
{
	si_info_t *sii = SI_INFO(sih);

	/* copy the discovered core-id table out to the caller's array */
	bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
	return sii->numcores;
}
/* return current register mapping */
void *
si_coreregs(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curmap));
	return sii->curmap;
}
/*
 * This function changes logical "focus" to the indicated core;
 * must be called with interrupts off.
 * Moreover, callers should keep interrupts off during switching out of and back to d11 core
 */
void *
si_setcore(si_t *sih, uint coreid, uint coreunit)
{
	uint idx = si_findcoreidx(sih, coreid, coreunit);

	if (!GOODIDX(idx))
		return NULL;

	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_setcoreidx(sih, idx);
	case SOCI_AI:
		return ai_setcoreidx(sih, idx);
	case SOCI_UBUS:
		return ub_setcoreidx(sih, idx);
	default:
		ASSERT(0);
		return NULL;
	}
}
/* Change logical "focus" to the core at the given index (backplane dispatch). */
void *
si_setcoreidx(si_t *sih, uint coreidx)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_setcoreidx(sih, coreidx);
	case SOCI_AI:
		return ai_setcoreidx(sih, coreidx);
	case SOCI_UBUS:
		return ub_setcoreidx(sih, coreidx);
	default:
		ASSERT(0);
		return NULL;
	}
}
/* Turn off interrupt as required by sb_setcore, before switch core */
/*
 * Switch focus to the core identified by 'coreid' and return its register
 * pointer. On fast-path buses chipcommon and the bus core are permanently
 * mapped, so no real switch (and no interrupt masking) is needed; in that
 * case *origidx is overloaded to carry the coreid itself, which
 * si_restore_core() recognizes. Otherwise interrupts are masked (state saved
 * into *intr_val), the current index is saved into *origidx, and the core is
 * switched via si_setcore().
 */
void *
si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
{
	void *cc;
	si_info_t *sii;

	sii = SI_INFO(sih);

	if (SI_FAST(sii)) {
		/* Overloading the origidx variable to remember the coreid,
		 * this works because the core ids cannot be confused with
		 * core indices.
		 */
		*origidx = coreid;
		if (coreid == CC_CORE_ID)
			return (void *)CCREGS_FAST(sii);
		else if (coreid == sih->buscoretype)
			return (void *)PCIEREGS(sii);
	}
	INTR_OFF(sii, *intr_val);
	*origidx = sii->curidx;
	cc = si_setcore(sih, coreid, 0);
	ASSERT(cc != NULL);

	return cc;
}
/* restore coreidx and restore interrupt */
void
si_restore_core(si_t *sih, uint coreid, uint intr_val)
{
	si_info_t *sii = SI_INFO(sih);

	/* fast-path access never actually switched cores, nothing to undo */
	if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
		return;

	si_setcoreidx(sih, coreid);
	INTR_RESTORE(sii, intr_val);
}
/* Number of address spaces of the current core (backplane dispatch). */
int
si_numaddrspaces(si_t *sih)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_numaddrspaces(sih);
	case SOCI_AI:
		return ai_numaddrspaces(sih);
	case SOCI_UBUS:
		return ub_numaddrspaces(sih);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Base address of address space 'asidx' of the current core. */
uint32
si_addrspace(si_t *sih, uint asidx)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_addrspace(sih, asidx);
	case SOCI_AI:
		return ai_addrspace(sih, asidx);
	case SOCI_UBUS:
		return ub_addrspace(sih, asidx);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Size of address space 'asidx' of the current core. */
uint32
si_addrspacesize(si_t *sih, uint asidx)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_addrspacesize(sih, asidx);
	case SOCI_AI:
		return ai_addrspacesize(sih, asidx);
	case SOCI_UBUS:
		return ub_addrspacesize(sih, asidx);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Report base/size of address space 'asidx'; AI backplanes only,
 * other backplane types get *size = 0.
 */
void
si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	if (CHIPTYPE(sih->socitype) != SOCI_AI) {
		*size = 0;
		return;
	}
	ai_coreaddrspaceX(sih, asidx, addr, size);
}
/* Read-modify-write the current core's control flags; returns new value. */
uint32
si_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_core_cflags(sih, mask, val);
	case SOCI_AI:
		return ai_core_cflags(sih, mask, val);
	case SOCI_UBUS:
		return ub_core_cflags(sih, mask, val);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Write-only variant of si_core_cflags (no value returned). */
void
si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		sb_core_cflags_wo(sih, mask, val);
		break;
	case SOCI_AI:
		ai_core_cflags_wo(sih, mask, val);
		break;
	case SOCI_UBUS:
		ub_core_cflags_wo(sih, mask, val);
		break;
	default:
		ASSERT(0);
		break;
	}
}
/* Read-modify-write the current core's status flags; returns new value. */
uint32
si_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_core_sflags(sih, mask, val);
	case SOCI_AI:
		return ai_core_sflags(sih, mask, val);
	case SOCI_UBUS:
		return ub_core_sflags(sih, mask, val);
	default:
		ASSERT(0);
		return 0;
	}
}
/* TRUE iff the current core is out of reset and clocked. */
bool
si_iscoreup(si_t *sih)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_iscoreup(sih);
	case SOCI_AI:
		return ai_iscoreup(sih);
	case SOCI_UBUS:
		return ub_iscoreup(sih);
	default:
		ASSERT(0);
		return FALSE;
	}
}
/* Read-modify-write a wrapper register; AI backplane chips only,
 * other backplane types return 0.
 */
uint
si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
	if (CHIPTYPE(sih->socitype) != SOCI_AI)
		return 0;
	return ai_wrap_reg(sih, offset, mask, val);
}
/* Read-modify-write a register of core 'coreidx' (backplane dispatch). */
uint
si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		return sb_corereg(sih, coreidx, regoff, mask, val);
	case SOCI_AI:
		return ai_corereg(sih, coreidx, regoff, mask, val);
	case SOCI_UBUS:
		return ub_corereg(sih, coreidx, regoff, mask, val);
	default:
		ASSERT(0);
		return 0;
	}
}
/* Put the current core into reset (backplane dispatch; unknown types ignored). */
void
si_core_disable(si_t *sih, uint32 bits)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		sb_core_disable(sih, bits);
		break;
	case SOCI_AI:
		ai_core_disable(sih, bits);
		break;
	case SOCI_UBUS:
		ub_core_disable(sih, bits);
		break;
	}
}
/* Reset the current core (backplane dispatch; unknown types ignored). */
void
si_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	switch (CHIPTYPE(sih->socitype)) {
	case SOCI_SB:
		sb_core_reset(sih, bits, resetbits);
		break;
	case SOCI_AI:
		ai_core_reset(sih, bits, resetbits);
		break;
	case SOCI_UBUS:
		ub_core_reset(sih, bits, resetbits);
		break;
	}
}
/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
/* Returns 0 on success, BCME_ERROR if the core reports a BIST error. */
int
si_corebist(si_t *sih)
{
	uint32 cflags;
	int result = 0;

	/* Read core control flags so they can be restored afterwards */
	cflags = si_core_cflags(sih, 0, 0);

	/* Set bist & fgc */
	si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC));

	/* Wait for bist done (up to 100 ms) */
	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);

	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
		result = BCME_ERROR;

	/* Reset core control flags (only the low 16 bits are restored) */
	si_core_cflags(sih, 0xffff, cflags);

	return result;
}
/* Decode a clockcontrol "factor6" field to its divider value (0 = invalid). */
static uint32
factor6(uint32 x)
{
	if (x == CC_F6_2)
		return 2;
	if (x == CC_F6_3)
		return 3;
	if (x == CC_F6_4)
		return 4;
	if (x == CC_F6_5)
		return 5;
	if (x == CC_F6_6)
		return 6;
	if (x == CC_F6_7)
		return 7;
	return 0;
}
/* calculate the speed the SI would run at given a set of clockcontrol values */
/*
 * 'pll_type' selects the PLL programming model; 'n' and 'm' are the raw
 * clockcontrol N and M register values. Returns the clock in Hz, or 0 for
 * invalid encodings.
 */
uint32
si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
{
	uint32 n1, n2, clock, m1, m2, m3, mc;

	/* split the N register into its two factor fields */
	n1 = n & CN_N1_MASK;
	n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT;

	if (pll_type == PLL_TYPE6) {
		/* type 6 has only two fixed rates selected by the M field */
		if (m & CC_T6_MMASK)
			return CC_T6_M1;
		else
			return CC_T6_M0;
	} else if ((pll_type == PLL_TYPE1) ||
	           (pll_type == PLL_TYPE3) ||
	           (pll_type == PLL_TYPE4) ||
	           (pll_type == PLL_TYPE7)) {
		/* n1 is factor6-encoded, n2 is biased */
		n1 = factor6(n1);
		n2 += CC_F5_BIAS;
	} else if (pll_type == PLL_TYPE2) {
		n1 += CC_T2_BIAS;
		n2 += CC_T2_BIAS;
		ASSERT((n1 >= 2) && (n1 <= 7));
		ASSERT((n2 >= 5) && (n2 <= 23));
	} else if (pll_type == PLL_TYPE5) {
		/* type 5 is a fixed 100 MHz */
		return (100000000);
	} else
		ASSERT(0);
	/* PLL types 3 and 7 use BASE2 (25Mhz) */
	if ((pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE7)) {
		clock = CC_CLOCK_BASE2 * n1 * n2;
	} else
		clock = CC_CLOCK_BASE1 * n1 * n2;

	if (clock == 0)
		return 0;

	/* split the M register into its divider fields and mode bits */
	m1 = m & CC_M1_MASK;
	m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT;
	m3 = (m & CC_M3_MASK) >> CC_M3_SHIFT;
	mc = (m & CC_MC_MASK) >> CC_MC_SHIFT;

	if ((pll_type == PLL_TYPE1) ||
	    (pll_type == PLL_TYPE3) ||
	    (pll_type == PLL_TYPE4) ||
	    (pll_type == PLL_TYPE7)) {
		m1 = factor6(m1);
		if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3))
			m2 += CC_F5_BIAS;
		else
			m2 = factor6(m2);
		m3 = factor6(m3);

		/* mc selects which dividers apply */
		switch (mc) {
		case CC_MC_BYPASS:	return (clock);
		case CC_MC_M1:		return (clock / m1);
		case CC_MC_M1M2:	return (clock / (m1 * m2));
		case CC_MC_M1M2M3:	return (clock / (m1 * m2 * m3));
		case CC_MC_M1M3:	return (clock / (m1 * m3));
		default:		return (0);
		}
	} else {
		ASSERT(pll_type == PLL_TYPE2);

		m1 += CC_T2_BIAS;
		m2 += CC_T2M2_BIAS;
		m3 += CC_T2_BIAS;
		ASSERT((m1 >= 2) && (m1 <= 7));
		ASSERT((m2 >= 3) && (m2 <= 10));
		ASSERT((m3 >= 2) && (m3 <= 7));

		/* each divider is applied unless its bypass bit is set */
		if ((mc & CC_T2MC_M1BYP) == 0)
			clock /= m1;
		if ((mc & CC_T2MC_M2BYP) == 0)
			clock /= m2;
		if ((mc & CC_T2MC_M3BYP) == 0)
			clock /= m3;

		return (clock);
	}
}
/* set chip watchdog reset timer to fire in 'ticks' */
/* 'ticks' == 0 disables the watchdog; the value is clamped to the width of
 * the watchdog counter for this chipcommon revision.
 */
void
si_watchdog(si_t *sih, uint ticks)
{
	uint nb, maxt;

	if (PMUCTL_ENAB(sih)) {
		/* WAR: 4319 rev 0 needs the USB core disabled and a forced
		 * clock before arming the PMU watchdog.
		 */
		if ((CHIPID(sih->chip) == BCM4319_CHIP_ID) &&
		    (CHIPREV(sih->chiprev) == 0) && (ticks != 0)) {
			si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, clk_ctl_st), ~0, 0x2);
			si_setcore(sih, USB20D_CORE_ID, 0);
			si_core_disable(sih, 1);
			si_setcore(sih, CC_CORE_ID, 0);
		}

		/* PMU watchdog counter width grew over chipcommon revisions */
		nb = (sih->ccrev < 26) ? 16 : ((sih->ccrev >= 37) ? 32 : 24);
		/* The mips compiler uses the sllv instruction,
		 * so we specially handle the 32-bit case.
		 */
		if (nb == 32)
			maxt = 0xffffffff;
		else
			maxt = ((1 << nb) - 1);

		/* 1 is reserved; round up to the minimum arming value */
		if (ticks == 1)
			ticks = 2;
		else if (ticks > maxt)
			ticks = maxt;

		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
	} else {
		/* non-PMU chips: 28-bit chipcommon watchdog */
		maxt = (1 << 28) - 1;
		if (ticks > maxt)
			ticks = maxt;

		si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
	}
}
/* trigger watchdog reset after ms milliseconds */
void
si_watchdog_ms(si_t *sih, uint32 ms)
{
	/* convert milliseconds to watchdog ticks */
	si_watchdog(sih, ms * wd_msticks);
}
/* Return the number of watchdog ticks per millisecond. */
uint32 si_watchdog_msticks(void)
{
	return (wd_msticks);
}
/* Clear any pending target-abort state. This generic build is a stub that
 * always reports no target abort was pending; 'details' is unused here.
 */
bool
si_taclear(si_t *sih, bool details)
{
	return FALSE;
}
/* return the slow clock source - LPO, XTAL, or PCI */
/* Chipcommon rev < 6 infers the source from a PCI config GPIO strap;
 * rev 6..9 reads it from slow_clk_ctl; rev >= 10 (Insta-clock) is always XTAL.
 */
static uint
si_slowclk_src(si_info_t *sii)
{
	chipcregs_t *cc;

	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);

	if (sii->pub.ccrev < 6) {
		if ((BUSTYPE(sii->pub.bustype) == PCI_BUS) &&
		    (OSL_PCI_READ_CONFIG(sii->osh, PCI_GPIO_OUT, sizeof(uint32)) &
		     PCI_CFG_GPIO_SCS))
			return (SCC_SS_PCI);
		else
			return (SCC_SS_XTAL);
	} else if (sii->pub.ccrev < 10) {
		cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx);
		return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK);
	} else	/* Insta-clock */
		return (SCC_SS_XTAL);
}
/* return the ILP (slowclock) min or max frequency */
/* 'max_freq' selects whether the worst-case maximum or minimum frequency
 * of the chosen slow-clock source (after its divider) is returned, in Hz.
 */
static uint
si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
{
	uint32 slowclk;
	uint div;

	ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID);

	/* shouldn't be here unless we've established the chip has dynamic clk control */
	ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL);

	slowclk = si_slowclk_src(sii);
	if (sii->pub.ccrev < 6) {
		/* fixed dividers on very old chipcommon revs */
		if (slowclk == SCC_SS_PCI)
			return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64));
		else
			return (max_freq ? (XTALMAXFREQ / 32) : (XTALMINFREQ / 32));
	} else if (sii->pub.ccrev < 10) {
		/* divider field in slow_clk_ctl, encoded as (4 * (field + 1)) */
		div = 4 *
		        (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
		if (slowclk == SCC_SS_LPO)
			return (max_freq ? LPOMAXFREQ : LPOMINFREQ);
		else if (slowclk == SCC_SS_XTAL)
			return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div));
		else if (slowclk == SCC_SS_PCI)
			return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div));
		else
			ASSERT(0);
	} else {
		/* Chipc rev 10 is InstaClock */
		div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT;
		div = 4 * (div + 1);
		return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div));
	}
	return (0);
}
/* Program the PLL-on and fref-select delay registers from the worst-case
 * slow-clock frequency, so dynamic clock control waits long enough for the
 * PLL (and possibly the xtal) to stabilize.
 */
static void
si_clkctl_setdelay(si_info_t *sii, void *chipcregs)
{
	chipcregs_t *cc = (chipcregs_t *)chipcregs;
	uint slowmaxfreq, pll_delay, slowclk;
	uint pll_on_delay, fref_sel_delay;

	pll_delay = PLL_DELAY;

	/* If the slow clock is not sourced by the xtal then add the xtal_on_delay
	 * since the xtal will also be powered down by dynamic clk control logic.
	 */
	slowclk = si_slowclk_src(sii);
	if (slowclk != SCC_SS_XTAL)
		pll_delay += XTAL_ON_DELAY;

	/* Starting with 4318 it is ILP that is used for the delays */
	slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? FALSE : TRUE, cc);

	/* convert delay in microseconds to slow-clock ticks, rounding up */
	pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
	fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;

	W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay);
	W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
}
/* initialize power control delay registers */
void
si_clkctl_init(si_t *sih)
{
	si_info_t *sii;
	uint origidx = 0;
	chipcregs_t *cc;
	bool fast;

	/* nothing to do if this chip has no chipcommon clock control */
	if (!CCCTL_ENAB(sih))
		return;

	sii = SI_INFO(sih);
	fast = SI_FAST(sii);
	if (!fast) {
		/* slow path: switch focus to chipcommon, remember where we were */
		origidx = sii->curidx;
		if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL)
			return;
	} else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL)
		return;
	ASSERT(cc != NULL);

	/* set all Instaclk chip ILP to 1 MHz */
	if (sih->ccrev >= 10)
		SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK,
		        (ILP_DIV_1MHZ << SYCC_CD_SHIFT));

	si_clkctl_setdelay(sii, (void *)(uintptr)cc);

	/* restore the original core focus if we switched */
	if (!fast)
		si_setcoreidx(sih, origidx);
}
/* change logical "focus" to the gpio core for optimized access */
void *
si_gpiosetcore(si_t *sih)
{
	/* GPIOs live in chipcommon */
	return si_setcoreidx(sih, SI_CC_IDX);
}
/*
 * mask & set gpiocontrol bits.
 * If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
 * If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
 * to some chip-specific purpose.
 */
uint32
si_gpiocontrol(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	/* GPIOs may be shared on router (SI_BUS) platforms, so honor
	 * reservations unless the caller is high priority (e.g. test apps).
	 */
	if ((priority != GPIO_HI_PRIORITY) &&
	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		mask = priority ? (si_gpioreservation & mask) :
		        ((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiocontrol), mask, val);
}
/* mask&set gpio output enable bits */
uint32
si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	/* GPIOs may be shared on router (SI_BUS) platforms, so honor
	 * reservations unless the caller is high priority (e.g. test apps).
	 */
	if ((priority != GPIO_HI_PRIORITY) &&
	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		mask = priority ? (si_gpioreservation & mask) :
		        ((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpioouten), mask, val);
}
/* mask&set gpio output bits */
uint32
si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	/* GPIOs may be shared on router (SI_BUS) platforms, so honor
	 * reservations unless the caller is high priority (e.g. test apps).
	 */
	if ((priority != GPIO_HI_PRIORITY) &&
	    (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		mask = priority ? (si_gpioreservation & mask) :
		        ((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpioout), mask, val);
}
/* reserve one gpio */
/* Returns the updated reservation bitmap, or 0xffffffff on any failure:
 * wrong bus / priority, not exactly one bit set, or already reserved.
 */
uint32
si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
	/* only cores on SI_BUS share GPIO's and only applcation users need to
	 * reserve/release GPIO
	 */
	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
		return 0xffffffff;
	}

	/* exactly one bit must be requested */
	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
		return 0xffffffff;
	}

	/* fail if someone already holds it */
	if (si_gpioreservation & gpio_bitmask)
		return 0xffffffff;

	si_gpioreservation |= gpio_bitmask;
	return si_gpioreservation;
}
/* release one gpio */
/*
 * releasing the gpio doesn't change the current value on the GPIO last write value
 * persists till some one overwrites it
 */
uint32
si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
	/* only cores on SI_BUS share GPIO's and only applcation users need to
	 * reserve/release GPIO
	 */
	if ((BUSTYPE(sih->bustype) != SI_BUS) || (!priority)) {
		ASSERT((BUSTYPE(sih->bustype) == SI_BUS) && (priority));
		return 0xffffffff;
	}

	/* exactly one bit must be requested */
	if ((!gpio_bitmask) || ((gpio_bitmask) & (gpio_bitmask - 1))) {
		ASSERT((gpio_bitmask) && !((gpio_bitmask) & (gpio_bitmask - 1)));
		return 0xffffffff;
	}

	/* fail if it was never reserved */
	if (!(si_gpioreservation & gpio_bitmask))
		return 0xffffffff;

	si_gpioreservation &= ~gpio_bitmask;
	return si_gpioreservation;
}
/* return the current gpioin register value */
uint32
si_gpioin(si_t *sih)
{
	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpioin), 0, 0);
}
/* mask&set gpio interrupt polarity bits */
uint32
si_gpiointpolarity(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	/* gpios could be shared on router platforms */
	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		mask = priority ? (si_gpioreservation & mask) :
		        ((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiointpolarity), mask, val);
}
/* mask&set gpio interrupt mask bits */
uint32
si_gpiointmask(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
	/* gpios could be shared on router platforms */
	if ((BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) {
		mask = priority ? (si_gpioreservation & mask) :
		        ((si_gpioreservation | mask) & ~(si_gpioreservation));
		val &= mask;
	}

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiointmask), mask, val);
}
/* assign the gpio to an led */
uint32
si_gpioled(si_t *sih, uint32 mask, uint32 val)
{
	/* gpio led powersave reg appeared in chipcommon rev 16 */
	if (sih->ccrev < 16)
		return 0xffffffff;

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimeroutmask), mask, val);
}
/* mask&set gpio timer val */
uint32
si_gpiotimerval(si_t *sih, uint32 mask, uint32 gpiotimerval)
{
	/* register exists only from chipcommon rev 16 on */
	if (sih->ccrev < 16)
		return 0xffffffff;

	return si_corereg(sih, SI_CC_IDX,
	                  OFFSETOF(chipcregs_t, gpiotimerval), mask, gpiotimerval);
}
/* mask&set gpio pull-up ('updown' FALSE) or pull-down ('updown' TRUE) bits */
uint32
si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val)
{
	uint offs;

	/* pull registers appeared in chipcommon rev 20 */
	if (sih->ccrev < 20)
		return 0xffffffff;

	offs = updown ? OFFSETOF(chipcregs_t, gpiopulldown)
	              : OFFSETOF(chipcregs_t, gpiopullup);
	return si_corereg(sih, SI_CC_IDX, offs, mask, val);
}
/* mask&set one of the gpio event registers selected by 'regtype' */
uint32
si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val)
{
	uint offs;

	/* event registers appeared in chipcommon rev 11 */
	if (sih->ccrev < 11)
		return 0xffffffff;

	switch (regtype) {
	case GPIO_REGEVT:
		offs = OFFSETOF(chipcregs_t, gpioevent);
		break;
	case GPIO_REGEVT_INTMSK:
		offs = OFFSETOF(chipcregs_t, gpioeventintmask);
		break;
	case GPIO_REGEVT_INTPOL:
		offs = OFFSETOF(chipcregs_t, gpioeventintpolarity);
		break;
	default:
		return 0xffffffff;
	}

	return si_corereg(sih, SI_CC_IDX, offs, mask, val);
}
/* Register a GPIO event handler; returns an opaque handle for
 * si_gpio_handler_unregister(), or NULL on failure.
 */
void *
si_gpio_handler_register(si_t *sih, uint32 event,
	bool level, gpio_handler_t cb, void *arg)
{
	si_info_t *sii = SI_INFO(sih);
	gpioh_item_t *item;

	ASSERT(event);
	ASSERT(cb != NULL);

	/* GPIO events require chipcommon rev 11 */
	if (sih->ccrev < 11)
		return NULL;

	item = MALLOC(sii->osh, sizeof(gpioh_item_t));
	if (item == NULL)
		return NULL;

	bzero(item, sizeof(gpioh_item_t));
	item->event = event;
	item->handler = cb;
	item->arg = arg;
	item->level = level;

	/* push onto the head of the handler list */
	item->next = sii->gpioh_head;
	sii->gpioh_head = item;

	return (void *)item;
}
/* Unlink and free a handler previously returned by si_gpio_handler_register().
 * Asserts if the handle is not found in the list.
 */
void
si_gpio_handler_unregister(si_t *sih, void *gpioh)
{
	si_info_t *sii = SI_INFO(sih);
	gpioh_item_t *prev, *cur;

	if (sih->ccrev < 11)
		return;

	ASSERT(sii->gpioh_head != NULL);

	/* handle at the head of the list */
	if ((void*)sii->gpioh_head == gpioh) {
		sii->gpioh_head = sii->gpioh_head->next;
		MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
		return;
	}

	/* walk the rest of the list */
	prev = sii->gpioh_head;
	for (cur = prev->next; cur != NULL; prev = cur, cur = cur->next) {
		if ((void*)cur == gpioh) {
			prev->next = cur->next;
			MFREE(sii->osh, gpioh, sizeof(gpioh_item_t));
			return;
		}
	}

	ASSERT(0); /* Not found in list */
}
/* Dispatch registered GPIO handlers: level handlers see the gpioin state,
 * edge handlers see the latched gpioevent state; a handler fires when its
 * event bits differ from the configured polarity. Edge events are cleared
 * (write-1-to-clear) after dispatch.
 */
void
si_gpio_handler_process(si_t *sih)
{
	si_info_t *sii;
	gpioh_item_t *h;
	/* snapshot level/edge status and their polarity registers */
	uint32 level = si_gpioin(sih);
	uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
	uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
	uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);

	sii = SI_INFO(sih);
	for (h = sii->gpioh_head; h != NULL; h = h->next) {
		if (h->handler) {
			uint32 status = (h->level ? level : edge) & h->event;
			uint32 polarity = (h->level ? levelp : edgep) & h->event;

			/* polarity bitval is opposite of status bitval */
			if (status ^ polarity)
				h->handler(status, h->arg);
		}
	}

	si_gpioevent(sih, GPIO_REGEVT, edge, edge); /* clear edge-trigger status */
}
/* Enable or disable the GPIO interrupt in the chipcommon interrupt mask. */
uint32
si_gpio_int_enable(si_t *sih, bool enable)
{
	/* requires chipcommon rev 11 */
	if (sih->ccrev < 11)
		return 0xffffffff;

	return si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intmask),
	                  CI_GPIO, (enable ? CI_GPIO : 0));
}
/* Return the size of the specified SOCRAM bank */
/* Selects the bank via the bankidx register (bank index combined with the
 * memory type in the upper field), then decodes the size from bankinfo.
 * Fix: "&regs->" had been corrupted to "(R)s->" by an HTML-entity mangling
 * of "&reg"; restored so the code compiles.
 */
static uint
socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
{
	uint banksize, bankinfo;
	uint bankidx = idx | (mem_type << SOCRAM_BANKIDX_MEMTYPE_SHIFT);

	ASSERT(mem_type <= SOCRAM_MEMTYPE_DEVRAM);

	W_REG(sii->osh, &regs->bankidx, bankidx);
	bankinfo = R_REG(sii->osh, &regs->bankinfo);
	banksize = SOCRAM_BANKINFO_SZBASE * ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1);
	return banksize;
}
/* Get or set the SOCRAM device-RAM bank configuration.
 * set==TRUE: program every devram bank's select/protect/remap bits from
 * *enable/*protect/*remap (remap needs corerev >= 16).
 * set==FALSE: read bank 0's configuration back into the three outputs.
 * Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity mangling of
 * "&reg"); restored so the register accesses compile.
 */
void
si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	if (!set)
		*enable = *protect = *remap = 0;

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Core must be up before its registers can be accessed */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);

	corerev = si_corerev(sih);
	if (corerev >= 10) {
		uint32 extcinfo;
		uint8 nb;
		uint8 i;
		uint32 bankidx, bankinfo;

		/* number of devram banks is reported in extracoreinfo */
		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
		for (i = 0; i < nb; i++) {
			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
			W_REG(sii->osh, &regs->bankidx, bankidx);
			bankinfo = R_REG(sii->osh, &regs->bankinfo);
			if (set) {
				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMSEL_MASK;
				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMPRO_MASK;
				bankinfo &= ~SOCRAM_BANKINFO_DEVRAMREMAP_MASK;
				if (*enable) {
					bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMSEL_SHIFT);
					if (*protect)
						bankinfo |= (1 << SOCRAM_BANKINFO_DEVRAMPRO_SHIFT);
					/* remap is only supported from corerev 16 */
					if ((corerev >= 16) && *remap)
						bankinfo |=
							(1 << SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT);
				}
				W_REG(sii->osh, &regs->bankinfo, bankinfo);
			}
			else if (i == 0) {
				/* read path reports only bank 0's settings */
				if (bankinfo & SOCRAM_BANKINFO_DEVRAMSEL_MASK) {
					*enable = 1;
					if (bankinfo & SOCRAM_BANKINFO_DEVRAMPRO_MASK)
						*protect = 1;
					if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK)
						*remap = 1;
				}
			}
		}
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
}
/* TRUE iff any SOCRAM devram bank has its remap bit set (corerev >= 16).
 * Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity mangling of
 * "&reg"); restored so the register accesses compile.
 */
bool
si_socdevram_remap_isenb(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	sbsocramregs_t *regs;
	bool wasup, remap = FALSE;
	uint corerev;
	uint32 extcinfo;
	uint8 nb;
	uint8 i;
	uint32 bankidx, bankinfo;

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Core must be up before its registers can be accessed */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);

	corerev = si_corerev(sih);
	if (corerev >= 16) {
		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
		nb = ((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT);
		for (i = 0; i < nb; i++) {
			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
			W_REG(sii->osh, &regs->bankidx, bankidx);
			bankinfo = R_REG(sii->osh, &regs->bankinfo);
			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
				remap = TRUE;
				break;
			}
		}
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
	return remap;
}
/* TRUE iff this package has any SOCRAM device RAM. */
bool
si_socdevram_pkg(si_t *sih)
{
	return (si_socdevram_size(sih) > 0) ? TRUE : FALSE;
}
/* Return the total size in bytes of the SOCRAM device RAM (corerev >= 10).
 * Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity mangling of
 * "&reg"); restored so the register access compiles.
 */
uint32
si_socdevram_size(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 memsize = 0;
	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Core must be up before its registers can be accessed */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);

	corerev = si_corerev(sih);
	if (corerev >= 10) {
		uint32 extcinfo;
		uint8 nb;
		uint8 i;

		/* sum the size of every devram bank */
		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));
		for (i = 0; i < nb; i++)
			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
	return memsize;
}
/* Return the total size in bytes of the consecutively-remapped SOCRAM devram
 * banks (corerev >= 16); stops at the first non-remapped bank.
 * Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity mangling of
 * "&reg"); restored so the register accesses compile.
 */
uint32
si_socdevram_remap_size(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	uint32 memsize = 0, banksz;
	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;
	uint32 extcinfo;
	uint8 nb;
	uint8 i;
	uint32 bankidx, bankinfo;

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Core must be up before its registers can be accessed */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);

	corerev = si_corerev(sih);
	if (corerev >= 16) {
		extcinfo = R_REG(sii->osh, &regs->extracoreinfo);
		nb = (((extcinfo & SOCRAM_DEVRAMBANK_MASK) >> SOCRAM_DEVRAMBANK_SHIFT));

		/*
		 * FIX: A0 Issue: Max addressable is 512KB, instead 640KB
		 * Only four banks are accessible to ARM
		 */
		if ((corerev == 16) && (nb == 5))
			nb = 4;

		for (i = 0; i < nb; i++) {
			bankidx = i | (SOCRAM_MEMTYPE_DEVRAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT);
			W_REG(sii->osh, &regs->bankidx, bankidx);
			bankinfo = R_REG(sii->osh, &regs->bankinfo);
			if (bankinfo & SOCRAM_BANKINFO_DEVRAMREMAP_MASK) {
				banksz = socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_DEVRAM);
				memsize += banksz;
			} else {
				/* Account only consecutive banks for now */
				break;
			}
		}
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
	return memsize;
}
/* Return the RAM size of the SOCRAM core */
/* Size decoding depends on the SOCRAM core revision; returns 0 if the core
 * is absent. Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity
 * mangling of "&reg"); restored so the coreinfo read compiles.
 */
uint32
si_socram_size(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;
	uint32 coreinfo;
	uint memsize = 0;

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Get info for determining size */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);
	corerev = si_corerev(sih);
	coreinfo = R_REG(sii->osh, &regs->coreinfo);

	/* Calculate size from coreinfo based on rev */
	if (corerev == 0)
		memsize = 1 << (16 + (coreinfo & SRCI_MS0_MASK));
	else if (corerev < 3) {
		memsize = 1 << (SR_BSZ_BASE + (coreinfo & SRCI_SRBSZ_MASK));
		memsize *= (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
	} else if ((corerev <= 7) || (corerev == 12)) {
		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		uint bsz = (coreinfo & SRCI_SRBSZ_MASK);
		uint lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT;
		/* the last bank, when present, has its own (smaller) size */
		if (lss != 0)
			nb--;
		memsize = nb * (1 << (bsz + SR_BSZ_BASE));
		if (lss != 0)
			memsize += (1 << ((lss - 1) + SR_BSZ_BASE));
	} else {
		/* rev >= 8 (except 12): sum the per-bank sizes */
		uint8 i;
		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		for (i = 0; i < nb; i++)
			memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
	return memsize;
}
/* Return the total size of the SOCRAM banks marked as retention RAM
 * (corerev >= 16). 4334 revs < 2 report a fixed 32 KB.
 * Fix: "&regs->" had been corrupted to "(R)s->" (HTML-entity mangling of
 * "&reg"); restored so the register accesses compile.
 */
uint32
si_socram_srmem_size(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	sbsocramregs_t *regs;
	bool wasup;
	uint corerev;
	uint32 coreinfo;
	uint memsize = 0;

	/* chip-specific fixed answer */
	if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
		return (32 * 1024);
	}

	sii = SI_INFO(sih);

	/* Block ints and save current core */
	INTR_OFF(sii, intr_val);
	origidx = si_coreidx(sih);

	/* Switch to SOCRAM core */
	if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
		goto done;

	/* Get info for determining size */
	if (!(wasup = si_iscoreup(sih)))
		si_core_reset(sih, 0, 0);
	corerev = si_corerev(sih);
	coreinfo = R_REG(sii->osh, &regs->coreinfo);

	/* Calculate size from coreinfo based on rev */
	if (corerev >= 16) {
		uint8 i;
		uint nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
		for (i = 0; i < nb; i++) {
			W_REG(sii->osh, &regs->bankidx, i);
			/* count only banks flagged as retention RAM */
			if (R_REG(sii->osh, &regs->bankinfo) & SOCRAM_BANKINFO_RETNTRAM_MASK)
				memsize += socram_banksize(sii, regs, i, SOCRAM_MEMTYPE_RAM);
		}
	}

	/* Return to previous state and core */
	if (!wasup)
		si_core_disable(sih, 0);
	si_setcoreidx(sih, origidx);
done:
	INTR_RESTORE(sii, intr_val);
	return memsize;
}
/*
 * BT-coex GPIO workaround: force OUT2 (bit 0x04) in the UART0 modem
 * control register. Only applies when ChipCommon reports UART/GPIO
 * capability. Register accesses here must remain 8-bit (see below).
 */
void
si_btcgpiowar(si_t *sih)
{
	si_info_t *sii;
	uint origidx;
	uint intr_val = 0;
	chipcregs_t *cc;

	sii = SI_INFO(sih);

	/* Make sure that there is ChipCommon core present &&
	 * UART_TX is strapped to 1
	 */
	if (!(sih->cccaps & CC_CAP_UARTGPIO))
		return;

	/* si_corereg cannot be used as we have to guarantee 8-bit read/writes */
	INTR_OFF(sii, intr_val);

	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
	ASSERT(cc != NULL);

	/* Read-modify-write: set bit 2 (0x04) of uart0mcr */
	W_REG(sii->osh, &cc->uart0mcr, R_REG(sii->osh, &cc->uart0mcr) | 0x04);

	/* restore the original index */
	si_setcoreidx(sih, origidx);

	INTR_RESTORE(sii, intr_val);
}
/*
 * Route or un-route bt_shd0 onto GPIO4 on the 4331 family via the
 * ChipCommon chipcontrol register. Same control bit for chiprevs 0/1
 * and both the 12x9 and 12x12 packages.
 */
void
si_chipcontrl_btshd0_4331(si_t *sih, bool on)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;
	uint32 ctrl;
	uint intr_val = 0;

	sii = SI_INFO(sih);

	INTR_OFF(sii, intr_val);

	origidx = si_coreidx(sih);
	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	/* One read-modify-write of chipcontrol: set or clear the GPIO4 routing bit */
	ctrl = R_REG(sii->osh, &cc->chipcontrol);
	if (on)
		ctrl |= CCTRL4331_BT_SHD0_ON_GPIO4;
	else
		ctrl &= ~CCTRL4331_BT_SHD0_ON_GPIO4;
	W_REG(sii->osh, &cc->chipcontrol, ctrl);

	/* restore the original index */
	si_setcoreidx(sih, origidx);

	INTR_RESTORE(sii, intr_val);
}
/* Write a previously saved value back to ChipCommon's chipcontrol register. */
void
si_chipcontrl_restore(si_t *sih, uint32 val)
{
	si_info_t *sii = SI_INFO(sih);
	uint saved_idx = si_coreidx(sih);
	chipcregs_t *ccregs;

	ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
	W_REG(sii->osh, &ccregs->chipcontrol, val);

	/* put the previously selected core back */
	si_setcoreidx(sih, saved_idx);
}
/* Read and return the current value of ChipCommon's chipcontrol register. */
uint32
si_chipcontrl_read(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint saved_idx = si_coreidx(sih);
	chipcregs_t *ccregs;
	uint32 ctrl;

	ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
	ctrl = R_REG(sii->osh, &ccregs->chipcontrol);

	/* put the previously selected core back */
	si_setcoreidx(sih, saved_idx);

	return ctrl;
}
/*
 * Enable/disable the external PA controls on 4331 boards via ChipCommon
 * chipcontrol. The bits used depend on the package (12x9 vs 12x12) and,
 * for 12x12, on the chip revision.
 */
void
si_chipcontrl_epa4331(si_t *sih, bool on)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;
	uint32 val;

	sii = SI_INFO(sih);
	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	val = R_REG(sii->osh, &cc->chipcontrol);

	if (on) {
		/* chippkg 9 / 0xb identify the 12x9 package */
		if (sih->chippkg == 9 || sih->chippkg == 0xb) {
			val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
			/* Ext PA Controls for 4331 12x9 Package */
			W_REG(sii->osh, &cc->chipcontrol, val);
		} else {
			/* Ext PA Controls for 4331 12x12 Package */
			if (sih->chiprev > 0) {
				/* rev > 0 needs the second enable bit as well */
				W_REG(sii->osh, &cc->chipcontrol, val |
				      (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
			} else {
				W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
			}
		}
	} else {
		/* clear every ext-PA control bit regardless of package */
		val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
		W_REG(sii->osh, &cc->chipcontrol, val);
	}

	si_setcoreidx(sih, origidx);
}
/* switch muxed pins, on: SROM, off: FEMCTRL */
void
si_chipcontrl_srom4360(si_t *sih, bool on)
{
	si_info_t *sii = SI_INFO(sih);
	uint saved_idx = si_coreidx(sih);
	chipcregs_t *ccregs;
	uint32 ctrl;

	ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
	ctrl = R_REG(sii->osh, &ccregs->chipcontrol);

	/*
	 * Only the "on" direction is implemented: clear every FEMCTRL-related
	 * mode bit so the pins revert to SROM. "off" is intentionally a no-op.
	 */
	if (on) {
		ctrl &= ~(CCTRL4360_SECI_MODE |
			  CCTRL4360_BTSWCTRL_MODE |
			  CCTRL4360_EXTRA_FEMCTRL_MODE |
			  CCTRL4360_BT_LGCY_MODE |
			  CCTRL4360_CORE2FEMCTRL4_ON);
		W_REG(sii->osh, &ccregs->chipcontrol, ctrl);
	}

	si_setcoreidx(sih, saved_idx);
}
/*
 * Adjust 4331 external-PA controls for wake-on-wireless transitions.
 * Only applies to 4331/43431 in the 12x9 package (chippkg 9 / 0xb);
 * other chips/packages return immediately.
 */
void
si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;
	uint32 val;
	bool sel_chip;

	/* gate on chip id AND package id */
	sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
		   (CHIPID(sih->chip) == BCM43431_CHIP_ID);
	sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));

	if (!sel_chip)
		return;

	sii = SI_INFO(sih);
	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	val = R_REG(sii->osh, &cc->chipcontrol);

	if (enter_wowl) {
		/* entering WoWL: keep only the PA enable */
		val |= CCTRL4331_EXTPA_EN;
		W_REG(sii->osh, &cc->chipcontrol, val);
	} else {
		/* leaving WoWL: re-enable the GPIO2_5 routing as well */
		val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
		W_REG(sii->osh, &cc->chipcontrol, val);
	}

	si_setcoreidx(sih, origidx);
}
/*
 * PLL reset hook. No reset work is required on this platform, so this
 * always reports success (0).
 */
uint
si_pll_reset(si_t *sih)
{
	return 0;
}
/* Enable BT-COEX & Ex-PA for 4313 */
void
si_epa_4313war(si_t *sih)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;

	sii = SI_INFO(sih);
	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	/* EPA Fix: read-modify-write gpiocontrol to set the EPA enable bits */
	W_REG(sii->osh, &cc->gpiocontrol,
	      R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);

	/* restore the previously selected core */
	si_setcoreidx(sih, origidx);
}
/*
 * Intentional no-op stub: this build has no PMU HT-avail clock control
 * to perform. Kept so callers can invoke it unconditionally.
 */
void
si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
{
}
/* WL/BT control for 4313 btcombo boards >= P250 */
void
si_btcombo_p250_4313_war(si_t *sih)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;

	sii = SI_INFO(sih);
	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	/* hand GPIOs 5/6 to chipcommon control ... */
	W_REG(sii->osh, &cc->gpiocontrol,
	      R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);

	/* ... and drive them as outputs */
	W_REG(sii->osh, &cc->gpioouten,
	      R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);

	/* restore the previously selected core */
	si_setcoreidx(sih, origidx);
}
/* Turn on the BT coexistence enable bit in ChipCommon chipcontrol. */
void
si_btc_enable_chipcontrol(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	uint saved_idx = si_coreidx(sih);
	chipcregs_t *ccregs;
	uint32 ctrl;

	ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	/* BT fix: read-modify-write the coex enable mask */
	ctrl = R_REG(sii->osh, &ccregs->chipcontrol);
	W_REG(sii->osh, &ccregs->chipcontrol, ctrl | CC_BTCOEX_EN_MASK);

	si_setcoreidx(sih, saved_idx);
}
/*
 * 43228 BT-combo workaround: enable GPIOs 6/7 as outputs and drive
 * GPIO 7 high. Note these are absolute writes, not read-modify-write.
 */
void
si_btcombo_43228_war(si_t *sih)
{
	si_info_t *sii;
	chipcregs_t *cc;
	uint origidx;

	sii = SI_INFO(sih);
	origidx = si_coreidx(sih);

	cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);

	W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
	W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);

	/* restore the previously selected core */
	si_setcoreidx(sih, origidx);
}
/* check if the device is removed */
bool
si_deviceremoved(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);

	/*
	 * Only the PCI bus gives us a cheap liveness probe: a removed
	 * device no longer answers config reads with Broadcom's vendor id.
	 */
	if (BUSTYPE(sih->bustype) == PCI_BUS) {
		uint32 vid;

		ASSERT(sii->osh != NULL);
		vid = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
		if ((vid & 0xFFFF) != VENDOR_BROADCOM)
			return TRUE;
	}

	return FALSE;
}
/*
 * Report whether an SPROM is present/usable on this chip. For ccrev >= 31
 * the SROM controller register is authoritative; for older revs the
 * answer comes from per-chip chip-status strap bits.
 */
bool
si_is_sprom_available(si_t *sih)
{
	if (sih->ccrev >= 31) {
		si_info_t *sii;
		uint origidx;
		chipcregs_t *cc;
		uint32 sromctrl;

		/* no SROM capability at all -> definitely not available */
		if ((sih->cccaps & CC_CAP_SROM) == 0)
			return FALSE;

		sii = SI_INFO(sih);
		origidx = sii->curidx;
		cc = si_setcoreidx(sih, SI_CC_IDX);
		sromctrl = R_REG(sii->osh, &cc->sromcontrol);
		si_setcoreidx(sih, origidx);
		return (sromctrl & SRC_PRESENT);
	}

	/* older chipcommon revs: decode chip-specific strap/status bits */
	switch (CHIPID(sih->chip)) {
	case BCM4312_CHIP_ID:
		return ((sih->chipst & CST4312_SPROM_OTP_SEL_MASK) != CST4312_OTP_SEL);
	case BCM4325_CHIP_ID:
		return (sih->chipst & CST4325_SPROM_SEL) != 0;
	case BCM4322_CHIP_ID: case BCM43221_CHIP_ID: case BCM43231_CHIP_ID:
	case BCM43222_CHIP_ID: case BCM43111_CHIP_ID: case BCM43112_CHIP_ID:
	case BCM4342_CHIP_ID: {
		/* 4322 family encodes SPROM/OTP selection as a bit-field */
		uint32 spromotp;
		spromotp = (sih->chipst & CST4322_SPROM_OTP_SEL_MASK) >>
			   CST4322_SPROM_OTP_SEL_SHIFT;
		return (spromotp & CST4322_SPROM_PRESENT) != 0;
	}
	case BCM4329_CHIP_ID:
		return (sih->chipst & CST4329_SPROM_SEL) != 0;
	case BCM4315_CHIP_ID:
		return (sih->chipst & CST4315_SPROM_SEL) != 0;
	case BCM4319_CHIP_ID:
		return (sih->chipst & CST4319_SPROM_SEL) != 0;
	case BCM4336_CHIP_ID:
	case BCM43362_CHIP_ID:
		return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
	case BCM4330_CHIP_ID:
		return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
	case BCM4313_CHIP_ID:
		return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
	case BCM4331_CHIP_ID:
	case BCM43431_CHIP_ID:
		return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
	case BCM43239_CHIP_ID:
		/* SPROM present AND serial flash absent */
		return ((sih->chipst & CST43239_SPROM_MASK) &&
			!(sih->chipst & CST43239_SFLASH_MASK));
	case BCM4324_CHIP_ID:
		return ((sih->chipst & CST4324_SPROM_MASK) &&
			!(sih->chipst & CST4324_SFLASH_MASK));
	case BCM43131_CHIP_ID:
	case BCM43217_CHIP_ID:
	case BCM43227_CHIP_ID:
	case BCM43228_CHIP_ID:
	case BCM43428_CHIP_ID:
		/* these parts use OTP; SPROM available only when OTP absent */
		return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
	default:
		/* unknown chips: assume SPROM is there */
		return TRUE;
	}
}
| gpl-2.0 |
mythos234/AndromedaN910F-LL | sound/pci/au88x0/au88x0_game.c | 2923 | 3669 | /*
* Manuel Jander.
*
* Based on the work of:
* Vojtech Pavlik
* Raymond Ingles
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*
* Based 90% on Vojtech Pavlik pcigame driver.
* Merged and modified by Manuel Jander, for the OpenVortex
* driver. (email: mjander@embedded.cl).
*/
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <sound/core.h>
#include "au88x0.h"
#include <linux/gameport.h>
#include <linux/export.h>
#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
#define VORTEX_GAME_DWAIT 20 /* 20 ms */
/* Raw-mode gameport read: return the legacy gameport register byte. */
static unsigned char vortex_game_read(struct gameport *gameport)
{
	vortex_t *card = gameport_get_port_data(gameport);

	return hwread(card->mmio, VORTEX_GAME_LEGACY);
}
/* Start an analog axis measurement by writing the legacy trigger register. */
static void vortex_game_trigger(struct gameport *gameport)
{
	vortex_t *card = gameport_get_port_data(gameport);

	hwwrite(card->mmio, VORTEX_GAME_LEGACY, 0xff);
}
/*
 * Cooked-mode gameport read: fetch the 4 button bits (active-low in
 * hardware, reported active-high) and the 4 ADC axis values. An axis
 * reading of AXIS_RANGE means "not connected" and is reported as -1.
 */
static int
vortex_game_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
	vortex_t *card = gameport_get_port_data(gameport);
	int axis;

	*buttons = (~hwread(card->mmio, VORTEX_GAME_LEGACY) >> 4) & 0xf;

	for (axis = 0; axis < 4; axis++) {
		int raw = hwread(card->mmio,
				 VORTEX_GAME_AXIS + (axis * AXIS_SIZE));

		axes[axis] = (raw == AXIS_RANGE) ? -1 : raw;
	}

	return 0;
}
/*
 * Switch the gameport between cooked (on-chip ADC) and raw modes.
 * Returns 0 on success, -1 for an unsupported mode.
 *
 * Fix: removed the unreachable "return 0;" that followed the switch —
 * every case (including default) already returns.
 */
static int vortex_game_open(struct gameport *gameport, int mode)
{
	vortex_t *vortex = gameport_get_port_data(gameport);

	switch (mode) {
	case GAMEPORT_MODE_COOKED:
		/* enable the ADC and give it time to settle */
		hwwrite(vortex->mmio, VORTEX_CTRL2,
			hwread(vortex->mmio,
			       VORTEX_CTRL2) | CTRL2_GAME_ADCMODE);
		msleep(VORTEX_GAME_DWAIT);
		return 0;
	case GAMEPORT_MODE_RAW:
		hwwrite(vortex->mmio, VORTEX_CTRL2,
			hwread(vortex->mmio,
			       VORTEX_CTRL2) & ~CTRL2_GAME_ADCMODE);
		return 0;
	default:
		return -1;
	}
}
/*
 * Allocate, configure and register the gameport for this card.
 * Returns 0 on success or -ENOMEM if the port cannot be allocated.
 * The port is stored in vortex->gameport for later unregistration.
 */
static int vortex_gameport_register(vortex_t *vortex)
{
	struct gameport *gp;

	vortex->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "vortex: cannot allocate memory for gameport\n");
		return -ENOMEM;
	}

	gameport_set_name(gp, "AU88x0 Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(vortex->pci_dev));
	gameport_set_dev_parent(gp, &vortex->pci_dev->dev);

	/* wire up the raw/cooked callbacks defined above */
	gp->read = vortex_game_read;
	gp->trigger = vortex_game_trigger;
	gp->cooked_read = vortex_game_cooked_read;
	gp->open = vortex_game_open;

	gameport_set_port_data(gp, vortex);
	gp->fuzz = 64;		/* ADC noise filtering for the input core */

	gameport_register_port(gp);

	return 0;
}
/* Tear down the gameport if one was registered; safe to call twice. */
static void vortex_gameport_unregister(vortex_t *vortex)
{
	struct gameport *gp = vortex->gameport;

	if (!gp)
		return;

	gameport_unregister_port(gp);
	vortex->gameport = NULL;
}
#else
static inline int vortex_gameport_register(vortex_t * vortex) { return -ENOSYS; }
static inline void vortex_gameport_unregister(vortex_t * vortex) { }
#endif
| gpl-2.0 |
compulab/trimslice-android-kernel | drivers/input/keyboard/davinci_keyscan.c | 2923 | 9422 | /*
* DaVinci Key Scan Driver for TI platforms
*
* Copyright (C) 2009 Texas Instruments, Inc
*
* Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
*
* Initial Code: Sandeep Paulraj <s-paulraj@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/keyscan.h>
/* Key scan registers */
#define DAVINCI_KEYSCAN_KEYCTRL 0x0000
#define DAVINCI_KEYSCAN_INTENA 0x0004
#define DAVINCI_KEYSCAN_INTFLAG 0x0008
#define DAVINCI_KEYSCAN_INTCLR 0x000c
#define DAVINCI_KEYSCAN_STRBWIDTH 0x0010
#define DAVINCI_KEYSCAN_INTERVAL 0x0014
#define DAVINCI_KEYSCAN_CONTTIME 0x0018
#define DAVINCI_KEYSCAN_CURRENTST 0x001c
#define DAVINCI_KEYSCAN_PREVSTATE 0x0020
#define DAVINCI_KEYSCAN_EMUCTRL 0x0024
#define DAVINCI_KEYSCAN_IODFTCTRL 0x002c
/* Key Control Register (KEYCTRL) */
#define DAVINCI_KEYSCAN_KEYEN 0x00000001
#define DAVINCI_KEYSCAN_PREVMODE 0x00000002
#define DAVINCI_KEYSCAN_CHATOFF 0x00000004
#define DAVINCI_KEYSCAN_AUTODET 0x00000008
#define DAVINCI_KEYSCAN_SCANMODE 0x00000010
#define DAVINCI_KEYSCAN_OUTTYPE 0x00000020
/* Masks for the interrupts */
#define DAVINCI_KEYSCAN_INT_CONT 0x00000008
#define DAVINCI_KEYSCAN_INT_OFF 0x00000004
#define DAVINCI_KEYSCAN_INT_ON 0x00000002
#define DAVINCI_KEYSCAN_INT_CHANGE 0x00000001
#define DAVINCI_KEYSCAN_INT_ALL 0x0000000f
/* Per-device state for the DaVinci key scan controller. */
struct davinci_ks {
	struct input_dev *input;		/* input device keys are reported through */
	struct davinci_ks_platform_data *pdata;	/* board-supplied configuration */
	int irq;				/* key scan interrupt line */
	void __iomem *base;			/* mapped controller registers */
	resource_size_t pbase;			/* physical start of register region */
	size_t base_size;			/* length of register region */
	unsigned short keymap[];		/* flexible array: private copy of pdata->keymap */
};
/* Initializing the kp Module */
/*
 * Program the key scan hardware: enable/clear interrupts, set scan
 * timing from platform data, select the matrix geometry, and enable
 * the scanner. Returns 0 or -EINVAL for an unknown matrix type.
 */
static int __init davinci_ks_initialize(struct davinci_ks *davinci_ks)
{
	struct device *dev = &davinci_ks->input->dev;
	struct davinci_ks_platform_data *pdata = davinci_ks->pdata;
	u32 matrix_ctrl;

	/* Enable all interrupts */
	__raw_writel(DAVINCI_KEYSCAN_INT_ALL,
		     davinci_ks->base + DAVINCI_KEYSCAN_INTENA);

	/* Clear interrupts if any */
	__raw_writel(DAVINCI_KEYSCAN_INT_ALL,
		     davinci_ks->base + DAVINCI_KEYSCAN_INTCLR);

	/* Setup the scan period = strobe + interval */
	__raw_writel(pdata->strobe,
		     davinci_ks->base + DAVINCI_KEYSCAN_STRBWIDTH);
	__raw_writel(pdata->interval,
		     davinci_ks->base + DAVINCI_KEYSCAN_INTERVAL);
	__raw_writel(0x01,
		     davinci_ks->base + DAVINCI_KEYSCAN_CONTTIME);

	/* Define matrix type */
	switch (pdata->matrix_type) {
	case DAVINCI_KEYSCAN_MATRIX_4X4:
		matrix_ctrl = 0;
		break;
	case DAVINCI_KEYSCAN_MATRIX_5X3:
		matrix_ctrl = (1 << 6);
		break;
	default:
		dev_err(dev->parent, "wrong matrix type\n");
		return -EINVAL;
	}

	/* Enable key scan module and set matrix type */
	__raw_writel(DAVINCI_KEYSCAN_AUTODET | DAVINCI_KEYSCAN_KEYEN |
		     matrix_ctrl, davinci_ks->base + DAVINCI_KEYSCAN_KEYCTRL);

	return 0;
}
/*
 * Key scan interrupt handler: diff the previous and current key state
 * registers and report every changed key to the input core. Interrupts
 * are masked in the controller while the states are processed.
 */
static irqreturn_t davinci_ks_interrupt(int irq, void *dev_id)
{
	struct davinci_ks *davinci_ks = dev_id;
	struct device *dev = &davinci_ks->input->dev;
	unsigned short *keymap = davinci_ks->keymap;
	int keymapsize = davinci_ks->pdata->keymapsize;
	u32 prev_status, new_status, changed;
	bool release;
	int keycode = KEY_UNKNOWN;
	int i;

	/* Disable interrupt */
	__raw_writel(0x0, davinci_ks->base + DAVINCI_KEYSCAN_INTENA);

	/* Reading previous and new status of the key scan */
	prev_status = __raw_readl(davinci_ks->base + DAVINCI_KEYSCAN_PREVSTATE);
	new_status = __raw_readl(davinci_ks->base + DAVINCI_KEYSCAN_CURRENTST);

	changed = prev_status ^ new_status;

	if (changed) {
		/*
		 * It goes through all bits in 'changed' to ensure
		 * that no key changes are being missed
		 */
		for (i = 0 ; i < keymapsize; i++) {
			if ((changed>>i) & 0x1) {
				keycode = keymap[i];
				/* bit set in the new status means released */
				release = (new_status >> i) & 0x1;
				dev_dbg(dev->parent, "key %d %s\n", keycode,
					release ? "released" : "pressed");
				input_report_key(davinci_ks->input, keycode,
						 !release);
				input_sync(davinci_ks->input);
			}
		}
		/* Clearing interrupt */
		__raw_writel(DAVINCI_KEYSCAN_INT_ALL,
			     davinci_ks->base + DAVINCI_KEYSCAN_INTCLR);
	}

	/* Enable interrupts */
	__raw_writel(0x1, davinci_ks->base + DAVINCI_KEYSCAN_INTENA);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate driver state, map the controller, register the input
 * device, hook the interrupt, and initialize the scanner. Error paths
 * unwind in strict reverse order via the fail* labels (note key_dev is
 * NULLed after unregister so the later input_free_device is a no-op).
 *
 * NOTE(review): pdata is dereferenced before any NULL check — assumes
 * the platform always supplies platform_data; confirm against board code.
 */
static int __init davinci_ks_probe(struct platform_device *pdev)
{
	struct davinci_ks *davinci_ks;
	struct input_dev *key_dev;
	struct resource *res, *mem;
	struct device *dev = &pdev->dev;
	struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
	int error, i;

	/* optional board hook to power/pin-mux the controller */
	if (pdata->device_enable) {
		error = pdata->device_enable(dev);
		if (error < 0) {
			dev_dbg(dev, "device enable function failed\n");
			return error;
		}
	}

	if (!pdata->keymap) {
		dev_dbg(dev, "no keymap from pdata\n");
		return -EINVAL;
	}

	/* state struct plus the flexible keymap array in one allocation */
	davinci_ks = kzalloc(sizeof(struct davinci_ks) +
		sizeof(unsigned short) * pdata->keymapsize, GFP_KERNEL);
	if (!davinci_ks) {
		dev_dbg(dev, "could not allocate memory for private data\n");
		return -ENOMEM;
	}

	memcpy(davinci_ks->keymap, pdata->keymap,
		sizeof(unsigned short) * pdata->keymapsize);

	key_dev = input_allocate_device();
	if (!key_dev) {
		dev_dbg(dev, "could not allocate input device\n");
		error = -ENOMEM;
		goto fail1;
	}

	davinci_ks->input = key_dev;

	davinci_ks->irq = platform_get_irq(pdev, 0);
	if (davinci_ks->irq < 0) {
		dev_err(dev, "no key scan irq\n");
		error = davinci_ks->irq;
		goto fail2;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no mem resource\n");
		error = -EINVAL;
		goto fail2;
	}

	davinci_ks->pbase = res->start;
	davinci_ks->base_size = resource_size(res);

	mem = request_mem_region(davinci_ks->pbase, davinci_ks->base_size,
				 pdev->name);
	if (!mem) {
		dev_err(dev, "key scan registers at %08x are not free\n",
			davinci_ks->pbase);
		error = -EBUSY;
		goto fail2;
	}

	davinci_ks->base = ioremap(davinci_ks->pbase, davinci_ks->base_size);
	if (!davinci_ks->base) {
		dev_err(dev, "can't ioremap MEM resource.\n");
		error = -ENOMEM;
		goto fail3;
	}

	/* Enable auto repeat feature of Linux input subsystem */
	if (pdata->rep)
		__set_bit(EV_REP, key_dev->evbit);

	/* Setup input device */
	__set_bit(EV_KEY, key_dev->evbit);

	/* Setup the platform data */
	davinci_ks->pdata = pdata;

	/* advertise every keycode in the keymap */
	for (i = 0; i < davinci_ks->pdata->keymapsize; i++)
		__set_bit(davinci_ks->pdata->keymap[i], key_dev->keybit);

	key_dev->name = "davinci_keyscan";
	key_dev->phys = "davinci_keyscan/input0";
	key_dev->dev.parent = &pdev->dev;
	key_dev->id.bustype = BUS_HOST;
	key_dev->id.vendor = 0x0001;
	key_dev->id.product = 0x0001;
	key_dev->id.version = 0x0001;
	key_dev->keycode = davinci_ks->keymap;
	key_dev->keycodesize = sizeof(davinci_ks->keymap[0]);
	key_dev->keycodemax = davinci_ks->pdata->keymapsize;

	error = input_register_device(davinci_ks->input);
	if (error < 0) {
		dev_err(dev, "unable to register davinci key scan device\n");
		goto fail4;
	}

	error = request_irq(davinci_ks->irq, davinci_ks_interrupt,
			    IRQF_DISABLED, pdev->name, davinci_ks);
	if (error < 0) {
		dev_err(dev, "unable to register davinci key scan interrupt\n");
		goto fail5;
	}

	error = davinci_ks_initialize(davinci_ks);
	if (error < 0) {
		dev_err(dev, "unable to initialize davinci key scan device\n");
		goto fail6;
	}

	platform_set_drvdata(pdev, davinci_ks);
	return 0;

fail6:
	free_irq(davinci_ks->irq, davinci_ks);
fail5:
	input_unregister_device(davinci_ks->input);
	/* unregister already freed the device; stop fail2 from double-freeing */
	key_dev = NULL;
fail4:
	iounmap(davinci_ks->base);
fail3:
	release_mem_region(davinci_ks->pbase, davinci_ks->base_size);
fail2:
	input_free_device(key_dev);
fail1:
	kfree(davinci_ks);

	return error;
}
/* Remove: release every resource acquired in probe, in reverse order. */
static int __devexit davinci_ks_remove(struct platform_device *pdev)
{
	struct davinci_ks *ks = platform_get_drvdata(pdev);

	free_irq(ks->irq, ks);
	input_unregister_device(ks->input);
	iounmap(ks->base);
	release_mem_region(ks->pbase, ks->base_size);
	platform_set_drvdata(pdev, NULL);
	kfree(ks);

	return 0;
}
/*
 * Platform driver definition. No .probe here: the driver is bound via
 * platform_driver_probe() below, so davinci_ks_probe can stay __init.
 */
static struct platform_driver davinci_ks_driver = {
	.driver = {
		.name = "davinci_keyscan",
		.owner = THIS_MODULE,
	},
	.remove = __devexit_p(davinci_ks_remove),
};
/* Module init: bind once to the (non-hotpluggable) platform device. */
static int __init davinci_ks_init(void)
{
	return platform_driver_probe(&davinci_ks_driver, davinci_ks_probe);
}
module_init(davinci_ks_init);
/* Module exit: unregister the platform driver (triggers remove). */
static void __exit davinci_ks_exit(void)
{
	platform_driver_unregister(&davinci_ks_driver);
}
module_exit(davinci_ks_exit);
MODULE_AUTHOR("Miguel Aguilar");
MODULE_DESCRIPTION("Texas Instruments DaVinci Key Scan Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
guohuaW/linux-rpi | drivers/staging/crystalhd/crystalhd_lnx.c | 3179 | 17344 | /***************************************************************************
BCM70010 Linux driver
Copyright (c) 2005-2009, Broadcom Corporation.
This driver is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
This driver is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this driver. If not, see <http://www.gnu.org/licenses/>.
***************************************************************************/
#include <linux/mutex.h>
#include <linux/slab.h>
#include "crystalhd_lnx.h"
static DEFINE_MUTEX(chd_dec_mutex);
static struct class *crystalhd_class;
static struct crystalhd_adp *g_adp_info;
/* Shared IRQ handler: hand the interrupt to the command layer. */
static irqreturn_t chd_dec_isr(int irq, void *arg)
{
	struct crystalhd_adp *adp = arg;
	int handled = 0;

	if (adp)
		handled = crystalhd_cmd_interrupt(&adp->cmds);

	return IRQ_RETVAL(handled);
}
/*
 * Enable MSI (if not already on) and install the shared interrupt
 * handler. On request_irq failure MSI is rolled back. Returns 0 or a
 * negative errno.
 */
static int chd_dec_enable_int(struct crystalhd_adp *adp)
{
	int rc = 0;

	if (!adp || !adp->pdev) {
		BCMLOG_ERR("Invalid arg!!\n");
		return -EINVAL;
	}

	/* remember whether MSI ended up enabled so disable can undo it */
	if (adp->pdev->msi_enabled)
		adp->msi = 1;
	else
		adp->msi = pci_enable_msi(adp->pdev);

	rc = request_irq(adp->pdev->irq, chd_dec_isr, IRQF_SHARED,
			 adp->name, (void *)adp);
	if (rc) {
		BCMLOG_ERR("Interrupt request failed..\n");
		pci_disable_msi(adp->pdev);
	}

	return rc;
}
/* Free the interrupt handler and turn MSI back off if we enabled it. */
static int chd_dec_disable_int(struct crystalhd_adp *adp)
{
	if (!adp || !adp->pdev) {
		BCMLOG_ERR("Invalid arg!!\n");
		return -EINVAL;
	}

	free_irq(adp->pdev->irq, adp);

	if (adp->msi)
		pci_disable_msi(adp->pdev);

	return 0;
}
/*
 * Pop a zeroed ioctl-data element off the adapter free list, or NULL if
 * the pool is exhausted. Safe from interrupt context (the 'isr' flag is
 * currently unused — the irqsave lock covers both contexts).
 */
struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
{
	unsigned long flags = 0;
	struct crystalhd_ioctl_data *temp;

	if (!adp)
		return NULL;

	spin_lock_irqsave(&adp->lock, flags);

	temp = adp->idata_free_head;
	if (temp) {
		adp->idata_free_head = adp->idata_free_head->next;
		memset(temp, 0, sizeof(*temp));
	}

	spin_unlock_irqrestore(&adp->lock, flags);
	return temp;
}
/*
 * Push an ioctl-data element back onto the adapter free list. Also used
 * at init time to seed the pool. The 'isr' flag is currently unused.
 */
void chd_dec_free_iodata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *iodata,
			 bool isr)
{
	unsigned long flags = 0;

	if (!adp || !iodata)
		return;

	spin_lock_irqsave(&adp->lock, flags);
	iodata->next = adp->idata_free_head;
	adp->idata_free_head = iodata;
	spin_unlock_irqrestore(&adp->lock, flags);
}
/*
 * Copy 'size' bytes between the userspace address 'ud' and kernel buffer
 * 'dr'. set != 0 copies kernel->user, set == 0 copies user->kernel.
 * Returns 0 or -EINVAL/-EFAULT.
 *
 * NOTE(review): 'ud' is cast to plain (void *) — missing the __user
 * annotation sparse would expect; confirm against the rest of the driver.
 */
static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int set)
{
	int rc;

	if (!ud || !dr) {
		BCMLOG_ERR("Invalid arg\n");
		return -EINVAL;
	}

	if (set)
		rc = copy_to_user((void *)ud, dr, size);
	else
		rc = copy_from_user(dr, (void *)ud, size);

	if (rc) {
		BCMLOG_ERR("Invalid args for command\n");
		rc = -EFAULT;
	}

	return rc;
}
/*
 * Allocate a kernel buffer for the ioctl's trailing payload and copy it
 * in from userspace (the payload follows io->udata at address 'ua').
 * Returns 0, -EINVAL, -ENOMEM, or -ENODATA.
 *
 * Fix: the buffer is allocated with vmalloc() but the error path freed
 * it with kfree() — an allocator mismatch (vmalloc memory must go back
 * through vfree, as chd_dec_release_cdata correctly does). Also corrected
 * the misleading "kalloc fail" message.
 */
static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *io,
			       uint32_t m_sz, unsigned long ua)
{
	unsigned long ua_off;
	int rc = 0;

	if (!adp || !io || !ua || !m_sz) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return -EINVAL;
	}

	io->add_cdata = vmalloc(m_sz);
	if (!io->add_cdata) {
		BCMLOG_ERR("vmalloc fail for sz:%x\n", m_sz);
		return -ENOMEM;
	}

	io->add_cdata_sz = m_sz;
	/* payload lives immediately after the fixed udata header */
	ua_off = ua + sizeof(io->udata);
	rc = crystalhd_user_data(ua_off, io->add_cdata, io->add_cdata_sz, 0);
	if (rc) {
		BCMLOG_ERR("failed to pull add_cdata sz:%x ua_off:%x\n",
			   io->add_cdata_sz, (unsigned int)ua_off);
		vfree(io->add_cdata);
		io->add_cdata = NULL;
		return -ENODATA;
	}

	return rc;
}
/*
 * Copy the trailing payload back to userspace (skipped for firmware
 * download, which is write-only) and release the vmalloc'd buffer.
 * Returns 0, -EINVAL, or -ENODATA.
 */
static int chd_dec_release_cdata(struct crystalhd_adp *adp,
				 struct crystalhd_ioctl_data *io, unsigned long ua)
{
	unsigned long ua_off;
	int rc;

	if (!adp || !io || !ua) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return -EINVAL;
	}

	if (io->cmd != BCM_IOC_FW_DOWNLOAD) {
		/* payload lives immediately after the fixed udata header */
		ua_off = ua + sizeof(io->udata);
		rc = crystalhd_user_data(ua_off, io->add_cdata,
					 io->add_cdata_sz, 1);
		if (rc) {
			BCMLOG_ERR("failed to push add_cdata sz:%x ua_off:%x\n",
				   io->add_cdata_sz, (unsigned int)ua_off);
			/* NOTE(review): buffer is NOT freed on this path —
			 * confirm whether the caller cleans it up */
			return -ENODATA;
		}
	}

	if (io->add_cdata) {
		vfree(io->add_cdata);
		io->add_cdata = NULL;
	}

	return 0;
}
/*
 * Move the fixed udata header (and, for commands with a trailing
 * payload, the payload itself) between userspace and io. set == 0
 * fetches from user, set != 0 pushes results back.
 */
static int chd_dec_proc_user_data(struct crystalhd_adp *adp,
				  struct crystalhd_ioctl_data *io,
				  unsigned long ua, int set)
{
	int rc;
	uint32_t m_sz = 0;

	if (!adp || !io || !ua) {
		BCMLOG_ERR("Invalid Arg!!\n");
		return -EINVAL;
	}

	rc = crystalhd_user_data(ua, &io->udata, sizeof(io->udata), set);
	if (rc) {
		BCMLOG_ERR("failed to %s iodata\n", (set ? "set" : "get"));
		return rc;
	}

	switch (io->cmd) {
	case BCM_IOC_MEM_RD:
	case BCM_IOC_MEM_WR:
	case BCM_IOC_FW_DOWNLOAD:
		/* these commands carry NumDwords of extra data after udata */
		m_sz = io->udata.u.devMem.NumDwords * 4;
		if (set)
			rc = chd_dec_release_cdata(adp, io, ua);
		else
			rc = chd_dec_fetch_cdata(adp, io, m_sz, ua);
		break;
	default:
		break;
	}

	return rc;
}
/*
 * Generic ioctl execution path: pull arguments from userspace, run the
 * command-layer handler, record its status in udata.RetSts, push the
 * results back, and return the element to the pool.
 */
static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua,
			   uint32_t uid, uint32_t cmd, crystalhd_cmd_proc func)
{
	struct crystalhd_ioctl_data *io;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	int rc;

	io = chd_dec_alloc_iodata(adp, 0);
	if (!io) {
		BCMLOG_ERR("Failed to get iodata..\n");
		return -EINVAL;
	}

	io->u_id = uid;
	io->cmd = cmd;

	rc = chd_dec_proc_user_data(adp, io, ua, 0);
	if (!rc) {
		sts = func(&adp->cmds, io);
		/* async completion is not supported through this path */
		if (sts == BC_STS_PENDING)
			sts = BC_STS_NOT_IMPL;
		io->udata.RetSts = sts;
		rc = chd_dec_proc_user_data(adp, io, ua, 1);
	}

	chd_dec_free_iodata(adp, io, 0);

	return rc;
}
/* API interfaces */
/*
 * unlocked_ioctl entry point. Looks up the handler for 'cmd' in the
 * command layer and dispatches through chd_dec_api_cmd under a global
 * mutex (this driver serializes all ioctls).
 */
static long chd_dec_ioctl(struct file *fd, unsigned int cmd, unsigned long ua)
{
	struct crystalhd_adp *adp = chd_get_adp();
	crystalhd_cmd_proc cproc;
	struct crystalhd_user *uc;
	int ret;

	if (!adp || !fd) {
		BCMLOG_ERR("Invalid adp\n");
		return -EINVAL;
	}

	/* per-open user context stored at open() time */
	uc = fd->private_data;
	if (!uc) {
		BCMLOG_ERR("Failed to get uc\n");
		return -ENODATA;
	}

	mutex_lock(&chd_dec_mutex);
	cproc = crystalhd_get_cmd_proc(&adp->cmds, cmd, uc);
	if (!cproc) {
		BCMLOG_ERR("Unhandled command: %d\n", cmd);
		mutex_unlock(&chd_dec_mutex);
		return -EINVAL;
	}

	ret = chd_dec_api_cmd(adp, ua, uc->uid, cmd, cproc);
	mutex_unlock(&chd_dec_mutex);
	return ret;
}
/*
 * Device open: allocate a per-open user context from the command layer
 * and stash it in file->private_data. Limited to BC_LINK_MAX_OPENS
 * concurrent opens.
 *
 * Fix: on crystalhd_user_open() failure the old code still incremented
 * cfg_users and set private_data before returning -EBUSY. Since the
 * kernel does not call release() for a failed open, the user count
 * leaked and eventually every open would fail. Now we bail out before
 * touching the count.
 */
static int chd_dec_open(struct inode *in, struct file *fd)
{
	struct crystalhd_adp *adp = chd_get_adp();
	enum BC_STATUS sts = BC_STS_SUCCESS;
	struct crystalhd_user *uc = NULL;

	BCMLOG_ENTER;
	if (!adp) {
		BCMLOG_ERR("Invalid adp\n");
		return -EINVAL;
	}

	if (adp->cfg_users >= BC_LINK_MAX_OPENS) {
		BCMLOG(BCMLOG_INFO, "Already in use.%d\n", adp->cfg_users);
		return -EBUSY;
	}

	sts = crystalhd_user_open(&adp->cmds, &uc);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("cmd_user_open - %d\n", sts);
		return -EBUSY;
	}

	adp->cfg_users++;
	fd->private_data = uc;

	return 0;
}
/*
 * Device release: return the per-open user context to the command layer
 * and drop the open count taken in chd_dec_open().
 */
static int chd_dec_close(struct inode *in, struct file *fd)
{
	struct crystalhd_adp *adp = chd_get_adp();
	struct crystalhd_user *uc;

	BCMLOG_ENTER;
	if (!adp) {
		BCMLOG_ERR("Invalid adp\n");
		return -EINVAL;
	}

	uc = fd->private_data;
	if (!uc) {
		BCMLOG_ERR("Failed to get uc\n");
		return -ENODATA;
	}

	crystalhd_user_close(&adp->cmds, uc);

	adp->cfg_users--;

	return 0;
}
/* Character-device operations for /dev/crystalhd. */
static const struct file_operations chd_dec_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = chd_dec_ioctl,
	.open = chd_dec_open,
	.release = chd_dec_close,
	.llseek = noop_llseek,		/* seeking is meaningless on this device */
};
/*
 * Create the character device, sysfs class/device node, element pool
 * and ioctl-data pool. Unwinds everything in reverse order on failure.
 *
 * Fixes: failure paths after register_chrdev() succeeded never called
 * unregister_chrdev(), leaking the major number (added the
 * class_create_fail unwind step); the element-pool error message was a
 * copy-paste of the device-create one.
 */
static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp)
{
	struct crystalhd_ioctl_data *temp;
	struct device *dev;
	int rc = -ENODEV, i = 0;

	if (!adp)
		goto fail;

	adp->chd_dec_major = register_chrdev(0, CRYSTALHD_API_NAME,
					     &chd_dec_fops);
	if (adp->chd_dec_major < 0) {
		BCMLOG_ERR("Failed to create config dev\n");
		rc = adp->chd_dec_major;
		goto fail;
	}

	/* register crystalhd class */
	crystalhd_class = class_create(THIS_MODULE, "crystalhd");
	if (IS_ERR(crystalhd_class)) {
		BCMLOG_ERR("failed to create class\n");
		goto class_create_fail;
	}

	dev = device_create(crystalhd_class, NULL, MKDEV(adp->chd_dec_major, 0),
			    NULL, "crystalhd");
	if (IS_ERR(dev)) {
		BCMLOG_ERR("failed to create device\n");
		goto device_create_fail;
	}

	rc = crystalhd_create_elem_pool(adp, BC_LINK_ELEM_POOL_SZ);
	if (rc) {
		BCMLOG_ERR("failed to create element pool\n");
		goto elem_pool_fail;
	}

	/* Allocate general purpose ioctl pool. */
	for (i = 0; i < CHD_IODATA_POOL_SZ; i++) {
		temp = kzalloc(sizeof(struct crystalhd_ioctl_data), GFP_KERNEL);
		if (!temp) {
			BCMLOG_ERR("ioctl data pool kzalloc failed\n");
			rc = -ENOMEM;
			goto kzalloc_fail;
		}
		/* Add to global pool.. */
		chd_dec_free_iodata(adp, temp, 0);
	}

	return 0;

kzalloc_fail:
	crystalhd_delete_elem_pool(adp);
elem_pool_fail:
	device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0));
device_create_fail:
	class_destroy(crystalhd_class);
class_create_fail:
	unregister_chrdev(adp->chd_dec_major, CRYSTALHD_API_NAME);
	adp->chd_dec_major = 0;
fail:
	return rc;
}
/*
 * Tear down the character device, sysfs node/class, the ioctl-data pool
 * and the element pool. Mirror of chd_dec_init_chdev().
 */
static void __devexit chd_dec_release_chdev(struct crystalhd_adp *adp)
{
	struct crystalhd_ioctl_data *temp = NULL;
	if (!adp)
		return;

	if (adp->chd_dec_major > 0) {
		/* unregister crystalhd class */
		device_destroy(crystalhd_class, MKDEV(adp->chd_dec_major, 0));
		unregister_chrdev(adp->chd_dec_major, CRYSTALHD_API_NAME);
		BCMLOG(BCMLOG_INFO, "released api device - %d\n",
		       adp->chd_dec_major);
		class_destroy(crystalhd_class);
	}
	adp->chd_dec_major = 0;

	/* Clear iodata pool.. drain the free list until it is empty */
	do {
		temp = chd_dec_alloc_iodata(adp, 0);
		kfree(temp);
	} while (temp);

	crystalhd_delete_elem_pool(adp);
}
/*
 * Map BAR2 (register block) and BAR0 (I2O block) and reserve the PCI
 * regions. Returns 0 or a negative errno.
 *
 * NOTE(review): if the BAR0 check or ioremap fails, the already-mapped
 * BAR2 is not unmapped here, and the probe error path does not call
 * chd_pci_release_mem() — confirm whether that leak is handled elsewhere.
 */
static int __devinit chd_pci_reserve_mem(struct crystalhd_adp *pinfo)
{
	int rc;
	unsigned long bar2 = pci_resource_start(pinfo->pdev, 2);
	uint32_t mem_len = pci_resource_len(pinfo->pdev, 2);
	unsigned long bar0 = pci_resource_start(pinfo->pdev, 0);
	uint32_t i2o_len = pci_resource_len(pinfo->pdev, 0);

	BCMLOG(BCMLOG_SSTEP, "bar2:0x%lx-0x%08x bar0:0x%lx-0x%08x\n",
	       bar2, mem_len, bar0, i2o_len);

	rc = check_mem_region(bar2, mem_len);
	if (rc) {
		BCMLOG_ERR("No valid mem region...\n");
		return -ENOMEM;
	}

	pinfo->addr = ioremap_nocache(bar2, mem_len);
	if (!pinfo->addr) {
		BCMLOG_ERR("Failed to remap mem region...\n");
		return -ENOMEM;
	}

	pinfo->pci_mem_start = bar2;
	pinfo->pci_mem_len = mem_len;

	rc = check_mem_region(bar0, i2o_len);
	if (rc) {
		BCMLOG_ERR("No valid mem region...\n");
		return -ENOMEM;
	}

	pinfo->i2o_addr = ioremap_nocache(bar0, i2o_len);
	if (!pinfo->i2o_addr) {
		BCMLOG_ERR("Failed to remap mem region...\n");
		return -ENOMEM;
	}

	pinfo->pci_i2o_start = bar0;
	pinfo->pci_i2o_len = i2o_len;

	/* claim both BARs for this driver */
	rc = pci_request_regions(pinfo->pdev, pinfo->name);
	if (rc < 0) {
		BCMLOG_ERR("Region request failed: %d\n", rc);
		return rc;
	}

	BCMLOG(BCMLOG_SSTEP, "Mapped addr:0x%08lx i2o_addr:0x%08lx\n",
	       (unsigned long)pinfo->addr, (unsigned long)pinfo->i2o_addr);

	return 0;
}
/* Unmap both BARs (if mapped) and release the PCI regions. */
static void __devexit chd_pci_release_mem(struct crystalhd_adp *pinfo)
{
	if (!pinfo)
		return;

	if (pinfo->addr)
		iounmap(pinfo->addr);
	if (pinfo->i2o_addr)
		iounmap(pinfo->i2o_addr);

	pci_release_regions(pinfo->pdev);
}
/*
 * PCI remove callback: tear down in reverse order of probe — command
 * context, char device, interrupts, BAR mappings/regions — then free
 * the adapter and clear the module-global pointer.
 */
static void __devexit chd_dec_pci_remove(struct pci_dev *pdev)
{
	struct crystalhd_adp *pinfo;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	BCMLOG_ENTER;
	pinfo = pci_get_drvdata(pdev);
	if (!pinfo) {
		BCMLOG_ERR("could not get adp\n");
		return;
	}
	sts = crystalhd_delete_cmd_context(&pinfo->cmds);
	if (sts != BC_STS_SUCCESS)
		/* log and continue: remove must still release resources */
		BCMLOG_ERR("cmd delete :%d\n", sts);
	chd_dec_release_chdev(pinfo);
	chd_dec_disable_int(pinfo);
	chd_pci_release_mem(pinfo);
	pci_disable_device(pinfo->pdev);
	kfree(pinfo);
	g_adp_info = NULL;
}
/*
 * PCI probe: allocate the adapter, enable the device, map its BARs,
 * register the char-device API, hook interrupts, configure DMA masks
 * (64-bit preferred, 32-bit fallback) and set up the command context.
 *
 * NOTE(review): error paths taken after chd_pci_reserve_mem() succeeds
 * (enable_int / DMA-mask / cmd-context failures) free pinfo but do not
 * call chd_pci_release_mem(), so the ioremaps and PCI regions appear to
 * leak — confirm and unwind with chd_pci_release_mem() if so.
 */
static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *entry)
{
	struct crystalhd_adp *pinfo;
	int rc;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
	       "s_vendor:0x%04x s_device: 0x%04x\n",
	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
	       pdev->subsystem_device);
	pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_KERNEL);
	if (!pinfo) {
		BCMLOG_ERR("Failed to allocate memory\n");
		return -ENOMEM;
	}
	pinfo->pdev = pdev;
	rc = pci_enable_device(pdev);
	if (rc) {
		BCMLOG_ERR("Failed to enable PCI device\n");
		goto err;
	}
	/* human-readable instance name: bus:slot:function */
	snprintf(pinfo->name, sizeof(pinfo->name), "crystalhd_pci_e:%d:%d:%d",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	rc = chd_pci_reserve_mem(pinfo);
	if (rc) {
		BCMLOG_ERR("Failed to setup memory regions.\n");
		pci_disable_device(pdev);
		rc = -ENOMEM;
		goto err;
	}
	pinfo->present = 1;
	pinfo->drv_data = entry->driver_data;
	/* Setup adapter level lock.. */
	spin_lock_init(&pinfo->lock);
	/* setup api stuff.. */
	chd_dec_init_chdev(pinfo);
	rc = chd_dec_enable_int(pinfo);
	if (rc) {
		BCMLOG_ERR("_enable_int err:%d\n", rc);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}
	/* Set dma mask... */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		pinfo->dmabits = 64;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		pinfo->dmabits = 32;
	} else {
		BCMLOG_ERR("Unabled to setup DMA %d\n", rc);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}
	sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("cmd setup :%d\n", sts);
		pci_disable_device(pdev);
		rc = -ENODEV;
		goto err;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, pinfo);
	g_adp_info = pinfo;
	return 0;
err:
	kfree(pinfo);
	return rc;
}
#ifdef CONFIG_PM
/*
 * PM suspend callback: tell the firmware to suspend via a pooled ioctl
 * packet, quiesce interrupts, then save state and power the device down.
 *
 * Fix over the original: the pooled ioctl-data packet is now returned to
 * the pool when crystalhd_suspend() fails, instead of being leaked.
 *
 * Returns 0 on success, -ENODEV on any failure.
 */
int chd_dec_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct crystalhd_adp *adp;
	struct crystalhd_ioctl_data *temp;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	adp = pci_get_drvdata(pdev);
	if (!adp) {
		BCMLOG_ERR("could not get adp\n");
		return -ENODEV;
	}

	temp = chd_dec_alloc_iodata(adp, false);
	if (!temp) {
		BCMLOG_ERR("could not get ioctl data\n");
		return -ENODEV;
	}

	sts = crystalhd_suspend(&adp->cmds, temp);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("BCM70012 Suspend %d\n", sts);
		/* return the packet to the pool on failure too */
		chd_dec_free_iodata(adp, temp, false);
		return -ENODEV;
	}

	chd_dec_free_iodata(adp, temp, false);
	chd_dec_disable_int(adp);
	pci_save_state(pdev);

	/* Disable IO/bus master/irq router */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
/*
 * PM resume callback: restore PCI state, re-enable the device and bus
 * mastering, rearm interrupts and bring the firmware back up.
 * Returns 0 on success, -ENODEV (or 1 for enable failure) otherwise.
 */
int chd_dec_pci_resume(struct pci_dev *pdev)
{
	struct crystalhd_adp *adp;
	enum BC_STATUS sts = BC_STS_SUCCESS;
	int rc;
	adp = pci_get_drvdata(pdev);
	if (!adp) {
		BCMLOG_ERR("could not get adp\n");
		return -ENODEV;
	}
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* device's irq possibly is changed, driver should take care */
	if (pci_enable_device(pdev)) {
		BCMLOG_ERR("Failed to enable PCI device\n");
		return 1;
	}
	pci_set_master(pdev);
	rc = chd_dec_enable_int(adp);
	if (rc) {
		BCMLOG_ERR("_enable_int err:%d\n", rc);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	sts = crystalhd_resume(&adp->cmds);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("BCM70012 Resume %d\n", sts);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	return 0;
}
#endif
/* PCI IDs this driver binds to: Broadcom BCM70012 (device 0x1612). */
static DEFINE_PCI_DEVICE_TABLE(chd_dec_pci_id_table) = {
	{ PCI_VDEVICE(BROADCOM, 0x1612), 8 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, chd_dec_pci_id_table);
/* PCI driver glue; suspend/resume are only wired when CONFIG_PM is set. */
static struct pci_driver bc_chd_70012_driver = {
	.name = "Broadcom 70012 Decoder",
	.probe = chd_dec_pci_probe,
	.remove = __devexit_p(chd_dec_pci_remove),
	.id_table = chd_dec_pci_id_table,
#ifdef CONFIG_PM
	.suspend = chd_dec_pci_suspend,
	.resume = chd_dec_pci_resume
#endif
};
/*
 * Map a textual log-level keyword onto the global g_linklog_level mask.
 * A NULL or too-short argument selects the default (errors + data);
 * "silent" and any unrecognized keyword both disable logging entirely.
 *
 * Change over the original: the explicit "silent" branch was redundant —
 * it assigned the same value (0) as the final else — so the two have
 * been merged. Behavior is unchanged. @adp is unused but kept for ABI.
 */
void chd_set_log_level(struct crystalhd_adp *adp, char *arg)
{
	if ((!arg) || (strlen(arg) < 3))
		g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA;
	else if (!strncmp(arg, "sstep", 5))
		g_linklog_level = BCMLOG_INFO | BCMLOG_DATA | BCMLOG_DBG |
				  BCMLOG_SSTEP | BCMLOG_ERROR;
	else if (!strncmp(arg, "info", 4))
		g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | BCMLOG_INFO;
	else if (!strncmp(arg, "debug", 5))
		g_linklog_level = BCMLOG_ERROR | BCMLOG_DATA | BCMLOG_INFO |
				  BCMLOG_DBG;
	else if (!strncmp(arg, "pball", 5))
		g_linklog_level = 0xFFFFFFFF & ~(BCMLOG_SPINLOCK);
	else
		g_linklog_level = 0;	/* "silent" or unknown keyword */
}
/* Accessor for the single module-global adapter (set in probe). */
struct crystalhd_adp *chd_get_adp(void)
{
	return g_adp_info;
}
/*
 * Module entry: force debug logging, then register the PCI driver.
 * Note: pci_register_driver() failing means registration failed, not
 * necessarily that no devices exist (the log message is approximate).
 */
static int __init chd_dec_module_init(void)
{
	int rc;
	chd_set_log_level(NULL, "debug");
	BCMLOG(BCMLOG_DATA, "Loading crystalhd %d.%d.%d\n",
	       crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
	rc = pci_register_driver(&bc_chd_70012_driver);
	if (rc < 0)
		BCMLOG_ERR("Could not find any devices. err:%d\n", rc);
	return rc;
}
module_init(chd_dec_module_init);
/* Module exit: unregister the PCI driver (per-device teardown in remove). */
static void __exit chd_dec_module_cleanup(void)
{
	BCMLOG(BCMLOG_DATA, "unloading crystalhd %d.%d.%d\n",
	       crystalhd_kmod_major, crystalhd_kmod_minor, crystalhd_kmod_rev);
	pci_unregister_driver(&bc_chd_70012_driver);
}
module_exit(chd_dec_module_cleanup);
MODULE_AUTHOR("Naren Sankar <nsankar@broadcom.com>");
MODULE_AUTHOR("Prasad Bolisetty <prasadb@broadcom.com>");
MODULE_DESCRIPTION(CRYSTAL_HD_NAME);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bcm70012");
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_mapphone_kexec | fs/notify/fsnotify.c | 3179 | 9483 | /*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
/*
* Clear all of the marks on an inode when it is being evicted from core
*/
/* Drop every fsnotify mark attached to @inode (called at inode eviction). */
void __fsnotify_inode_delete(struct inode *inode)
{
	fsnotify_clear_marks_by_inode(inode);
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
/* Drop every fsnotify mark attached to @mnt (called at mount teardown). */
void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
	fsnotify_clear_marks_by_mount(mnt);
}
/*
 * Given an inode, first check if we care what happens to our children. Inotify
 * and dnotify both tell their parents about events. If we care about any event
 * on a child we run all of our children and set a dentry flag saying that the
 * parent cares. Thus when an event happens on a child it can quickly tell
 * if there is a need to find a parent and send the event to the parent.
 */
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
	struct dentry *alias;
	int watched;
	/* only directories have children to flag */
	if (!S_ISDIR(inode->i_mode))
		return;
	/* determine if the children should tell inode about their events */
	watched = fsnotify_inode_watches_children(inode);
	spin_lock(&inode->i_lock);
	/* run all of the dentries associated with this inode. Since this is a
	 * directory, there damn well better only be one item on this list */
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;
		/* run all of the children of the original inode and fix their
		 * d_flags to indicate parental interest (their parent is the
		 * original inode) */
		spin_lock(&alias->d_lock);
		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;
			/* nested class: child lock taken under parent's d_lock */
			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
			if (watched)
				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Notify this dentry's parent about a child's events. */
int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
	struct dentry *parent;
	struct inode *p_inode;
	int ret = 0;
	if (!dentry)
		dentry = path->dentry;
	/* fast path: nobody marked this dentry's parent as interested */
	if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
		return 0;
	parent = dget_parent(dentry);
	p_inode = parent->d_inode;
	if (unlikely(!fsnotify_inode_watches_children(p_inode)))
		/* the WATCHED flag was stale; resync children's d_flags */
		__fsnotify_update_child_dentry_flags(p_inode);
	else if (p_inode->i_fsnotify_mask & mask) {
		/* we are notifying a parent so come up with the new mask which
		 * specifies these are events which came from a child. */
		mask |= FS_EVENT_ON_CHILD;
		if (path)
			ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
				       dentry->d_name.name, 0);
		else
			ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
				       dentry->d_name.name, 0);
	}
	dput(parent);
	return ret;
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
/*
 * Deliver one event to one group, via its inode mark and/or vfsmount mark.
 * Computes the effective test mask for each mark (mark mask minus ignored
 * mask, child bit stripped), lazily creates the event on first need, and
 * hands it to the group's handle_event(). Returns 0 if the group doesn't
 * want the event, -ENOMEM on allocation failure, else handle_event()'s
 * return value.
 */
static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
			 struct fsnotify_mark *inode_mark,
			 struct fsnotify_mark *vfsmount_mark,
			 __u32 mask, void *data,
			 int data_is, u32 cookie,
			 const unsigned char *file_name,
			 struct fsnotify_event **event)
{
	struct fsnotify_group *group = NULL;
	__u32 inode_test_mask = 0;
	__u32 vfsmount_test_mask = 0;
	/* caller must supply at least one mark */
	if (unlikely(!inode_mark && !vfsmount_mark)) {
		BUG();
		return 0;
	}
	/* clear ignored on inode modification */
	if (mask & FS_MODIFY) {
		if (inode_mark &&
		    !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
			inode_mark->ignored_mask = 0;
		if (vfsmount_mark &&
		    !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
			vfsmount_mark->ignored_mask = 0;
	}
	/* does the inode mark tell us to do something? */
	if (inode_mark) {
		group = inode_mark->group;
		inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
		inode_test_mask &= inode_mark->mask;
		inode_test_mask &= ~inode_mark->ignored_mask;
	}
	/* does the vfsmount_mark tell us to do something? */
	if (vfsmount_mark) {
		vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
		group = vfsmount_mark->group;
		vfsmount_test_mask &= vfsmount_mark->mask;
		vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
		/* an inode-level ignore also suppresses the mount mark */
		if (inode_mark)
			vfsmount_test_mask &= ~inode_mark->ignored_mask;
	}
	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p"
		 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
		 " data=%p data_is=%d cookie=%d event=%p\n",
		 __func__, group, to_tell, mnt, mask, inode_mark,
		 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
		 data_is, cookie, *event);
	if (!inode_test_mask && !vfsmount_test_mask)
		return 0;
	if (group->ops->should_send_event(group, to_tell, inode_mark,
					  vfsmount_mark, mask, data,
					  data_is) == false)
		return 0;
	/* allocate the event once and share it across groups */
	if (!*event) {
		*event = fsnotify_create_event(to_tell, mask, data,
					       data_is, file_name,
					       cookie, GFP_KERNEL);
		if (!*event)
			return -ENOMEM;
	}
	return group->ops->handle_event(group, inode_mark, vfsmount_mark, *event);
}
/*
 * This is the main call to fsnotify. The VFS calls into hook specific functions
 * in linux/fsnotify.h. Those functions then in turn call here. Here will call
 * out to all of the registered fsnotify_group. Those groups can then use the
 * notification event in whatever means they feel necessary.
 *
 * The inode and vfsmount mark lists are each sorted by group; the walk
 * below merges the two lists so each group sees the event at most once,
 * with both of its marks when it has both.
 */
int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
	     const unsigned char *file_name, u32 cookie)
{
	struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
	struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
	struct fsnotify_group *inode_group, *vfsmount_group;
	struct fsnotify_event *event = NULL;
	struct vfsmount *mnt;
	int idx, ret = 0;
	/* global tests shouldn't care about events on child only the specific event */
	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
	if (data_is == FSNOTIFY_EVENT_PATH)
		mnt = ((struct path *)data)->mnt;
	else
		mnt = NULL;
	/*
	 * if this is a modify event we may need to clear the ignored masks
	 * otherwise return if neither the inode nor the vfsmount care about
	 * this type of event.
	 */
	if (!(mask & FS_MODIFY) &&
	    !(test_mask & to_tell->i_fsnotify_mask) &&
	    !(mnt && test_mask & mnt->mnt_fsnotify_mask))
		return 0;
	/* marks are protected by SRCU for the duration of the walk */
	idx = srcu_read_lock(&fsnotify_mark_srcu);
	if ((mask & FS_MODIFY) ||
	    (test_mask & to_tell->i_fsnotify_mask))
		inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
					      &fsnotify_mark_srcu);
	if (mnt && ((mask & FS_MODIFY) ||
		    (test_mask & mnt->mnt_fsnotify_mask))) {
		vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
						 &fsnotify_mark_srcu);
		inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
					      &fsnotify_mark_srcu);
	}
	while (inode_node || vfsmount_node) {
		inode_group = vfsmount_group = NULL;
		if (inode_node) {
			inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
						 struct fsnotify_mark, i.i_list);
			inode_group = inode_mark->group;
		}
		if (vfsmount_node) {
			vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
						    struct fsnotify_mark, m.m_list);
			vfsmount_group = vfsmount_mark->group;
		}
		/* merge step: advance whichever list's group sorts first */
		if (inode_group > vfsmount_group) {
			/* handle inode */
			ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
					    data_is, cookie, file_name, &event);
			/* we didn't use the vfsmount_mark */
			vfsmount_group = NULL;
		} else if (vfsmount_group > inode_group) {
			ret = send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
					    data_is, cookie, file_name, &event);
			inode_group = NULL;
		} else {
			/* same group on both lists: deliver both marks at once */
			ret = send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
					    mask, data, data_is, cookie, file_name,
					    &event);
		}
		/* permission events stop on first veto/error */
		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
			goto out;
		if (inode_group)
			inode_node = srcu_dereference(inode_node->next,
						      &fsnotify_mark_srcu);
		if (vfsmount_group)
			vfsmount_node = srcu_dereference(vfsmount_node->next,
							 &fsnotify_mark_srcu);
	}
	ret = 0;
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	/*
	 * fsnotify_create_event() took a reference so the event can't be cleaned
	 * up while we are still trying to add it to lists, drop that one.
	 */
	if (event)
		fsnotify_put_event(event);
	return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);
/*
 * Boot-time init: sanity-check that the number of defined event bits
 * matches expectation, then set up the SRCU domain protecting mark lists.
 */
static __init int fsnotify_init(void)
{
	int ret;
	BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);
	ret = init_srcu_struct(&fsnotify_mark_srcu);
	if (ret)
		panic("initializing fsnotify_mark_srcu");
	return 0;
}
core_initcall(fsnotify_init);
| gpl-2.0 |
CMyst/android_kernel_htc_msm8960 | drivers/video/msm/mddi_ext_lcd.c | 3691 | 2185 | /* Copyright (c) 2008-2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "msm_fb.h"
#include "mddihost.h"
#include "mddihosti.h"
static int mddi_ext_lcd_on(struct platform_device *pdev);
static int mddi_ext_lcd_off(struct platform_device *pdev);
/* Panel power-on hook: no-op for this external MDDI panel. */
static int mddi_ext_lcd_on(struct platform_device *pdev)
{
	return 0;
}
/* Panel power-off hook: no-op for this external MDDI panel. */
static int mddi_ext_lcd_off(struct platform_device *pdev)
{
	return 0;
}
/*
 * Platform probe: hand the panel device to the msm_fb framework.
 *
 * Fix over the original: this function was annotated __init, but it is
 * referenced from this_driver.probe, which can be invoked after init
 * memory has been discarded — a section mismatch. Use __devinit (the
 * hotplug-safe annotation of this kernel era) instead.
 */
static int __devinit mddi_ext_lcd_probe(struct platform_device *pdev)
{
	msm_fb_add_device(pdev);
	return 0;
}
/* Platform driver matched by name against the "extmddi_svga" device below. */
static struct platform_driver this_driver = {
	.probe = mddi_ext_lcd_probe,
	.driver = {
		.name = "extmddi_svga",
	},
};
static struct msm_fb_panel_data mddi_ext_lcd_panel_data = {
.panel_info.xres = 800,
.panel_info.yres = 600,
.panel_info.mode2_xres = 0;
.panel_info.mode2_yres = 0;
.panel_info.mode2_bpp = 0;
.panel_info.type = EXT_MDDI_PANEL,
.panel_info.pdest = DISPLAY_1,
.panel_info.wait_cycle = 0,
.panel_info.bpp = 18,
.panel_info.fb_num = 2,
.panel_info.clk_rate = 122880000,
.panel_info.clk_min = 120000000,
.panel_info.clk_max = 125000000,
.on = mddi_ext_lcd_on,
.off = mddi_ext_lcd_off,
};
/* Statically-registered panel device carrying the panel data above. */
static struct platform_device this_device = {
	.name = "extmddi_svga",
	.id = 0,
	.dev = {
		.platform_data = &mddi_ext_lcd_panel_data,
	}
};
/*
 * Module init: register the driver, finish filling in runtime panel
 * fields, then register the device; unwind the driver on device failure.
 */
static int __init mddi_ext_lcd_init(void)
{
	int ret;
	struct msm_panel_info *pinfo;
	ret = platform_driver_register(&this_driver);
	if (!ret) {
		pinfo = &mddi_ext_lcd_panel_data.panel_info;
		pinfo->lcd.vsync_enable = FALSE;
		pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
		ret = platform_device_register(&this_device);
		if (ret)
			platform_driver_unregister(&this_driver);
	}
	return ret;
}
module_init(mddi_ext_lcd_init);
| gpl-2.0 |
RenderBroken/OPO-CAF-kernel | drivers/net/can/sja1000/sja1000_of_platform.c | 3691 | 5931 | /*
* Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
*
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
* bus found on embedded PowerPC systems. You need a SJA1000 CAN node
* definition in your flattened device tree source (DTS) file similar to:
*
* can@3,100 {
* compatible = "nxp,sja1000";
* reg = <3 0x100 0x80>;
* interrupts = <2 0>;
* interrupt-parent = <&mpic>;
* nxp,external-clock-frequency = <16000000>;
* };
*
* See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
* information.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
#include "sja1000.h"
#define DRV_NAME "sja1000_of_platform"
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
MODULE_LICENSE("GPL v2");
#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
/* Read one SJA1000 register via the memory-mapped window (big-endian 8-bit). */
static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
{
	return in_8(priv->reg_base + reg);
}
/* Write one SJA1000 register via the memory-mapped window (big-endian 8-bit). */
static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
				  int reg, u8 val)
{
	out_8(priv->reg_base + reg, val);
}
/*
 * Remove callback: unwind probe in reverse — unregister/free the CAN
 * device, unmap the registers, dispose the IRQ mapping and release the
 * memory region (re-derived from the device-tree node).
 */
static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct sja1000_priv *priv = netdev_priv(dev);
	struct device_node *np = ofdev->dev.of_node;
	struct resource res;
	dev_set_drvdata(&ofdev->dev, NULL);
	unregister_sja1000dev(dev);
	free_sja1000dev(dev);
	iounmap(priv->reg_base);
	irq_dispose_mapping(dev->irq);
	of_address_to_resource(np, 0, &res);
	release_mem_region(res.start, resource_size(&res));
	return 0;
}
/*
 * Probe callback: claim and map the register window, map the IRQ,
 * allocate the SJA1000 netdevice and configure clock/OCR/CDR from
 * optional device-tree properties (falling back to defaults), then
 * register the device. Errors unwind via the goto chain at the bottom.
 */
static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct net_device *dev;
	struct sja1000_priv *priv;
	struct resource res;
	const u32 *prop;
	int err, irq, res_size, prop_size;
	void __iomem *base;
	err = of_address_to_resource(np, 0, &res);
	if (err) {
		dev_err(&ofdev->dev, "invalid address\n");
		return err;
	}
	res_size = resource_size(&res);
	if (!request_mem_region(res.start, res_size, DRV_NAME)) {
		dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
		return -EBUSY;
	}
	base = ioremap_nocache(res.start, res_size);
	if (!base) {
		dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
		err = -ENOMEM;
		goto exit_release_mem;
	}
	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		dev_err(&ofdev->dev, "no irq found\n");
		err = -ENODEV;
		goto exit_unmap_mem;
	}
	dev = alloc_sja1000dev(0);
	if (!dev) {
		err = -ENOMEM;
		goto exit_dispose_irq;
	}
	priv = netdev_priv(dev);
	priv->read_reg = sja1000_ofp_read_reg;
	priv->write_reg = sja1000_ofp_write_reg;
	/* CAN core clock = external crystal frequency / 2 */
	prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
	if (prop && (prop_size == sizeof(u32)))
		priv->can.clock.freq = *prop / 2;
	else
		priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
	prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
	if (prop && (prop_size == sizeof(u32)))
		priv->ocr |= *prop & OCR_MODE_MASK;
	else
		priv->ocr |= OCR_MODE_NORMAL; /* default */
	prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
	if (prop && (prop_size == sizeof(u32)))
		priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
	else
		priv->ocr |= OCR_TX0_PULLDOWN; /* default */
	prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
	if (prop && (prop_size == sizeof(u32)) && *prop) {
		/* derive CLKOUT divider from requested output frequency */
		u32 divider = priv->can.clock.freq * 2 / *prop;
		if (divider > 1)
			priv->cdr |= divider / 2 - 1;
		else
			priv->cdr |= CDR_CLKOUT_MASK;
	} else {
		priv->cdr |= CDR_CLK_OFF; /* default */
	}
	prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
	if (!prop)
		priv->cdr |= CDR_CBP; /* default */
	priv->irq_flags = IRQF_SHARED;
	priv->reg_base = base;
	dev->irq = irq;
	dev_info(&ofdev->dev,
		 "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
		 priv->reg_base, dev->irq, priv->can.clock.freq,
		 priv->ocr, priv->cdr);
	dev_set_drvdata(&ofdev->dev, dev);
	SET_NETDEV_DEV(dev, &ofdev->dev);
	err = register_sja1000dev(dev);
	if (err) {
		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
			DRV_NAME, err);
		goto exit_free_sja1000;
	}
	return 0;
exit_free_sja1000:
	free_sja1000dev(dev);
exit_dispose_irq:
	irq_dispose_mapping(irq);
exit_unmap_mem:
	iounmap(base);
exit_release_mem:
	release_mem_region(res.start, res_size);
	return err;
}
static struct of_device_id __devinitdata sja1000_ofp_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
/* Platform driver glue; module_platform_driver generates init/exit. */
static struct platform_driver sja1000_ofp_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRV_NAME,
		.of_match_table = sja1000_ofp_table,
	},
	.probe = sja1000_ofp_probe,
	.remove = __devexit_p(sja1000_ofp_remove),
};
module_platform_driver(sja1000_ofp_driver);
| gpl-2.0 |
CyanideL/android_kernel_samsung_klte | drivers/ata/sata_promise.c | 3947 | 34754 | /*
* sata_promise.c - Promise SATA
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Mikael Pettersson <mikpe@it.uu.se>
* Please ALWAYS copy linux-ide@vger.kernel.org
* on emails.
*
* Copyright 2003-2004 Red Hat, Inc.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware information only available under NDA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"
#define DRV_NAME "sata_promise"
#define DRV_VERSION "2.12"
/*
 * Hardware constants for Promise SATA controllers: host/port register
 * offsets, error and control bit definitions, board indices into
 * pdc_port_info[], and driver flag bits carried in ap->flags.
 */
enum {
	PDC_MAX_PORTS		= 4,
	PDC_MMIO_BAR		= 3,
	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
	/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_FLASH_CTL		= 0x44, /* Flash control register */
	PDC_PCI_CTL		= 0x48, /* PCI control/status reg */
	PDC_SATA_PLUG_CSR	= 0x6C, /* SATA Plug control/status reg */
	PDC2_SATA_PLUG_CSR	= 0x60, /* SATAII Plug control/status reg */
	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */
	/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
	PDC_FEATURE		= 0x04, /* Feature/Error reg (per port) */
	PDC_SECTOR_COUNT	= 0x08, /* Sector count reg (per port) */
	PDC_SECTOR_NUMBER	= 0x0C, /* Sector number reg (per port) */
	PDC_CYLINDER_LOW	= 0x10, /* Cylinder low reg (per port) */
	PDC_CYLINDER_HIGH	= 0x14, /* Cylinder high reg (per port) */
	PDC_DEVICE		= 0x18, /* Device/Head reg (per port) */
	PDC_COMMAND		= 0x1C, /* Command/status reg (per port) */
	PDC_ALTSTATUS		= 0x38, /* Alternate-status/device-control reg (per port) */
	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_GLOBAL_CTL		= 0x48, /* Global control/status (per port) */
	PDC_CTLSTAT		= 0x60, /* IDE control and status (per port) */
	/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
	PDC_SATA_ERROR		= 0x04,
	PDC_PHYMODE4		= 0x14,
	PDC_LINK_LAYER_ERRORS	= 0x6C,
	PDC_FPDMA_CTLSTAT	= 0xD8,
	PDC_INTERNAL_DEBUG_1	= 0xF8,	/* also used for PATA */
	PDC_INTERNAL_DEBUG_2	= 0xFC,	/* also used for PATA */
	/* PDC_FPDMA_CTLSTAT bit definitions */
	PDC_FPDMA_CTLSTAT_RESET			= 1 << 3,
	PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG	= 1 << 10,
	PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG	= 1 << 11,
	/* PDC_GLOBAL_CTL bit definitions */
	PDC_PH_ERR		= (1 <<  8), /* PCI error while loading packet */
	PDC_SH_ERR		= (1 <<  9), /* PCI error while loading S/G table */
	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
				  PDC2_ATA_DMA_CNT_ERR,
	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
				  PDC1_ERR_MASK | PDC2_ERR_MASK,
	/* board indices into pdc_port_info[] */
	board_2037x		= 0,	/* FastTrak S150 TX2plus */
	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
	board_20319		= 2,	/* FastTrak S150 TX4 */
	board_20619		= 3,	/* FastTrak TX4000 */
	board_2057x		= 4,	/* SATAII150 Tx2plus */
	board_2057x_pata	= 5,	/* SATAII150 Tx2plus PATA port */
	board_40518		= 6,	/* SATAII150 Tx4 */
	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */
	/* Sequence counter control registers bit definitions */
	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */
	/* Feature register values */
	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */
	/* Device/Head register values */
	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */
	/* PDC_CTLSTAT bit definitions */
	PDC_DMA_ENABLE		= (1 << 7),
	PDC_IRQ_DISABLE		= (1 << 10),
	PDC_RESET		= (1 << 11), /* HDMA reset */
	PDC_COMMON_FLAGS	= ATA_FLAG_PIO_POLLING,
	/* ap->flags bits */
	PDC_FLAG_GEN_II		= (1 << 24),
	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
};
/* Per-port private data: DMA-coherent command packet buffer and its bus address. */
struct pdc_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
};
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static void pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
static int pdc_sata_cable_detect(struct ata_port *ap);
/* SCSI host template; S/G limit reduced by one for the ASIC PRD workaround. */
static struct scsi_host_template pdc_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= PDC_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};
/* Operations shared by all Promise variants (SFF base with PDC overrides). */
static const struct ata_port_operations pdc_common_ops = {
	.inherits		= &ata_sff_port_ops,
	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue,
	.sff_irq_clear		= pdc_irq_clear,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,
	.error_handler		= pdc_error_handler,
};
/* SATA-port operations (second-generation chips). */
static struct ata_port_operations pdc_sata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_sata_cable_detect,
	.freeze			= pdc_sata_freeze,
	.thaw			= pdc_sata_thaw,
	.scr_read		= pdc_sata_scr_read,
	.scr_write		= pdc_sata_scr_write,
	.port_start		= pdc_sata_port_start,
	.hardreset		= pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
   and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
	.inherits		= &pdc_sata_ops,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
};
/* PATA-port operations: softreset instead of hardreset, PATA cable detect. */
static struct ata_port_operations pdc_pata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_pata_cable_detect,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.port_start		= pdc_common_port_start,
	.softreset		= pdc_pata_softreset,
};
/* Per-board capability table, indexed by the board_* enum values above. */
static const struct ata_port_info pdc_port_info[] = {
	[board_2037x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},
	[board_2037x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},
	[board_20319] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},
	[board_20619] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},
	[board_2057x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},
	[board_2057x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_GEN_II,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},
	[board_40518] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},
};
/* PCI IDs handled by this driver; driver_data selects the pdc_port_info
 * entry for the board. */
static const struct pci_device_id pdc_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },

	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },

	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },

	{ }	/* terminate list */
};
/* PCI driver glue: probe allocates and activates the ATA host,
 * removal is handled by the generic libata helper. */
static struct pci_driver pdc_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_ata_pci_tbl,
	.probe			= pdc_ata_init_one,
	.remove			= ata_pci_remove_one,
};
static int pdc_common_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
/* we use the same prd table as bmdma, allocate it */
rc = ata_bmdma_port_start(ap);
if (rc)
return rc;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
if (!pp->pkt)
return -ENOMEM;
ap->private_data = pp;
return 0;
}
/* SATA-specific port start: common setup plus a PHYMODE4 align-timing
 * fixup that only applies to second-generation chips. */
static int pdc_sata_port_start(struct ata_port *ap)
{
	int rc;

	rc = pdc_common_port_start(ap);
	if (rc)
		return rc;

	/* fix up PHYMODE4 align timing */
	if (ap->flags & PDC_FLAG_GEN_II) {
		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
		unsigned int tmp;

		tmp = readl(sata_mmio + PDC_PHYMODE4);
		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
		writel(tmp, sata_mmio + PDC_PHYMODE4);
	}

	return 0;
}
/* Acknowledge the FPDMA DMA-setup and set-device-bits interrupt flags
 * without disturbing the rest of the FPDMA control/status register. */
static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u32 tmp;

	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;

	/* It's not allowed to write to the entire FPDMA_CTLSTAT register
	   when NCQ is running. So do a byte-sized write to bits 10 and 11. */
	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}
/* Pulse the FPDMA reset bit (set, 100us delay, clear), then clear any
 * pending FPDMA interrupt flags.  Byte-sized writes for the same reason
 * as in pdc_fpdma_clear_interrupt_flag(). */
static void pdc_fpdma_reset(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u8 tmp;

	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp &= 0x7F;	/* clear bit 7 before asserting reset */
	tmp |= PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */
	udelay(100);
	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	pdc_fpdma_clear_interrupt_flag(ap);
}
/* Poll (up to 100 * 100us) until the ASIC reports it is no longer in
 * the packet-command phase; used before resetting GEN_II ports. */
static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	unsigned int i;
	u32 tmp;

	/* check not at ASIC packet command phase */
	for (i = 0; i < 100; ++i) {
		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
		tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
		if ((tmp & 0xF) != 1)
			break;
		udelay(100);
	}
}
/* Clear the recorded SATA-error and link-layer-error bits
 * (write-1-to-clear registers). */
static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;

	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}
/* Soft-reset a port's packet engine: assert PDC_RESET in the control/
 * status register (retrying until the bit latches), then release it.
 * GEN_II SATA ports additionally get an FPDMA reset and error-register
 * cleanup. */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* GEN_II chips must not be reset mid packet-command phase */
	if (ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(ap);

	tmp = readl(ata_ctlstat_mmio);
	tmp |= PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);

	/* re-assert until readback shows the reset bit set (max 11 tries) */
	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, ata_ctlstat_mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);
	readl(ata_ctlstat_mmio); /* flush */

	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
		pdc_fpdma_reset(ap);
		pdc_clear_internal_debug_record_error_register(ap);
	}
}
/*
 * pdc_pata_cable_detect - report the PATA cable type for a port
 *
 * Reads bit 0 of the high byte of the port's CTLSTAT register: when the
 * bit is set a 40-wire cable is attached, otherwise an 80-wire cable.
 */
static int pdc_pata_cable_detect(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u8 ctl;

	ctl = readb(ata_mmio + PDC_CTLSTAT + 3);
	return (ctl & 0x01) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
}
/* SATA ports always have a SATA "cable". */
static int pdc_sata_cable_detect(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/*
 * pdc_sata_scr_read - read a SATA SCR (SStatus/SError/SControl) register
 *
 * The SCR registers are memory-mapped consecutively at scr_addr, one
 * 32-bit word per register.  Registers beyond SCR_CONTROL do not exist
 * on this hardware, so such requests fail with -EINVAL.
 */
static int pdc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = readl(scr_base + sc_reg * 4);
	return 0;
}
/*
 * pdc_sata_scr_write - write a SATA SCR (SStatus/SError/SControl) register
 *
 * Mirror of pdc_sata_scr_read(): rejects registers beyond SCR_CONTROL
 * with -EINVAL, otherwise writes the 32-bit value to the mapped slot.
 */
static int pdc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	writel(val, scr_base + sc_reg * 4);
	return 0;
}
/* Build the Promise command packet for an ATAPI command in pp->pkt.
 * The packet is a sequence of (register, value) byte pairs the ASIC
 * replays into the taskfile, terminated by the CDB itself. */
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	dma_addr_t sg_table = ap->bmdma_prd_dma;
	unsigned int cdb_len = qc->dev->cdb_len;
	u8 *cdb = qc->cdb;
	struct pdc_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	__le32 *buf32 = (__le32 *) buf;
	unsigned int dev_sel, feature;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_DMA:
		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;
	case ATAPI_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;
	default:
		BUG();
		break;
	}
	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

	/* select drive */
	if (sata_scr_valid(&ap->link))
		dev_sel = PDC_DEVICE_SATA;
	else
		dev_sel = qc->tf.device;

	buf[12] = (1 << 5) | ATA_REG_DEVICE;
	buf[13] = dev_sel;
	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
	buf[15] = dev_sel; /* once more, waiting for BSY to clear */

	buf[16] = (1 << 5) | ATA_REG_NSECT;
	buf[17] = qc->tf.nsect;
	buf[18] = (1 << 5) | ATA_REG_LBAL;
	buf[19] = qc->tf.lbal;

	/* set feature and byte counter registers */
	if (qc->tf.protocol != ATAPI_PROT_DMA)
		feature = PDC_FEATURE_ATAPI_PIO;
	else
		feature = PDC_FEATURE_ATAPI_DMA;

	buf[20] = (1 << 5) | ATA_REG_FEATURE;
	buf[21] = feature;
	buf[22] = (1 << 5) | ATA_REG_BYTEL;
	buf[23] = qc->tf.lbam;
	buf[24] = (1 << 5) | ATA_REG_BYTEH;
	buf[25] = qc->tf.lbah;

	/* send ATAPI packet command 0xA0 */
	buf[26] = (1 << 5) | ATA_REG_CMD;
	buf[27] = qc->tf.command;

	/* select drive and check DRQ */
	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
	buf[29] = dev_sel;

	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
	BUG_ON(cdb_len & ~0x1E);

	/* append the CDB as the final part */
	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
	memcpy(buf+31, cdb, cdb_len);
}
/**
 *	pdc_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *	Make sure hardware does not choke on it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	/* the ASIC mishandles final PRD entries longer than this */
	const u32 SG_COUNT_ASIC_BUG = 41*4;
	unsigned int si, idx;
	u32 len;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split any segment crossing a 64K boundary into
		 * multiple PRD entries, as required by PCI IDE */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* ASIC-bug workaround: if the final PRD entry is too long, split
	 * off its tail into one extra entry of SG_COUNT_ASIC_BUG bytes */
	len = le32_to_cpu(prd[idx - 1].flags_len);

	if (len > SG_COUNT_ASIC_BUG) {
		u32 addr;

		VPRINTK("Splitting last PRD.\n");

		addr = le32_to_cpu(prd[idx - 1].addr);
		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);

		addr = addr + len - SG_COUNT_ASIC_BUG;
		len = SG_COUNT_ASIC_BUG;
		prd[idx].addr = cpu_to_le32(addr);
		prd[idx].flags_len = cpu_to_le32(len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

		idx++;
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/* Prepare a queued command: build the S/G table and/or the Promise
 * command packet depending on the taskfile protocol.  The DMA cases
 * deliberately fall through into their NODATA counterparts so the
 * packet is always built after the S/G table. */
static void pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	VPRINTK("ENTER\n");

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
				   qc->dev->devno, pp->pkt);
		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;
	case ATAPI_PROT_PIO:
		pdc_fill_sg(qc);
		break;
	case ATAPI_PROT_DMA:
		pdc_fill_sg(qc);
		/*FALLTHROUGH*/
	case ATAPI_PROT_NODATA:
		pdc_atapi_pkt(qc);
		break;
	default:
		break;
	}
}
/*
 * pdc_is_sataii_tx4 - is this a SATAII TX4 (GEN_II, four-port) chip?
 *
 * Returns nonzero only when both the GEN_II and 4_PORTS flags are set.
 */
static int pdc_is_sataii_tx4(unsigned long flags)
{
	return (flags & PDC_FLAG_GEN_II) && (flags & PDC_FLAG_4_PORTS);
}
/*
 * pdc_port_no_to_ata_no - map a logical port index to an ATA engine no.
 *
 * On SATAII TX4 boards the four ATA engines are wired to the logical
 * ports in the order 3, 1, 0, 2; on all other boards the mapping is
 * the identity.
 */
static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
					  int is_sataii_tx4)
{
	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };

	if (is_sataii_tx4)
		return sataii_tx4_port_remap[port_no];
	return port_no;
}
/*
 * pdc_sata_nr_ports - number of ports on the board this port belongs to
 *
 * Four-port boards carry PDC_FLAG_4_PORTS; everything else has two.
 */
static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
	if (ap->flags & PDC_FLAG_4_PORTS)
		return 4;
	return 2;
}
/*
 * pdc_sata_ata_port_to_ata_no - find the ATA engine number for @ap
 *
 * Locates @ap's index within its host's port table and translates it
 * through the SATAII TX4 remap table if applicable.  BUGs if @ap is
 * not found among the host's ports.
 */
static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
	const struct ata_host *host = ap->host;
	unsigned int nr_ports = pdc_sata_nr_ports(ap);
	unsigned int idx = 0;

	while (idx < nr_ports && host->ports[idx] != ap)
		idx++;
	BUG_ON(idx >= nr_ports);

	return pdc_port_no_to_ata_no(idx, pdc_is_sataii_tx4(ap->flags));
}
/* Freeze a port: mask its interrupt and stop the DMA engine. */
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp |= PDC_IRQ_DISABLE;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
/* GEN_II SATA freeze: additionally mask (upper 16 bits) and ack (lower
 * bits) this port's hotplug events before the common freeze. */
static void pdc_sata_freeze(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	/* Disable hotplug events on this port.
	 *
	 * Locking:
	 * 1) hotplug register accesses must be serialised via host->lock
	 * 2) ap->lock == &ap->host->lock
	 * 3) ->freeze() and ->thaw() are called with ap->lock held
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << (ata_no + 16);
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */

	pdc_freeze(ap);
}
/* Thaw a port: acknowledge any pending interrupt, then unmask it. */
static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* clear IRQ */
	readl(ata_mmio + PDC_COMMAND);

	/* turn IRQ back on */
	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp &= ~PDC_IRQ_DISABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}
/* GEN_II SATA thaw: common thaw first, then acknowledge (low bits) and
 * unmask (clear upper 16 bits) this port's hotplug events. */
static void pdc_sata_thaw(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	pdc_thaw(ap);

	/* Enable hotplug events on this port.
	 * Locking: see pdc_sata_freeze().
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << ata_no;
	hotplug_status &= ~(0x11 << (ata_no + 16));
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */
}
/* PATA softreset: reset the packet engine first, then perform the
 * standard SFF softreset sequence. */
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}
/* Recover the ATA engine number from the port's MMIO layout. */
static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];

	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
	return (ata_mmio - host_mmio - 0x200) / 0x80;
}
/* Pulse the per-engine reset bit in byte 1 of the PCI control register
 * (clear, 100us delay, set).  host->lock serialises access to the
 * shared register. */
static void pdc_hard_reset_port(struct ata_port *ap)
{
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
	u8 tmp;

	spin_lock(&ap->host->lock);

	tmp = readb(pcictl_b1_mmio);
	tmp &= ~(0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */
	udelay(100);

	tmp |= (0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */

	spin_unlock(&ap->host->lock);
}
/* SATA hardreset: drain any packet-command phase (GEN_II), hard-reset
 * the engine and packet state, then do the standard SATA hardreset. */
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	if (link->ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(link->ap);
	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
	pdc_hard_reset_port(link->ap);
	pdc_reset_port(link->ap);

	/* sata_promise can't reliably acquire the first D2H Reg FIS
	 * after hardreset.  Do non-waiting hardreset and request
	 * follow-up SRST.
	 */
	return sata_std_hardreset(link, class, deadline);
}
/* EH entry: reset the packet engine (unless the port is frozen, where
 * the reset path runs elsewhere) and defer to the SFF error handler. */
static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}
/* After an internal command fails, reset the port so the DMA engine
 * forgets about the failed command. */
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}
/* Translate a port error interrupt into libata error flags, record
 * SError when available, reset the port and abort outstanding qcs. */
static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
			   u32 port_status, u32 err_mask)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int ac_err_mask = 0;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
	port_status &= err_mask;

	/* map hardware error bits onto libata AC_ERR_* categories */
	if (port_status & PDC_DRIVE_ERR)
		ac_err_mask |= AC_ERR_DEV;
	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
		ac_err_mask |= AC_ERR_OTHER;
	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
		ac_err_mask |= AC_ERR_ATA_BUS;
	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
		ac_err_mask |= AC_ERR_HOST_BUS;

	if (sata_scr_valid(&ap->link)) {
		u32 serror;

		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
		ehi->serror |= serror;
	}

	qc->err_mask |= ac_err_mask;

	pdc_reset_port(ap);

	ata_port_abort(ap);
}
/* Service one port's interrupt: route errors to pdc_error_intr(),
 * complete packet-engine commands, count anything else as idle IRQ.
 * Returns nonzero when the interrupt was handled. */
static unsigned int pdc_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	unsigned int handled = 0;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 port_status, err_mask;

	/* select the error bits relevant for this chip generation */
	err_mask = PDC_ERR_MASK;
	if (ap->flags & PDC_FLAG_GEN_II)
		err_mask &= ~PDC1_ERR_MASK;
	else
		err_mask &= ~PDC2_ERR_MASK;
	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
	if (unlikely(port_status & err_mask)) {
		pdc_error_intr(ap, qc, port_status, err_mask);
		return 1;
	}

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
	case ATAPI_PROT_DMA:
	case ATAPI_PROT_NODATA:
		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
		ata_qc_complete(qc);
		handled = 1;
		break;
	default:
		ap->stats.idle_irq++;
		break;
	}

	return handled;
}
/* Acknowledge a port interrupt by reading the command register. */
static void pdc_irq_clear(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	readl(ata_mmio + PDC_COMMAND);
}
/* Shared interrupt handler: reads and clears hotplug flags (GEN_II),
 * reads the SEQ interrupt mask, then per-port freezes hotplugged ports
 * or dispatches packet completions to pdc_host_intr(). */
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp;
	unsigned int handled = 0;
	void __iomem *host_mmio;
	unsigned int hotplug_offset, ata_no;
	u32 hotplug_status;
	int is_sataii_tx4;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	host_mmio = host->iomap[PDC_MMIO_BAR];

	spin_lock(&host->lock);

	/* read and clear hotplug flags for all ports */
	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
		hotplug_offset = PDC2_SATA_PLUG_CSR;
		hotplug_status = readl(host_mmio + hotplug_offset);
		if (hotplug_status & 0xff)
			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
		hotplug_status &= 0xff;	/* clear uninteresting bits */
	} else
		hotplug_status = 0;

	/* reading should also clear interrupts */
	mask = readl(host_mmio + PDC_INT_SEQMASK);

	/* all-ones means the device is gone or not ours */
	if (mask == 0xffffffff && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 2\n");
		goto done_irq;
	}

	mask &= 0xffff;		/* only 16 SEQIDs possible */
	if (mask == 0 && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 3\n");
		goto done_irq;
	}

	writel(mask, host_mmio + PDC_INT_SEQMASK);

	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);

	for (i = 0; i < host->n_ports; i++) {
		VPRINTK("port %u\n", i);
		ap = host->ports[i];

		/* check for a plug or unplug event */
		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		tmp = hotplug_status & (0x11 << ata_no);
		if (tmp) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_hotplugged(ehi);
			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
			ata_port_freeze(ap);
			++handled;
			continue;
		}

		/* check for a packet interrupt (SEQID == port_no + 1) */
		tmp = mask & (1 << (i + 1));
		if (tmp) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc_host_intr(ap, qc);
		}
	}

	VPRINTK("EXIT\n");

done_irq:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
/* Kick off a prepared command packet: arm the SEQID slot, stamp the
 * sequence id into the packet, and hand the packet's DMA address to
 * the engine. */
static void pdc_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	unsigned int port_no = ap->port_no;
	u8 seq = (u8) (port_no + 1);	/* SEQID 0 is reserved */

	VPRINTK("ENTER, ap %p\n", ap);

	writel(0x00000001, host_mmio + (seq * 4));
	readl(host_mmio + (seq * 4));	/* flush */

	pp->pkt[2] = seq;
	wmb();			/* flush PRD, pkt writes */
	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
}
/* Issue a command: protocols the packet engine can run go through
 * pdc_packet_start(); polled/CDB-interrupt cases deliberately fall
 * through to the SFF issue path. */
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATAPI_PROT_NODATA:
		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		pdc_packet_start(qc);
		return 0;
	default:
		break;
	}
	return ata_sff_qc_issue(qc);
}
/* Taskfile load; DMA protocols must never reach this path, they go
 * through the packet engine instead. */
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}
/* Command execution; as with pdc_tf_load_mmio(), DMA protocols are
 * handled by the packet engine and must not come through here. */
static void pdc_exec_command_mmio(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}
/*
 * pdc_check_atapi_dma - decide whether an ATAPI command may use DMA
 *
 * Returns 0 if the command may use DMA, nonzero to force PIO.  Only a
 * whitelist of read/write opcodes is allowed to use DMA, and WRITE_10
 * commands targeting the very top of the LBA space are forced to PIO.
 */
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio;

	/* Whitelist commands that may use DMA; everything else gets PIO. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
		break;
	default:
		pio = 1; /* atapi dma off by default */
		break;
	}

	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba = ((unsigned int)scsicmd[2] << 24) |
				   (scsicmd[3] << 16) |
				   (scsicmd[4] << 8) |
				   scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}

	return pio;
}
/* First generation chips cannot use ATAPI DMA on SATA ports:
 * always force PIO. */
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;
}
/* Wire up the taskfile register addresses for one port.  The Promise
 * chips expose 32-bit-spaced shadow registers at fixed offsets from
 * the port base. */
static void pdc_ata_setup_port(struct ata_port *ap,
			       void __iomem *base, void __iomem *scr_addr)
{
	ap->ioaddr.cmd_addr		= base;
	ap->ioaddr.data_addr		= base;
	ap->ioaddr.feature_addr		=
	ap->ioaddr.error_addr		= base + 0x4;
	ap->ioaddr.nsect_addr		= base + 0x8;
	ap->ioaddr.lbal_addr		= base + 0xc;
	ap->ioaddr.lbam_addr		= base + 0x10;
	ap->ioaddr.lbah_addr		= base + 0x14;
	ap->ioaddr.device_addr		= base + 0x18;
	ap->ioaddr.command_addr		=
	ap->ioaddr.status_addr		= base + 0x1c;
	ap->ioaddr.altstatus_addr	=
	ap->ioaddr.ctl_addr		= base + 0x38;
	ap->ioaddr.scr_addr		= scr_addr;
}
/* One-time controller init: burst/FIFO tweaks, hotplug flag/mask setup,
 * and (first-generation chips only) TBG clock and slew-rate tuning. */
static void pdc_host_init(struct ata_host *host)
{
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
	int hotplug_offset;
	u32 tmp;

	if (is_gen2)
		hotplug_offset = PDC2_SATA_PLUG_CSR;
	else
		hotplug_offset = PDC_SATA_PLUG_CSR;

	/*
	 * Except for the hotplug stuff, this is voodoo from the
	 * Promise driver.  Label this entire section
	 * "TODO: figure out why we do this"
	 */

	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
	tmp = readl(host_mmio + PDC_FLASH_CTL);
	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
	if (!is_gen2)
		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
	writel(tmp, host_mmio + PDC_FLASH_CTL);

	/* clear plug/unplug flags for all ports */
	tmp = readl(host_mmio + hotplug_offset);
	writel(tmp | 0xff, host_mmio + hotplug_offset);

	tmp = readl(host_mmio + hotplug_offset);
	if (is_gen2)	/* unmask plug/unplug ints */
		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
	else		/* mask plug/unplug ints */
		writel(tmp | 0xff0000, host_mmio + hotplug_offset);

	/* don't initialise TBG or SLEW on 2nd generation chips */
	if (is_gen2)
		return;

	/* reduce TBG clock to 133 Mhz. */
	tmp = readl(host_mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bit 17, 16*/
	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
	writel(tmp, host_mmio + PDC_TBG_MODE);
	readl(host_mmio + PDC_TBG_MODE);	/* flush */
	msleep(10);

	/* adjust slew rate control register. */
	tmp = readl(host_mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
	tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
	writel(tmp, host_mmio + PDC_SLEW_CTL);
}
/* PCI probe: enable and map the device, work out the port count (an
 * extra PATA port may exist on SATA+PATA combo boards), allocate the
 * libata host, initialise the chip, and activate with a shared IRQ. */
static int pdc_ata_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
	const struct ata_port_info *ppi[PDC_MAX_PORTS];
	struct ata_host *host;
	void __iomem *host_mmio;
	int n_ports, i, rc;
	int is_sataii_tx4;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* enable and acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];

	/* determine port configuration and setup host */
	n_ports = 2;
	if (pi->flags & PDC_FLAG_4_PORTS)
		n_ports = 4;
	for (i = 0; i < n_ports; i++)
		ppi[i] = pi;

	/* combo boards expose an extra PATA port unless strapped off */
	if (pi->flags & PDC_FLAG_SATA_PATA) {
		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
		if (!(tmp & 0x80))
			ppi[n_ports++] = pi + 1;
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		unsigned int ata_offset = 0x200 + ata_no * 0x80;
		unsigned int scr_offset = 0x400 + ata_no * 0x100;

		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
	}

	/* initialize adapter */
	pdc_host_init(host);

	/* hardware is 32-bit DMA only */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* start host, request IRQ and attach */
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
				 &pdc_ata_sht);
}
/* Module init: register the PCI driver. */
static int __init pdc_ata_init(void)
{
	return pci_register_driver(&pdc_ata_pci_driver);
}
/* Module exit: unregister the PCI driver. */
static void __exit pdc_ata_exit(void)
{
	pci_unregister_driver(&pdc_ata_pci_driver);
}
/* module metadata */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(pdc_ata_init);
module_exit(pdc_ata_exit);
| gpl-2.0 |
neohackt/android_kernel_motorola_otus | arch/tile/kernel/sysfs.c | 4459 | 5125 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* /sys entry support.
*/
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <hv/hypervisor.h>
/* Return a string queried from the hypervisor, truncated to page size. */
static ssize_t get_hv_confstr(char *page, int query)
{
ssize_t n = hv_confstr(query, (unsigned long)page, PAGE_SIZE - 1);
n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1) - 1;
if (n)
page[n++] = '\n';
page[n] = '\0';
return n;
}
/* sysfs: report the chip grid width (columns of tiles). */
static ssize_t chip_width_show(struct device *dev,
			       struct device_attribute *attr,
			       char *page)
{
	return sprintf(page, "%u\n", smp_width);
}
static DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL);
/* sysfs: report the chip grid height (rows of tiles). */
static ssize_t chip_height_show(struct device *dev,
				struct device_attribute *attr,
				char *page)
{
	return sprintf(page, "%u\n", smp_height);
}
static DEVICE_ATTR(chip_height, 0444, chip_height_show, NULL);
/* sysfs: report the chip serial number, queried from the hypervisor. */
static ssize_t chip_serial_show(struct device *dev,
				struct device_attribute *attr,
				char *page)
{
	return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM);
}
static DEVICE_ATTR(chip_serial, 0444, chip_serial_show, NULL);
/* sysfs: report the chip revision, queried from the hypervisor. */
static ssize_t chip_revision_show(struct device *dev,
				  struct device_attribute *attr,
				  char *page)
{
	return get_hv_confstr(page, HV_CONFSTR_CHIP_REV);
}
static DEVICE_ATTR(chip_revision, 0444, chip_revision_show, NULL);
/* sysfs: fixed hypervisor type string. */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *page)
{
	return sprintf(page, "tilera\n");
}
static DEVICE_ATTR(type, 0444, type_show, NULL);
/* Generate a read-only device attribute whose show() returns the
 * given hypervisor confstr. */
#define HV_CONF_ATTR(name, conf)					\
	static ssize_t name ## _show(struct device *dev,		\
				     struct device_attribute *attr, \
				     char *page)			\
	{								\
		return get_hv_confstr(page, conf);			\
	}								\
	static DEVICE_ATTR(name, 0444, name ## _show, NULL);

/* hypervisor and board identification attributes */
HV_CONF_ATTR(version,		HV_CONFSTR_HV_SW_VER)
HV_CONF_ATTR(config_version,	HV_CONFSTR_HV_CONFIG_VER)

HV_CONF_ATTR(board_part,	HV_CONFSTR_BOARD_PART_NUM)
HV_CONF_ATTR(board_serial,	HV_CONFSTR_BOARD_SERIAL_NUM)
HV_CONF_ATTR(board_revision,	HV_CONFSTR_BOARD_REV)
HV_CONF_ATTR(board_description,	HV_CONFSTR_BOARD_DESC)
HV_CONF_ATTR(mezz_part,		HV_CONFSTR_MEZZ_PART_NUM)
HV_CONF_ATTR(mezz_serial,	HV_CONFSTR_MEZZ_SERIAL_NUM)
HV_CONF_ATTR(mezz_revision,	HV_CONFSTR_MEZZ_REV)
HV_CONF_ATTR(mezz_description,	HV_CONFSTR_MEZZ_DESC)
HV_CONF_ATTR(switch_control,	HV_CONFSTR_SWITCH_CONTROL)
/* attributes grouped under /sys/hypervisor/board/ */
static struct attribute *board_attrs[] = {
	&dev_attr_board_part.attr,
	&dev_attr_board_serial.attr,
	&dev_attr_board_revision.attr,
	&dev_attr_board_description.attr,
	&dev_attr_mezz_part.attr,
	&dev_attr_mezz_serial.attr,
	&dev_attr_mezz_revision.attr,
	&dev_attr_mezz_description.attr,
	&dev_attr_switch_control.attr,
	NULL
};
/* "board" subdirectory group for the attributes above */
static struct attribute_group board_attr_group = {
	.name	= "board",
	.attrs	= board_attrs,
};
/* binary sysfs file exposing the raw hypervisor configuration */
static struct bin_attribute hvconfig_bin;

static ssize_t
hvconfig_bin_read(struct file *filp, struct kobject *kobj,
		  struct bin_attribute *bin_attr,
		  char *buf, loff_t off, size_t count)
{
	static size_t size;

	/* Lazily learn the true size (minus the trailing NUL). */
	if (size == 0)
		size = hv_confstr(HV_CONFSTR_HV_CONFIG, 0, 0) - 1;

	/* Check and adjust input parameters. */
	if (off > size)
		return -EINVAL;
	if (count > size - off)
		count = size - off;

	if (count) {
		/* Get a copy of the hvc and copy out the relevant portion. */
		char *hvc;

		/* NOTE(review): this shrinks the cached static 'size' to
		 * off+count; it is restored only once a read reaches the
		 * end of the data — verify this is intentional. */
		size = off + count;
		hvc = kmalloc(size, GFP_KERNEL);
		if (hvc == NULL)
			return -ENOMEM;
		hv_confstr(HV_CONFSTR_HV_CONFIG, (unsigned long)hvc, size);
		memcpy(buf, hvc + off, count);
		kfree(hvc);
	}

	return count;
}
/* Create all chip/hypervisor/board sysfs entries at boot; stops at the
 * first error and returns it. */
static int __init create_sysfs_entries(void)
{
	int err = 0;

	/* per-CPU-subsystem chip attributes */
#define create_cpu_attr(name)						\
	if (!err)							\
		err = device_create_file(cpu_subsys.dev_root, &dev_attr_##name);
	create_cpu_attr(chip_width);
	create_cpu_attr(chip_height);
	create_cpu_attr(chip_serial);
	create_cpu_attr(chip_revision);

	/* attributes under /sys/hypervisor */
#define create_hv_attr(name)						\
	if (!err)							\
		err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name.attr);
	create_hv_attr(type);
	create_hv_attr(version);
	create_hv_attr(config_version);

	if (!err)
		err = sysfs_create_group(hypervisor_kobj, &board_attr_group);

	if (!err) {
		sysfs_bin_attr_init(&hvconfig_bin);
		hvconfig_bin.attr.name = "hvconfig";
		hvconfig_bin.attr.mode = S_IRUGO;
		hvconfig_bin.read = hvconfig_bin_read;
		hvconfig_bin.size = PAGE_SIZE;
		err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
	}

	return err;
}
subsys_initcall(create_sysfs_entries);
| gpl-2.0 |
elisam98/android_kernel_lge_w5c | arch/arm/plat-omap/omap_device.c | 4715 | 31383 |
/*
* omap_device implementation
*
* Copyright (C) 2009-2010 Nokia Corporation
* Paul Walmsley, Kevin Hilman
*
* Developed in collaboration with (alphabetical order): Benoit
* Cousson, Thara Gopinath, Tony Lindgren, Rajendra Nayak, Vikram
* Pandita, Sakari Poussa, Anand Sawant, Santosh Shilimkar, Richard
* Woodruff
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code provides a consistent interface for OMAP device drivers
* to control power management and interconnect properties of their
* devices.
*
* In the medium- to long-term, this code should either be
* a) implemented via arch-specific pointers in platform_data
* or
* b) implemented as a proper omap_bus/omap_device in Linux, no more
* platform_data func pointers
*
*
* Guidelines for usage by driver authors:
*
* 1. These functions are intended to be used by device drivers via
* function pointers in struct platform_data. As an example,
* omap_device_enable() should be passed to the driver as
*
* struct foo_driver_platform_data {
* ...
* int (*device_enable)(struct platform_device *pdev);
* ...
* }
*
* Note that the generic "device_enable" name is used, rather than
* "omap_device_enable". This is so other architectures can pass in their
* own enable/disable functions here.
*
* This should be populated during device setup:
*
* ...
* pdata->device_enable = omap_device_enable;
* ...
*
* 2. Drivers should first check to ensure the function pointer is not null
* before calling it, as in:
*
* if (pdata->device_enable)
* pdata->device_enable(pdev);
*
* This allows other architectures that don't use similar device_enable()/
* device_shutdown() functions to execute normally.
*
* ...
*
* Suggested usage by device drivers:
*
* During device initialization:
* device_enable()
*
* During device idle:
* (save remaining device context if necessary)
* device_idle();
*
* During device resume:
* device_enable();
* (restore context if necessary)
*
* During device shutdown:
* device_shutdown()
* (device must be reinitialized at this point to use it again)
*
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/notifier.h>
#include <plat/omap_device.h>
#include <plat/omap_hwmod.h>
#include <plat/clock.h>
/* These parameters are passed to _omap_device_{de,}activate() */
#define USE_WAKEUP_LAT			0
#define IGNORE_WAKEUP_LAT		1

static int omap_early_device_register(struct platform_device *pdev);

/* Default single-step latency table: idle/enable all hwmods, with
 * automatic worst-case latency adjustment. */
static struct omap_device_pm_latency omap_default_latency[] = {
	{
		.deactivate_func = omap_device_idle_hwmods,
		.activate_func   = omap_device_enable_hwmods,
		.flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST,
	}
};
/* Private functions */
/**
 * _omap_device_activate - increase device readiness
 * @od: struct omap_device *
 * @ignore_lat: increase to latency target (0) or full readiness (1)?
 *
 * Increase readiness of omap_device @od (thus decreasing device
 * wakeup latency, but consuming more power).  If @ignore_lat is
 * IGNORE_WAKEUP_LAT, make the omap_device fully active.  Otherwise,
 * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup
 * latency is greater than the requested maximum wakeup latency, step
 * backwards in the omap_device_pm_latency table to ensure the
 * device's maximum wakeup latency is less than or equal to the
 * requested maximum wakeup latency.  Returns 0.
 */
static int _omap_device_activate(struct omap_device *od, u8 ignore_lat)
{
	struct timespec a, b, c;

	dev_dbg(&od->pdev->dev, "omap_device: activating\n");

	while (od->pm_lat_level > 0) {
		struct omap_device_pm_latency *odpl;
		unsigned long long act_lat = 0;

		od->pm_lat_level--;

		odpl = od->pm_lats + od->pm_lat_level;

		/* Honouring the constraint: stop stepping up once the
		 * device already meets its wakeup-latency limit. */
		if (!ignore_lat &&
		    (od->dev_wakeup_lat <= od->_dev_wakeup_lat_limit))
			break;

		read_persistent_clock(&a);

		/* XXX check return code */
		odpl->activate_func(od);

		read_persistent_clock(&b);

		/* measure how long this activation step actually took */
		c = timespec_sub(b, a);
		act_lat = timespec_to_ns(&c);

		dev_dbg(&od->pdev->dev,
			"omap_device: pm_lat %d: activate: elapsed time "
			"%llu nsec\n", od->pm_lat_level, act_lat);

		if (act_lat > odpl->activate_lat) {
			odpl->activate_lat_worst = act_lat;
			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
				odpl->activate_lat = act_lat;
				dev_dbg(&od->pdev->dev,
					"new worst case activate latency "
					"%d: %llu\n",
					od->pm_lat_level, act_lat);
			} else
				/* fixed typo: "exptected" -> "expected" */
				dev_warn(&od->pdev->dev,
					 "activate latency %d "
					 "higher than expected. (%llu > %d)\n",
					 od->pm_lat_level, act_lat,
					 odpl->activate_lat);
		}

		od->dev_wakeup_lat -= odpl->activate_lat;
	}

	return 0;
}
/**
 * _omap_device_deactivate - decrease device readiness
 * @od: struct omap_device *
 * @ignore_lat: decrease to latency target (0) or full inactivity (1)?
 *
 * Decrease readiness of omap_device @od (thus increasing device
 * wakeup latency, but conserving power).  If @ignore_lat is
 * IGNORE_WAKEUP_LAT, make the omap_device fully inactive.  Otherwise,
 * if @ignore_lat is USE_WAKEUP_LAT, and the device's maximum wakeup
 * latency is less than the requested maximum wakeup latency, step
 * forwards in the omap_device_pm_latency table to ensure the device's
 * maximum wakeup latency is less than or equal to the requested
 * maximum wakeup latency.  Returns 0.
 */
static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
{
	struct timespec a, b, c;

	dev_dbg(&od->pdev->dev, "omap_device: deactivating\n");

	while (od->pm_lat_level < od->pm_lats_cnt) {
		struct omap_device_pm_latency *odpl;
		unsigned long long deact_lat = 0;

		odpl = od->pm_lats + od->pm_lat_level;

		/*
		 * When honoring latency limits, stop before a step that
		 * would push the wakeup latency over the limit.
		 */
		if (!ignore_lat &&
		    ((od->dev_wakeup_lat + odpl->activate_lat) >
		     od->_dev_wakeup_lat_limit))
			break;

		/* Measure the time taken by this deactivation step */
		read_persistent_clock(&a);

		/* XXX check return code */
		odpl->deactivate_func(od);

		read_persistent_clock(&b);

		c = timespec_sub(b, a);
		deact_lat = timespec_to_ns(&c);

		dev_dbg(&od->pdev->dev,
			"omap_device: pm_lat %d: deactivate: elapsed time "
			"%llu nsec\n", od->pm_lat_level, deact_lat);

		if (deact_lat > odpl->deactivate_lat) {
			odpl->deactivate_lat_worst = deact_lat;
			if (odpl->flags & OMAP_DEVICE_LATENCY_AUTO_ADJUST) {
				/* Track the new worst case automatically */
				odpl->deactivate_lat = deact_lat;
				dev_dbg(&od->pdev->dev,
					"new worst case deactivate latency "
					"%d: %llu\n",
					od->pm_lat_level, deact_lat);
			} else
				/* fixed typo: "exptected" -> "expected" */
				dev_warn(&od->pdev->dev,
					 "deactivate latency %d "
					 "higher than expected. (%llu > %d)\n",
					 od->pm_lat_level, deact_lat,
					 odpl->deactivate_lat);
		}

		od->dev_wakeup_lat += odpl->activate_lat;
		od->pm_lat_level++;
	}

	return 0;
}
/*
 * _add_clkdev - register one clkdev alias for an omap_device
 * @od: omap_device whose dev_name() becomes the clkdev dev-id
 * @clk_alias: connection id (role) for the alias
 * @clk_name: name of the underlying OMAP clock
 *
 * Creates a <dev_name, clk_alias> -> clk mapping unless one already
 * exists.  Failures are logged but not propagated.
 */
static void _add_clkdev(struct omap_device *od, const char *clk_alias,
			const char *clk_name)
{
	struct clk_lookup *lookup;
	struct clk *clk;

	if (!clk_alias || !clk_name)
		return;

	dev_dbg(&od->pdev->dev, "Creating %s -> %s\n", clk_alias, clk_name);

	/* Nothing to do if the alias is already registered. */
	clk = clk_get_sys(dev_name(&od->pdev->dev), clk_alias);
	if (!IS_ERR(clk)) {
		dev_warn(&od->pdev->dev,
			 "alias %s already exists\n", clk_alias);
		clk_put(clk);
		return;
	}

	clk = omap_clk_get_by_name(clk_name);
	if (IS_ERR(clk)) {
		dev_err(&od->pdev->dev,
			"omap_clk_get_by_name for %s failed\n", clk_name);
		return;
	}

	lookup = clkdev_alloc(clk, clk_alias, dev_name(&od->pdev->dev));
	if (!lookup) {
		dev_err(&od->pdev->dev,
			"clkdev_alloc for %s failed\n", clk_alias);
		return;
	}

	clkdev_add(lookup);
}
/**
 * _add_hwmod_clocks_clkdev - add clkdev entries for a hwmod's clocks
 * @od: struct omap_device *od
 * @oh: struct omap_hwmod *oh
 *
 * Registers a clkdev alias of the form <dev-id=dev_name, con-id=role>
 * for the hwmod's main clock (under the "fck" role) and for each of
 * its optional clocks, so drivers can clk_get(<dev*>, <role>).
 * Entries that already exist are left alone.  No return value.
 */
static void _add_hwmod_clocks_clkdev(struct omap_device *od,
				     struct omap_hwmod *oh)
{
	int n;

	/* Main functional clock gets the conventional "fck" alias. */
	_add_clkdev(od, "fck", oh->main_clk);

	/* One alias per optional clock, keyed by its role. */
	for (n = 0; n < oh->opt_clks_cnt; n++)
		_add_clkdev(od, oh->opt_clks[n].role, oh->opt_clks[n].clk);
}
/**
 * omap_device_build_from_dt - build an omap_device for a DT platform_device
 * @pdev: platform_device created from the device tree
 *
 * Look up the "ti,hwmods" property of @pdev's DT node, resolve each
 * named hwmod, and attach an omap_device to the platform device.
 * Returns 0 on success or a negative error code.
 */
static int omap_device_build_from_dt(struct platform_device *pdev)
{
	struct omap_hwmod **hwmods;
	struct omap_device *od;
	struct omap_hwmod *oh;
	struct device_node *node = pdev->dev.of_node;
	const char *oh_name;
	int oh_cnt, i, ret = 0;

	oh_cnt = of_property_count_strings(node, "ti,hwmods");
	if (!oh_cnt || IS_ERR_VALUE(oh_cnt)) {
		dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n");
		return -ENODEV;
	}

	/* kcalloc: zeroing + implicit n*size overflow check */
	hwmods = kcalloc(oh_cnt, sizeof(struct omap_hwmod *), GFP_KERNEL);
	if (!hwmods) {
		ret = -ENOMEM;
		goto odbfd_exit;
	}

	for (i = 0; i < oh_cnt; i++) {
		of_property_read_string_index(node, "ti,hwmods", i, &oh_name);
		oh = omap_hwmod_lookup(oh_name);
		if (!oh) {
			dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n",
				oh_name);
			ret = -EINVAL;
			goto odbfd_exit1;
		}
		hwmods[i] = oh;
	}

	/*
	 * omap_device_alloc() returns ERR_PTR() on failure, never NULL;
	 * the previous "if (!od)" test could never trigger and would
	 * then have taken PTR_ERR() of a valid pointer.
	 */
	od = omap_device_alloc(pdev, hwmods, oh_cnt, NULL, 0);
	if (IS_ERR(od)) {
		dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
			oh_name);
		ret = PTR_ERR(od);
		goto odbfd_exit1;
	}

	if (of_get_property(node, "ti,no_idle_on_suspend", NULL))
		omap_device_disable_idle_on_suspend(pdev);

	pdev->dev.pm_domain = &omap_device_pm_domain;

odbfd_exit1:
	/* omap_device_alloc() keeps its own copy of the hwmod array */
	kfree(hwmods);
odbfd_exit:
	return ret;
}
/*
 * Platform-bus notifier: build the omap_device when a DT-backed
 * platform device is added, tear it down when the device is removed.
 * Always returns NOTIFY_DONE so other notifiers still run.
 */
static int _omap_device_notifier_call(struct notifier_block *nb,
				      unsigned long event, void *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	if (event == BUS_NOTIFY_ADD_DEVICE) {
		if (pdev->dev.of_node)
			omap_device_build_from_dt(pdev);
	} else if (event == BUS_NOTIFY_DEL_DEVICE) {
		if (pdev->archdata.od)
			omap_device_delete(pdev->archdata.od);
	}

	return NOTIFY_DONE;
}
/* Public functions for use by core code */
/**
 * omap_device_get_context_loss_count - get lost context count
 * @pdev: platform_device backing the omap_device
 *
 * Query the context loss counter of the first (primary) hwmod of the
 * omap_device associated with @pdev.  Callers should consider context
 * lost whenever the value returned differs from the one they saw on
 * the previous call.  Returns 0 if the omap_device has no hwmods.
 */
int omap_device_get_context_loss_count(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);

	if (!od->hwmods_cnt)
		return 0;

	return omap_hwmod_get_context_loss_count(od->hwmods[0]);
}
/**
 * omap_device_count_resources - count struct resource entries needed
 * @od: struct omap_device *
 *
 * Sum the resource counts of all hwmods backing @od.  Used to size the
 * allocation that omap_device_fill_resources() will populate.  Returns
 * the total count.
 */
static int omap_device_count_resources(struct omap_device *od)
{
	int i, cnt = 0;

	for (i = 0; i < od->hwmods_cnt; i++)
		cnt += omap_hwmod_count_resources(od->hwmods[i]);

	pr_debug("omap_device: %s: counted %d total resources across %d "
		 "hwmods\n", od->pdev->name, cnt, od->hwmods_cnt);

	return cnt;
}
/**
 * omap_device_fill_resources - fill in array of struct resource
 * @od: struct omap_device *
 * @res: pointer to an array of struct resource to be filled in
 *
 * Populate one or more empty struct resource pointed to by @res with
 * the resource data for this omap_device @od, advancing @res past each
 * hwmod's entries.  @res must have been sized via
 * omap_device_count_resources().  Always returns 0.
 */
static int omap_device_fill_resources(struct omap_device *od,
				      struct resource *res)
{
	int i, r;

	/*
	 * Dead "c" accumulator removed: it was summed but never read
	 * (the function unconditionally returns 0).
	 */
	for (i = 0; i < od->hwmods_cnt; i++) {
		r = omap_hwmod_fill_resources(od->hwmods[i], res);
		res += r;
	}

	return 0;
}
/**
 * omap_device_alloc - allocate an omap_device
 * @pdev: platform_device that will be included in this omap_device
 * @ohs: ptr to an array of omap_hwmod pointers backing this omap_device
 * @oh_cnt: number of entries in @ohs
 * @pm_lats: pointer to a omap_device_pm_latency array for this device;
 *	     may be NULL, in which case omap_default_latency is used
 * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats
 *
 * Convenience function for allocating an omap_device structure and filling
 * hwmods, resources and pm_latency attributes.  Private copies are made
 * of both @ohs and @pm_lats, so the caller keeps ownership of its arrays.
 *
 * Returns a struct omap_device pointer or ERR_PTR() on error; never NULL.
 */
struct omap_device *omap_device_alloc(struct platform_device *pdev,
					struct omap_hwmod **ohs, int oh_cnt,
					struct omap_device_pm_latency *pm_lats,
					int pm_lats_cnt)
{
	int ret = -ENOMEM;
	struct omap_device *od;
	struct resource *res = NULL;
	int i, res_count;
	struct omap_hwmod **hwmods;

	od = kzalloc(sizeof(struct omap_device), GFP_KERNEL);
	if (!od) {
		ret = -ENOMEM;
		goto oda_exit1;
	}
	od->hwmods_cnt = oh_cnt;

	/* Private copy of the caller's hwmod pointer array */
	hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
	if (!hwmods)
		goto oda_exit2;

	od->hwmods = hwmods;
	od->pdev = pdev;

	/*
	 * HACK: Ideally the resources from DT should match, and hwmod
	 * should just add the missing ones. Since the name is not
	 * properly populated by DT, stick to hwmod resources only.
	 */
	if (pdev->num_resources && pdev->resource)
		dev_warn(&pdev->dev, "%s(): resources already allocated %d\n",
			__func__, pdev->num_resources);

	res_count = omap_device_count_resources(od);
	if (res_count > 0) {
		dev_dbg(&pdev->dev, "%s(): resources allocated from hwmod %d\n",
			__func__, res_count);
		res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL);
		if (!res)
			goto oda_exit3;

		omap_device_fill_resources(od, res);

		/* platform_device_add_resources() copies @res, so free it */
		ret = platform_device_add_resources(pdev, res, res_count);
		kfree(res);

		if (ret)
			goto oda_exit3;
	}

	/* Fall back to the single-level default latency table */
	if (!pm_lats) {
		pm_lats = omap_default_latency;
		pm_lats_cnt = ARRAY_SIZE(omap_default_latency);
	}

	od->pm_lats_cnt = pm_lats_cnt;
	od->pm_lats = kmemdup(pm_lats,
			sizeof(struct omap_device_pm_latency) * pm_lats_cnt,
			GFP_KERNEL);
	if (!od->pm_lats)
		goto oda_exit3;

	pdev->archdata.od = od;

	/* Link each hwmod back to us and publish its clock aliases */
	for (i = 0; i < oh_cnt; i++) {
		hwmods[i]->od = od;
		_add_hwmod_clocks_clkdev(od, hwmods[i]);
	}

	return od;

oda_exit3:
	kfree(hwmods);
oda_exit2:
	kfree(od);
oda_exit1:
	dev_err(&pdev->dev, "omap_device: build failed (%d)\n", ret);

	return ERR_PTR(ret);
}
/*
 * omap_device_delete - release an omap_device built by omap_device_alloc()
 * @od: omap_device to free; NULL is silently ignored
 *
 * Detaches @od from its platform_device and frees the private pm_lats
 * and hwmods copies along with the structure itself.
 */
void omap_device_delete(struct omap_device *od)
{
	if (od == NULL)
		return;

	od->pdev->archdata.od = NULL;

	kfree(od->pm_lats);
	kfree(od->hwmods);
	kfree(od);
}
/**
 * omap_device_build - build and register an omap_device with one omap_hwmod
 * @pdev_name: name of the platform_device driver to use
 * @pdev_id: this platform_device's connection ID
 * @oh: ptr to the single omap_hwmod that backs this omap_device
 * @pdata: platform_data ptr to associate with the platform_device
 * @pdata_len: amount of memory pointed to by @pdata
 * @pm_lats: pointer to a omap_device_pm_latency array for this device
 * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats
 * @is_early_device: should the device be registered as an early device or not
 *
 * Single-hwmod convenience wrapper around omap_device_build_ss().
 * Returns ERR_PTR(-EINVAL) if @oh is NULL; otherwise passes along the
 * return value of omap_device_build_ss().
 */
struct platform_device __init *omap_device_build(const char *pdev_name, int pdev_id,
					struct omap_hwmod *oh, void *pdata,
					int pdata_len,
					struct omap_device_pm_latency *pm_lats,
					int pm_lats_cnt, int is_early_device)
{
	struct omap_hwmod *ohs[] = { oh };

	if (oh == NULL)
		return ERR_PTR(-EINVAL);

	return omap_device_build_ss(pdev_name, pdev_id, ohs, ARRAY_SIZE(ohs),
				    pdata, pdata_len, pm_lats, pm_lats_cnt,
				    is_early_device);
}
/**
 * omap_device_build_ss - build and register an omap_device with multiple hwmods
 * @pdev_name: name of the platform_device driver to use
 * @pdev_id: this platform_device's connection ID
 * @ohs: ptr to an array of omap_hwmod pointers backing this omap_device
 * @oh_cnt: number of entries in @ohs
 * @pdata: platform_data ptr to associate with the platform_device
 * @pdata_len: amount of memory pointed to by @pdata
 * @pm_lats: pointer to a omap_device_pm_latency array for this device
 * @pm_lats_cnt: ARRAY_SIZE() of @pm_lats
 * @is_early_device: should the device be registered as an early device or not
 *
 * Convenience function for building and registering an omap_device
 * subsystem record.  Subsystem records consist of multiple
 * omap_hwmods.  This function in turn builds and registers a
 * platform_device record.  Returns an ERR_PTR() on error, or passes
 * along the return value of omap_device_register().
 */
struct platform_device __init *omap_device_build_ss(const char *pdev_name, int pdev_id,
					struct omap_hwmod **ohs, int oh_cnt,
					void *pdata, int pdata_len,
					struct omap_device_pm_latency *pm_lats,
					int pm_lats_cnt, int is_early_device)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;
	struct omap_device *od;

	if (!ohs || oh_cnt == 0 || !pdev_name)
		return ERR_PTR(-EINVAL);

	if (!pdata && pdata_len > 0)
		return ERR_PTR(-EINVAL);

	pdev = platform_device_alloc(pdev_name, pdev_id);
	if (!pdev) {
		ret = -ENOMEM;
		goto odbs_exit;
	}

	/* Set the dev_name early to allow dev_xxx in omap_device_alloc */
	if (pdev->id != -1)
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
	else
		dev_set_name(&pdev->dev, "%s", pdev->name);

	/*
	 * omap_device_alloc() returns ERR_PTR() on failure, never NULL;
	 * the previous "if (!od)" test could never detect an error and
	 * also failed to propagate the real error code.
	 */
	od = omap_device_alloc(pdev, ohs, oh_cnt, pm_lats, pm_lats_cnt);
	if (IS_ERR(od)) {
		ret = PTR_ERR(od);
		goto odbs_exit1;
	}

	ret = platform_device_add_data(pdev, pdata, pdata_len);
	if (ret)
		goto odbs_exit2;

	if (is_early_device)
		ret = omap_early_device_register(pdev);
	else
		ret = omap_device_register(pdev);
	if (ret)
		goto odbs_exit2;

	return pdev;

odbs_exit2:
	omap_device_delete(od);
odbs_exit1:
	platform_device_put(pdev);
odbs_exit:

	pr_err("omap_device: %s: build failed (%d)\n", pdev_name, ret);

	return ERR_PTR(ret);
}
/**
 * omap_early_device_register - register an omap_device as an early platform
 * device.
 * @pdev: struct platform_device * to register
 *
 * Register the underlying platform_device via
 * early_platform_add_devices().  Always returns 0.
 */
static int __init omap_early_device_register(struct platform_device *pdev)
{
	struct platform_device *pdevs[] = { pdev };

	early_platform_add_devices(pdevs, ARRAY_SIZE(pdevs));
	return 0;
}
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM callbacks installed through omap_device_pm_domain: they
 * wrap the generic platform runtime PM hooks so the omap_device is
 * idled/enabled around the driver's own callbacks.
 */
static int _od_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	/* Idle the hwmods only if the driver's suspend succeeded */
	ret = pm_generic_runtime_suspend(dev);

	if (!ret)
		omap_device_idle(pdev);

	return ret;
}

static int _od_runtime_idle(struct device *dev)
{
	/* No omap_device-specific idle handling; defer to the generic hook */
	return pm_generic_runtime_idle(dev);
}

static int _od_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	/* Enable clocks/hwmods before the driver's resume callback runs */
	omap_device_enable(pdev);

	return pm_generic_runtime_resume(dev);
}
#endif
#ifdef CONFIG_SUSPEND
static int _od_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
int ret;
ret = pm_generic_suspend_noirq(dev);
if (!ret && !pm_runtime_status_suspended(dev)) {
if (pm_generic_runtime_suspend(dev) == 0) {
if (!(od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND))
omap_device_idle(pdev);
od->flags |= OMAP_DEVICE_SUSPENDED;
}
}
return ret;
}
static int _od_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
!pm_runtime_status_suspended(dev)) {
od->flags &= ~OMAP_DEVICE_SUSPENDED;
if (!(od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND))
omap_device_enable(pdev);
pm_generic_runtime_resume(dev);
}
return pm_generic_resume_noirq(dev);
}
#else
#define _od_suspend_noirq NULL
#define _od_resume_noirq NULL
#endif
/*
 * PM domain attached to every registered omap_device: routes runtime PM
 * and the system-sleep noirq phases through the _od_* wrappers so the
 * backing hwmods are enabled/idled around the driver's own callbacks.
 */
struct dev_pm_domain omap_device_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
				   _od_runtime_idle)
		USE_PLATFORM_PM_SLEEP_OPS
		.suspend_noirq = _od_suspend_noirq,
		.resume_noirq = _od_resume_noirq,
	}
};
/**
 * omap_device_register - register an omap_device with one omap_hwmod
 * @pdev: the platform_device backing the omap_device
 *
 * Attach the omap_device PM domain to @pdev and add it to the platform
 * bus.  Returns the return value of platform_device_add().
 */
int omap_device_register(struct platform_device *pdev)
{
	pr_debug("omap_device: %s: registering\n", pdev->name);

	/* All omap_devices share the same PM domain */
	pdev->dev.pm_domain = &omap_device_pm_domain;

	return platform_device_add(pdev);
}
/* Public functions for use by device drivers through struct platform_data */

/**
 * omap_device_enable - fully activate an omap_device
 * @pdev: platform_device backing the omap_device to activate
 *
 * Make the hwmods underlying this omap_device accessible and ready to
 * operate (clocks, SYSCONFIG, etc.).  Drivers call this where they
 * would normally enable clocks.  Returns -EINVAL if the omap_device is
 * already enabled, otherwise the return value of
 * _omap_device_activate().
 */
int omap_device_enable(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
		dev_warn(&pdev->dev,
			 "omap_device: %s() called from invalid state %d\n",
			 __func__, od->_state);
		return -EINVAL;
	}

	/* Enable everything if we're enabling this device from scratch */
	if (od->_state == OMAP_DEVICE_STATE_UNKNOWN)
		od->pm_lat_level = od->pm_lats_cnt;

	ret = _omap_device_activate(od, IGNORE_WAKEUP_LAT);

	od->dev_wakeup_lat = 0;
	od->_dev_wakeup_lat_limit = UINT_MAX;
	od->_state = OMAP_DEVICE_STATE_ENABLED;

	return ret;
}
/**
 * omap_device_idle - idle an omap_device
 * @pdev: platform_device backing the omap_device to idle
 *
 * Run as many .deactivate_func() steps from the pm_lats table as the
 * device's wakeup-latency limit allows.  Drivers call this where they
 * would normally disable clocks.  Returns -EINVAL if the omap_device
 * is not currently enabled, otherwise the return value of
 * _omap_device_deactivate().
 */
int omap_device_idle(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	if (od->_state != OMAP_DEVICE_STATE_ENABLED) {
		dev_warn(&pdev->dev,
			 "omap_device: %s() called from invalid state %d\n",
			 __func__, od->_state);
		return -EINVAL;
	}

	ret = _omap_device_deactivate(od, USE_WAKEUP_LAT);

	od->_state = OMAP_DEVICE_STATE_IDLE;

	return ret;
}
/**
 * omap_device_shutdown - shut down an omap_device
 * @pdev: platform_device backing the omap_device to shut down
 *
 * Run every .deactivate_func() step in the pm_lats table, then shut
 * down each underlying hwmod.  Used when a device is being "removed"
 * or its driver unloaded.  Returns -EINVAL if the omap_device is
 * neither enabled nor idle, otherwise the return value of
 * _omap_device_deactivate().
 */
int omap_device_shutdown(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);
	int ret, i;

	if (od->_state != OMAP_DEVICE_STATE_ENABLED &&
	    od->_state != OMAP_DEVICE_STATE_IDLE) {
		dev_warn(&pdev->dev,
			 "omap_device: %s() called from invalid state %d\n",
			 __func__, od->_state);
		return -EINVAL;
	}

	ret = _omap_device_deactivate(od, IGNORE_WAKEUP_LAT);

	/* Power down every backing hwmod as well */
	for (i = 0; i < od->hwmods_cnt; i++)
		omap_hwmod_shutdown(od->hwmods[i]);

	od->_state = OMAP_DEVICE_STATE_SHUTDOWN;

	return ret;
}
/**
 * omap_device_align_pm_lat - activate/deactivate device to match wakeup lat lim
 * @pdev: platform_device backing the omap_device
 * @new_wakeup_lat_limit: new maximum wakeup latency limit in ns
 *
 * Step the device through its pm_lats table so its maximum wakeup
 * latency stays at or below @new_wakeup_lat_limit.  Intended to be
 * called by OMAP PM code whenever a device's latency limit changes
 * (e.g., via omap_pm_set_dev_wakeup_lat()).  Returns 0 when nothing
 * needs doing (limit unchanged, or device not idle), otherwise the
 * return value of _omap_device_deactivate()/_omap_device_activate().
 */
int omap_device_align_pm_lat(struct platform_device *pdev,
			     u32 new_wakeup_lat_limit)
{
	struct omap_device *od = to_omap_device(pdev);

	if (new_wakeup_lat_limit == od->dev_wakeup_lat)
		return 0;

	od->_dev_wakeup_lat_limit = new_wakeup_lat_limit;

	/* Only an idle device is re-stepped through the table */
	if (od->_state != OMAP_DEVICE_STATE_IDLE)
		return 0;

	if (new_wakeup_lat_limit > od->dev_wakeup_lat)
		return _omap_device_deactivate(od, USE_WAKEUP_LAT);
	if (new_wakeup_lat_limit < od->dev_wakeup_lat)
		return _omap_device_activate(od, USE_WAKEUP_LAT);

	/* Unreachable: equality was handled above */
	return -EINVAL;
}
/**
 * omap_device_get_pwrdm - return the powerdomain * associated with @od
 * @od: struct omap_device *
 *
 * Return the powerdomain of the first underlying omap_hwmod, or NULL
 * when @od has no hwmods.  Intended for use by core OMAP PM code.
 */
struct powerdomain *omap_device_get_pwrdm(struct omap_device *od)
{
	/*
	 * XXX Assumes that all omap_hwmod powerdomains are identical.
	 * This may not necessarily be true.  There should be a sanity
	 * check in here to WARN() if any difference appears.
	 */
	if (od->hwmods_cnt == 0)
		return NULL;

	return omap_hwmod_get_pwrdm(od->hwmods[0]);
}
/**
 * omap_device_get_rt_va - return the MPU's virtual addr for the hwmod base
 * @od: struct omap_device *
 *
 * Return the MPU's virtual address for the base of the hwmod, from
 * the ioremap() that the hwmod code does.  Only valid if there is one
 * hwmod associated with this device.  Returns NULL if there are zero
 * or more than one hwmods associated with this omap_device;
 * otherwise, passes along the return value from
 * omap_hwmod_get_mpu_rt_va().
 * (kernel-doc name fixed: it previously documented a non-existent
 * "omap_device_get_mpu_rt_va".)
 */
void __iomem *omap_device_get_rt_va(struct omap_device *od)
{
	if (od->hwmods_cnt != 1)
		return NULL;

	return omap_hwmod_get_mpu_rt_va(od->hwmods[0]);
}
/**
 * omap_device_get_by_hwmod_name() - convert a hwmod name to
 * device pointer.
 * @oh_name: name of the hwmod device
 *
 * Returns a struct device * pointer associated with the hwmod named
 * @oh_name, or an ERR_PTR() (-EINVAL for a NULL name, -ENODEV or the
 * underlying error when the hwmod, its omap_device, or its
 * platform_device is missing).  Never returns NULL.
 */
struct device *omap_device_get_by_hwmod_name(const char *oh_name)
{
	struct omap_hwmod *oh;

	if (!oh_name) {
		WARN(1, "%s: no hwmod name!\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	oh = omap_hwmod_lookup(oh_name);
	if (IS_ERR_OR_NULL(oh)) {
		WARN(1, "%s: no hwmod for %s\n", __func__,
			oh_name);
		/* Propagate a real error code; map NULL to -ENODEV */
		return ERR_PTR(oh ? PTR_ERR(oh) : -ENODEV);
	}
	if (IS_ERR_OR_NULL(oh->od)) {
		WARN(1, "%s: no omap_device for %s\n", __func__,
			oh_name);
		return ERR_PTR(oh->od ? PTR_ERR(oh->od) : -ENODEV);
	}

	if (IS_ERR_OR_NULL(oh->od->pdev))
		return ERR_PTR(oh->od->pdev ? PTR_ERR(oh->od->pdev) : -ENODEV);

	return &oh->od->pdev->dev;
}
EXPORT_SYMBOL(omap_device_get_by_hwmod_name);
/*
* Public functions intended for use in omap_device_pm_latency
* .activate_func and .deactivate_func function pointers
*/
/**
 * omap_device_enable_hwmods - call omap_hwmod_enable() on all hwmods
 * @od: struct omap_device *od
 *
 * Enable all underlying hwmods.  Returns 0.
 */
int omap_device_enable_hwmods(struct omap_device *od)
{
	struct omap_hwmod **oh = od->hwmods;
	struct omap_hwmod **end = od->hwmods + od->hwmods_cnt;

	while (oh < end)
		omap_hwmod_enable(*oh++);

	/* XXX pass along return value here? */
	return 0;
}
/**
 * omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods
 * @od: struct omap_device *od
 *
 * Idle all underlying hwmods.  Returns 0.
 */
int omap_device_idle_hwmods(struct omap_device *od)
{
	struct omap_hwmod **oh = od->hwmods;
	struct omap_hwmod **end = od->hwmods + od->hwmods_cnt;

	while (oh < end)
		omap_hwmod_idle(*oh++);

	/* XXX pass along return value here? */
	return 0;
}
/**
 * omap_device_disable_clocks - disable all main and interface clocks
 * @od: struct omap_device *od
 *
 * Disable the main functional clock and interface clock for all of
 * the omap_hwmods associated with the omap_device.  Returns 0.
 */
int omap_device_disable_clocks(struct omap_device *od)
{
	struct omap_hwmod **oh = od->hwmods;
	struct omap_hwmod **end = od->hwmods + od->hwmods_cnt;

	while (oh < end)
		omap_hwmod_disable_clocks(*oh++);

	/* XXX pass along return value here? */
	return 0;
}
/**
 * omap_device_enable_clocks - enable all main and interface clocks
 * @od: struct omap_device *od
 *
 * Enable the main functional clock and interface clock for all of
 * the omap_hwmods associated with the omap_device.  Returns 0.
 */
int omap_device_enable_clocks(struct omap_device *od)
{
	struct omap_hwmod **oh = od->hwmods;
	struct omap_hwmod **end = od->hwmods + od->hwmods_cnt;

	while (oh < end)
		omap_hwmod_enable_clocks(*oh++);

	/* XXX pass along return value here? */
	return 0;
}
/* Notifier that builds/deletes omap_devices as platform devices come and go */
static struct notifier_block platform_nb = {
	.notifier_call = _omap_device_notifier_call,
};

/*
 * Hook the platform bus at core_initcall time so DT-created platform
 * devices get their omap_device attached on BUS_NOTIFY_ADD_DEVICE.
 */
static int __init omap_device_init(void)
{
	bus_register_notifier(&platform_bus_type, &platform_nb);
	return 0;
}
core_initcall(omap_device_init);
| gpl-2.0 |
vantinh1991/F240L-JB | fs/proc/kcore.c | 4971 | 15533 | /*
* fs/proc/kcore.c kernel ELF core dumper
*
* Modelled on fs/exec.c:aout_core_dump()
* Jeremy Fitzhardinge <jeremy@sw.oz.au>
* ELF version written by David Howells <David.Howells@nexor.co.uk>
* Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
* Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
* Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
*/
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>
#define CORE_STR "CORE"
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
static struct proc_dir_entry *proc_root_kcore;
#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note name, e.g. CORE_STR */
	int type;		/* NT_* note type */
	unsigned int datasz;	/* payload size in bytes */
	void *data;		/* note payload */
};

/* List of memory ranges exported through /proc/kcore; kclist_lock guards it */
static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
/* Nonzero when the KCORE_RAM/KCORE_VMEMMAP entries need rebuilding */
static int kcore_need_update = 1;
/*
 * kclist_add - register a memory range for inclusion in /proc/kcore
 * @new: caller-owned kcore_list entry to fill in and link
 * @addr: start address of the range
 * @size: length of the range in bytes
 * @type: KCORE_* classification of the range
 *
 * Appends @new to the global kclist under the writer lock.  @new must
 * stay valid for as long as it remains on the list.
 */
void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}
/*
 * get_kcore_size - compute the apparent size of /proc/kcore
 * @nphdr: out: number of program headers (one PT_NOTE + one per range)
 * @elf_buflen: out: page-aligned size of the synthetic ELF header area
 *
 * Walks the kclist to find the highest file offset covered by any
 * registered range and adds the ELF header/notes area on top.
 * NOTE(review): iterates kclist_head without taking kclist_lock itself —
 * presumably callers hold it (__kcore_update_ram() does); verify other
 * call sites.
 */
static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	/* Highest kcore file offset spanned by any registered range */
	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen = sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}
/*
 * free_kclist_ents - unlink and kfree() every entry on @head
 * @head: list of dynamically allocated kcore_list entries
 *
 * Only valid for entries that were kmalloc'd (e.g. KCORE_RAM /
 * KCORE_VMEMMAP entries built by kcore_update_ram()).
 */
static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *ent, *next;

	list_for_each_entry_safe(ent, next, head, list) {
		list_del(&ent->list);
		kfree(ent);
	}
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
 *
 * Under the writer lock: when an update is still pending, the old
 * RAM/VMEMMAP entries are moved to a garbage list and replaced by
 * @list; otherwise another updater got here first and @list itself
 * becomes garbage.  The kcore size is refreshed either way, and the
 * garbage entries are freed outside the lock.
 */
static void __kcore_update_ram(struct list_head *list)
{
	int nphdr;
	size_t size;
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		/* Retire the stale RAM/VMEMMAP entries */
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM
				|| pos->type == KCORE_VMEMMAP)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		/* Someone else already updated; discard our list */
		list_splice(list, &garbage);
	kcore_need_update = 0;
	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}
#ifdef CONFIG_HIGHMEM
/*
 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
 * because memory hole is not as big as !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	/* Single entry covering all of lowmem */
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	/* Ownership of @ent passes to the global kclist (or is freed) */
	__kcore_update_ram(&head);
	return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	/* Page-aligned span of struct pages backing this RAM range */
	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = ALIGN(end, PAGE_SIZE);
	/* overlap check (because we have to align page */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;	/* allocation failure: report failure */
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;	/* success (or nothing to add after clipping) */

}
#else
/* Without SPARSEMEM_VMEMMAP there is no vmemmap range to register */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif
/*
 * kclist_add_private - walk_system_ram_range() callback
 * @pfn: first pfn of the RAM range
 * @nr_pages: length of the range in pages
 * @arg: struct list_head * collecting new KCORE_RAM/KCORE_VMEMMAP entries
 *
 * Builds a KCORE_RAM entry for the range, clipped to the directly
 * mapped portion of the address space, plus its vmemmap entry.
 * Returns 0 on success, -ENOMEM on allocation failure, and 1 when the
 * range is skipped (nonzero also aborts the walk; the caller treats
 * that as a failed update).
 */
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: Can happen in 32bit arch...maybe */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* cut not-mapped area. ....from ppc-32 code. */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* cut when vmalloc() area is higher than direct-map area */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}
/*
 * kcore_update_ram - rebuild the KCORE_RAM/KCORE_VMEMMAP entries
 *
 * !CONFIG_HIGHMEM variant: walks all system RAM up to the highest pfn
 * spanned by any online node, collecting entries via
 * kclist_add_private(), then swaps them into the global kclist.
 * Returns 0 on success or -ENOMEM if the walk failed.
 */
static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not inialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long node_end;
		node_end  = NODE_DATA(nid)->node_start_pfn +
			NODE_DATA(nid)->node_spanned_pages;
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		/* Any nonzero callback result aborts the whole update */
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */
/*****************************************************************************/
/*
* determine size of ELF note
*/
/*
 * Size in bytes of one ELF note record: the fixed note header plus the
 * name (with its NUL) and the descriptor, each padded to 4-byte alignment.
 */
static int notesize(struct memelfnote *en)
{
	return sizeof(struct elf_note) +
	       roundup(strlen(en->name) + 1, 4) +
	       roundup(en->datasz, 4);
}
/*****************************************************************************/
/*
* store a note in the header buffer
*/
/*
 * Serialize one ELF note into the header buffer: note header, then name,
 * then descriptor, padding the name and descriptor out to 4-byte
 * boundaries. Returns the advanced buffer position.
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	memcpy(bufp, &en, sizeof(en));
	bufp += sizeof(en);

	memcpy(bufp, men->name, en.n_namesz);
	bufp += en.n_namesz;

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char *) roundup((unsigned long)bufp, 4);
	memcpy(bufp, men->data, men->datasz);
	bufp += men->datasz;
	bufp = (char *) roundup((unsigned long)bufp, 4);

	return bufp;
}
/*
* store an ELF coredump header in the supplied buffer
* nphdr is the number of elf_phdr to insert
*/
/*
 * Build the synthetic ELF core header for /proc/kcore into @bufp:
 * ELF header, one PT_NOTE phdr, one PT_LOAD phdr per kclist entry, then
 * the three notes (PRSTATUS, PRPSINFO, TASKSTRUCT) in SVR4 style.
 * @nphdr is the total phdr count; @dataoff is where file data begins,
 * i.e. the offset added to each PT_LOAD's p_offset.
 * Caller must hold kclist_lock while the list is walked.
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header; offsets/sizes filled in below
	 * once all PT_LOAD headers have been laid out */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		/* file offset mirrors the entry's virtual address */
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status (all-zero: no real thread context) */
	notes[0].name = CORE_STR;
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info: a fake running "vmlinux" process whose
	 * args are the kernel command line */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure (current task, raw bytes) */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
/*****************************************************************************/
/*
* read from the ELF header and then kernel memory
*/
/*
 * read() handler for /proc/kcore. The file starts with a synthetic ELF
 * core header (rebuilt on every read that touches it) followed by the
 * kernel address space laid out per the kclist PT_LOAD entries.
 * Regions not covered by any entry, or whose pages are invalid, are
 * returned zero-filled rather than faulting.
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	size = get_kcore_size(&nphdr, &elf_buflen);

	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		/* GFP_ATOMIC: we still hold kclist_lock (a rwlock) here */
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	/* first chunk: up to the next page boundary of 'start' */
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* copy out page-at-a-time, choosing the access method per region */
	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			/* not described by any entry: return zeroes */
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_or_module_addr((void *)start)) {
			char * elf_buf;

			/* vmalloc/module space may have holes: bounce
			 * through a zeroed kernel buffer via vread() */
			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			vread(elf_buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between fault on source
				 * and fault on destination. When this happens
				 * we clear too and hope it will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n,
								n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		/* subsequent chunks are whole pages (or the remainder) */
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
/*
 * open() handler: restricted to CAP_SYS_RAWIO. Refreshes the RAM list
 * if memory hotplug marked it stale, and syncs the inode size with the
 * (possibly changed) core size.
 */
static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		mutex_lock(&inode->i_mutex);
		i_size_write(inode, proc_root_kcore->size);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
/* file_operations for /proc/kcore; seeking uses the generic helper. */
static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.llseek		= default_llseek,
};
#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
/*
 * Memory-hotplug notifier: when memory comes online or goes offline,
 * mark the cached RAM list stale; it is rebuilt lazily on the next
 * open of /proc/kcore.
 */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	if (action == MEM_ONLINE || action == MEM_OFFLINE) {
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}
#endif
static struct kcore_list kcore_vmalloc;
#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
* If defined, special segment is used for mapping kernel text instead of
* direct-map area. We need to create special TEXT section.
*/
/* Register the kernel text segment [_text, _end) as a KCORE_TEXT entry. */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
/* Kernel text lives in the direct map here: no separate TEXT entry. */
static void __init proc_kcore_text_init(void)
{
}
#endif
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
* MODULES_VADDR has no intersection with VMALLOC_ADDR.
*/
struct kcore_list kcore_modules;
/* Register the module mapping area as a KCORE_VMALLOC entry. */
static void __init add_modules_range(void)
{
	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
/* Modules share the vmalloc area here: nothing extra to register. */
static void __init add_modules_range(void)
{
}
#endif
/*
 * Create /proc/kcore (root-readable) and seed the kclist with the text,
 * vmalloc, module and direct-map RAM areas; register for memory-hotplug
 * events so the RAM entries can be refreshed later.
 */
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		printk(KERN_ERR "couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, 0);

	return 0;
}
module_init(proc_kcore_init);
| gpl-2.0 |
TeamJB/android_kernel_lge_hammerhead | net/ipv4/xfrm4_tunnel.c | 7275 | 2765 | /* xfrm4_tunnel.c: Generic IP tunnel transformer.
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
*/
#define pr_fmt(fmt) "IPsec: " fmt
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/protocol.h>
/*
 * Output transform for IPIP: the outer header was already built by the
 * tunnel mode code; skb_network_offset() is negative of the distance from
 * data to the network header, so this push re-exposes that outer header.
 */
static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
/* Input transform: report the inner protocol carried by the outer header. */
static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
{
	return ip_hdr(skb)->protocol;
}
/*
 * Validate and initialize an IPIP xfrm state: IPIP is only valid in
 * tunnel mode and cannot be combined with NAT-T encapsulation. The
 * transform prepends exactly one outer IPv4 header.
 */
static int ipip_init_state(struct xfrm_state *x)
{
	if (x->props.mode != XFRM_MODE_TUNNEL || x->encap)
		return -EINVAL;

	x->props.header_len = sizeof(struct iphdr);
	return 0;
}
/* IPIP states hold no private resources: nothing to release. */
static void ipip_destroy(struct xfrm_state *x)
{
}
/* xfrm type descriptor binding IPPROTO_IPIP to the handlers above. */
static const struct xfrm_type ipip_type = {
	.description	= "IPIP",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_IPIP,
	.init_state	= ipip_init_state,
	.destructor	= ipip_destroy,
	.input		= ipip_xfrm_rcv,
	.output		= ipip_output
};
/* Hand an IPIP packet to the xfrm input path, keyed by outer source. */
static int xfrm_tunnel_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
}

/* ICMP error handler: no per-tunnel state to update. */
static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
{
	return -ENOENT;
}
/* Tunnel handler for IPv4-in-IPv4; priority 2 per tunnel4 conventions. */
static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
	.handler	=	xfrm_tunnel_rcv,
	.err_handler	=	xfrm_tunnel_err,
	.priority	=	2,
};

#if IS_ENABLED(CONFIG_IPV6)
/* Same handlers, registered for IPv4 tunnels over the IPv6 family. */
static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
	.handler	=	xfrm_tunnel_rcv,
	.err_handler	=	xfrm_tunnel_err,
	.priority	=	2,
};
#endif
/*
 * Module init: register the IPIP xfrm type, then the IPPROTO_IPIP tunnel
 * handler for AF_INET (and AF_INET6 when IPv6 is enabled), unwinding the
 * earlier registrations in reverse order on any failure.
 */
static int __init ipip_init(void)
{
	if (xfrm_register_type(&ipip_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) {
		pr_info("%s: can't add xfrm handler for AF_INET\n", __func__);
		xfrm_unregister_type(&ipip_type, AF_INET);
		return -EAGAIN;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
		pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__);
		xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
		xfrm_unregister_type(&ipip_type, AF_INET);
		return -EAGAIN;
	}
#endif
	return 0;
}
/* Module exit: deregister in reverse order of registration, logging
 * (but otherwise ignoring) any failure. */
static void __exit ipip_fini(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
		pr_info("%s: can't remove xfrm handler for AF_INET6\n",
			__func__);
#endif
	if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET))
		pr_info("%s: can't remove xfrm handler for AF_INET\n",
			__func__);
	if (xfrm_unregister_type(&ipip_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
| gpl-2.0 |
XiaoJiang/linux-3.4.4 | arch/mips/sni/a20r.c | 7531 | 5061 | /*
* A20R specific code
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <asm/sni.h>
#include <asm/time.h>
/* Legacy 16550-style UART port descriptor (standard PC clock rate). */
#define PORT(_base,_irq)				\
	{						\
		.iobase		= _base,		\
		.irq		= _irq,			\
		.uartclk	= 1843200,		\
		.iotype		= UPIO_PORT,		\
		.flags		= UPF_BOOT_AUTOCONF,	\
	}

/* Two on-board serial ports at the classic PC I/O addresses. */
static struct plat_serial8250_port a20r_data[] = {
	PORT(0x3f8, 4),
	PORT(0x2f8, 3),
	{ },
};

static struct platform_device a20r_serial8250_device = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev			= {
		.platform_data	= a20r_data,
	},
};

/* DS1216 RTC: 4-byte register window in the chipset area. */
static struct resource a20r_ds1216_rsrc[] = {
        {
                .start = 0x1c081ffc,
                .end   = 0x1c081fff,
                .flags = IORESOURCE_MEM
        }
};

static struct platform_device a20r_ds1216_device = {
        .name           = "rtc-ds1216",
        .num_resources  = ARRAY_SIZE(a20r_ds1216_rsrc),
        .resource       = a20r_ds1216_rsrc
};

/* i82596 on-board Ethernet: port/channel-attention/config windows + IRQ. */
static struct resource snirm_82596_rsrc[] = {
	{
		.start = 0x18000000,
		.end   = 0x18000004,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x18010000,
		.end   = 0x18010004,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 0x1ff00000,
		.end   = 0x1ff00020,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 22,
		.end   = 22,
		.flags = IORESOURCE_IRQ
	},
	{
		.flags = 0x01                /* 16bit mpu port access */
	}
};

static struct platform_device snirm_82596_pdev = {
	.name           = "snirm_82596",
	.num_resources  = ARRAY_SIZE(snirm_82596_rsrc),
	.resource       = snirm_82596_rsrc
};

/* NCR 53c710 SCSI controller register window + IRQ. */
static struct resource snirm_53c710_rsrc[] = {
	{
		.start = 0x19000000,
		.end   = 0x190fffff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 19,
		.end   = 19,
		.flags = IORESOURCE_IRQ
	}
};

static struct platform_device snirm_53c710_pdev = {
	.name           = "snirm_53c710",
	.num_resources  = ARRAY_SIZE(snirm_53c710_rsrc),
	.resource       = snirm_53c710_rsrc
};

/* SC26xx DUART register window + IRQ. */
static struct resource sc26xx_rsrc[] = {
	{
		.start = 0x1c070000,
		.end   = 0x1c0700ff,
		.flags = IORESOURCE_MEM
	},
	{
		.start = 20,
		.end   = 20,
		.flags = IORESOURCE_IRQ
	}
};

/* Modem-signal pin mapping per port, 4 bits per signal. */
static unsigned int sc26xx_data[2] = {
	/* DTR   |    RTS    |    DSR    |    CTS     |    DCD     |    RI   */
	(8 << 0) | (4 << 4) | (6 << 8) | (0 << 12) | (6 << 16) | (0 << 20),
	(3 << 0) | (2 << 4) | (1 << 8) | (2 << 12) | (3 << 16) | (4 << 20)
};

static struct platform_device sc26xx_pdev = {
	.name           = "SC26xx",
	.num_resources  = ARRAY_SIZE(sc26xx_rsrc),
	.resource       = sc26xx_rsrc,
	.dev	= {
		.platform_data	= sc26xx_data,
	}
};
/*
 * Acknowledge the A20R hardware interrupt. Interrupts are masked (IM bit
 * raised in c0_status) around a hand-written read-modify-write sequence on
 * the PCIMT_UCONF register with a short delay loop in between.
 * NOTE(review): the exact bus-acknowledge protocol encoded in this asm
 * (including the 0xbc000000 access and the .align 8 / branch dance) follows
 * SNI A20R hardware behavior — do not reorder or "clean up" this sequence.
 * Returns the original c0_status value, which the caller's context restores.
 */
static u32 a20r_ack_hwint(void)
{
	u32 status = read_c0_status();

	write_c0_status(status | 0x00010000);
	asm volatile(
	"	.set	push			\n"
	"	.set	noat			\n"
	"	.set	noreorder		\n"
	"	lw	$1, 0(%0)		\n"
	"	sb	$0, 0(%1)		\n"
	"	sync				\n"
	"	lb	%1, 0(%1)		\n"
	"	b	1f			\n"
	"	ori	%1, $1, 2		\n"
	"	.align	8			\n"
	"1:					\n"
	"	nop				\n"
	"	sw	%1, 0(%0)		\n"
	"	sync				\n"
	"	li	%1, 0x20		\n"
	"2:					\n"
	"	nop				\n"
	"	bnez	%1,2b			\n"
	"	addiu	%1, -1			\n"
	"	sw	$1, 0(%0)		\n"
	"	sync				\n"
		".set	pop			\n"
	:
	: "Jr" (PCIMT_UCONF), "Jr" (0xbc000000));
	write_c0_status(status);

	return status;
}
/* Enable an A20R IRQ by setting its IM bit in c0_status (IRQ base maps
 * to IM bit 8). */
static inline void unmask_a20r_irq(struct irq_data *d)
{
	set_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
	irq_enable_hazard();
}

/* Disable an A20R IRQ by clearing its IM bit in c0_status. */
static inline void mask_a20r_irq(struct irq_data *d)
{
	clear_c0_status(0x100 << (d->irq - SNI_A20R_IRQ_BASE));
	irq_disable_hazard();
}

static struct irq_chip a20r_irq_type = {
	.name		= "A20R",
	.irq_mask	= mask_a20r_irq,
	.irq_unmask	= unmask_a20r_irq,
};
/*
* hwint 0 receive all interrupts
*/
/*
 * Dispatch one pending A20R interrupt: with IE_IRQ0 temporarily masked,
 * acknowledge the hardware, then pick the lowest pending cause bit among
 * IP3..IP7 (mask 0xf8 after shifting cause>>8) and hand it to do_IRQ().
 */
static void a20r_hwint(void)
{
	u32 cause, status;
	int irq;

	clear_c0_status(IE_IRQ0);
	status = a20r_ack_hwint();
	cause = read_c0_cause();

	irq = ffs(((cause & status) >> 8) & 0xf8);
	if (likely(irq > 0))
		do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
	set_c0_status(IE_IRQ0);
}
/*
 * Register the A20R irq_chip for IRQs base+2..base+7 (level-triggered),
 * install a20r_hwint as the board dispatch hook, unmask IE_IRQ0, and
 * chain the ISA cascade on IRQ base+3.
 */
void __init sni_a20r_irq_init(void)
{
	int i;

	for (i = SNI_A20R_IRQ_BASE + 2 ; i < SNI_A20R_IRQ_BASE + 8; i++)
		irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
	sni_hwint = a20r_hwint;
	change_c0_status(ST0_IM, IE_IRQ0);
	setup_irq(SNI_A20R_IRQ_BASE + 3, &sni_isa_irq);
}
/* Board-specific init hook — currently empty. */
void sni_a20r_init(void)
{
	/* FIXME, remove if not needed */
}
/*
 * Register the on-board platform devices, but only on the board variants
 * that actually carry them (Tower/OASIC and Minitower); other sni_brd_type
 * values register nothing. Always returns 0.
 */
static int __init snirm_a20r_setup_devinit(void)
{
	switch (sni_brd_type) {
	case SNI_BRD_TOWER_OASIC:
	case SNI_BRD_MINITOWER:
	        platform_device_register(&snirm_82596_pdev);
	        platform_device_register(&snirm_53c710_pdev);
	        platform_device_register(&sc26xx_pdev);
	        platform_device_register(&a20r_serial8250_device);
	        platform_device_register(&a20r_ds1216_device);
		sni_eisa_root_init();
	        break;
	}
	return 0;
}
| gpl-2.0 |
edoko/android_samsung_galaxy_pop | drivers/media/dvb/mantis/mantis_core.c | 8299 | 5893 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "mantis_common.h"
#include "mantis_core.h"
#include "mantis_vp1033.h"
#include "mantis_vp1034.h"
#include "mantis_vp1041.h"
#include "mantis_vp2033.h"
#include "mantis_vp2040.h"
#include "mantis_vp3030.h"
/*
 * Read @length bytes from the board EEPROM at I2C address 0x50.
 * data[0] carries the EEPROM offset on input (write phase) and is
 * overwritten by the data returned by the combined read phase.
 * Returns 0 on success or the negative i2c_transfer() error.
 */
static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
{
	int err;
	struct i2c_msg msg[] = {
		{
			.addr = 0x50,
			.flags = 0,
			.buf = data,
			.len = 1
		}, {
			.addr = 0x50,
			.flags = I2C_M_RD,
			.buf = data,
			.len = length
		},
	};

	err = i2c_transfer(&mantis->adapter, msg, 2);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1,
			"ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
			err, data[0], data[1]);

		return err;
	}

	return 0;
}
/*
 * Write @length bytes (offset byte plus payload) to the board EEPROM at
 * I2C address 0x50. Returns 0 on success or the negative transfer error.
 */
static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
{
	int err;
	struct i2c_msg msg = {
		.addr = 0x50,
		.flags = 0,
		.buf = data,
		.len = length
	};

	err = i2c_transfer(&mantis->adapter, &msg, 1);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1,
			"ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
			err, length, data[0], data[1]);

		return err;
	}

	return 0;
}
/*
 * Fetch the 6-byte MAC address from EEPROM offset 0x08 (the offset is
 * seeded into mac_address[0] and consumed by read_eeprom_byte()'s write
 * phase). Returns 0 on success or the negative read error.
 */
static int get_mac_address(struct mantis_pci *mantis)
{
	int err;

	mantis->mac_address[0] = 0x08;
	err = read_eeprom_byte(mantis, &mantis->mac_address[0], 6);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis EEPROM read error");

		return err;
	}
	dprintk(verbose, MANTIS_ERROR, 0,
		"    MAC Address=[%pM]\n", mantis->mac_address);

	return 0;
}
#define MANTIS_MODEL_UNKNOWN "UNKNOWN"
#define MANTIS_DEV_UNKNOWN "UNKNOWN"
struct mantis_hwconfig unknown_device = {
.model_name = MANTIS_MODEL_UNKNOWN,
.dev_type = MANTIS_DEV_UNKNOWN,
};
/*
 * Select the per-card hardware configuration from the PCI subsystem
 * device ID; unknown boards fall back to the placeholder config.
 */
static void mantis_load_config(struct mantis_pci *mantis)
{
	switch (mantis->subsystem_device) {
	case MANTIS_VP_1033_DVB_S:	/* VP-1033 */
		mantis->hwconfig = &vp1033_mantis_config;
		break;
	case MANTIS_VP_1034_DVB_S:	/* VP-1034 */
		mantis->hwconfig = &vp1034_mantis_config;
		break;
	case MANTIS_VP_1041_DVB_S2:	/* VP-1041 */
	case TECHNISAT_SKYSTAR_HD2:
		mantis->hwconfig = &vp1041_mantis_config;
		break;
	case MANTIS_VP_2033_DVB_C:	/* VP-2033 */
		mantis->hwconfig = &vp2033_mantis_config;
		break;
	case MANTIS_VP_2040_DVB_C:	/* VP-2040 */
	case TERRATEC_CINERGY_C_PCI:	/* VP-2040 clone */
	case TECHNISAT_CABLESTAR_HD2:
		mantis->hwconfig = &vp2040_mantis_config;
		break;
	case MANTIS_VP_3030_DVB_T:	/* VP-3030 */
		mantis->hwconfig = &vp3030_mantis_config;
		break;
	default:
		mantis->hwconfig = &unknown_device;
		break;
	}
}
/*
 * Bring up a Mantis bridge: pick the board config, then initialize the
 * I2C adapter, MAC address, DMA engine, DVB adapter and UART, in that
 * order. Returns 0 or the first sub-init's negative error.
 * NOTE(review): later sub-init failures do not unwind the earlier
 * initializations — confirm the caller handles teardown.
 */
int mantis_core_init(struct mantis_pci *mantis)
{
	int err = 0;

	mantis_load_config(mantis);
	dprintk(verbose, MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
		mantis->hwconfig->model_name, mantis->hwconfig->dev_type,
		mantis->pdev->bus->number, PCI_SLOT(mantis->pdev->devfn), PCI_FUNC(mantis->pdev->devfn));
	dprintk(verbose, MANTIS_ERROR, 0, "    Mantis Rev %d [%04x:%04x], ",
		mantis->revision,
		mantis->subsystem_vendor, mantis->subsystem_device);
	dprintk(verbose, MANTIS_ERROR, 0,
		"irq: %d, latency: %d\n    memory: 0x%lx, mmio: 0x%p\n",
		mantis->pdev->irq, mantis->latency,
		mantis->mantis_addr, mantis->mantis_mmio);

	err = mantis_i2c_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis I2C init failed");
		return err;
	}
	err = get_mac_address(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "get MAC address failed");
		return err;
	}
	err = mantis_dma_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_ERROR, 1, "Mantis DMA init failed");
		return err;
	}
	err = mantis_dvb_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_DEBUG, 1, "Mantis DVB init failed");
		return err;
	}
	err = mantis_uart_init(mantis);
	if (err < 0) {
		dprintk(verbose, MANTIS_DEBUG, 1, "Mantis UART init failed");
		return err;
	}

	return 0;
}
/*
 * Tear down a Mantis bridge: stop the DMA engine first, then release
 * the UART, DMA, DVB and I2C resources set up by mantis_core_init().
 * Sub-exit failures are only logged; always returns 0.
 */
int mantis_core_exit(struct mantis_pci *mantis)
{
	mantis_dma_stop(mantis);
	dprintk(verbose, MANTIS_ERROR, 1, "DMA engine stopping");

	mantis_uart_exit(mantis);
	/* mantis_uart_exit() reports no status; the old message claimed
	 * "UART exit failed" unconditionally, which was misleading. */
	dprintk(verbose, MANTIS_ERROR, 1, "UART exit");

	if (mantis_dma_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "DMA exit failed");
	if (mantis_dvb_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "DVB exit failed");
	if (mantis_i2c_exit(mantis) < 0)
		dprintk(verbose, MANTIS_ERROR, 1, "I2C adapter delete.. failed");

	return 0;
}
/*
 * Turn the given GPIO output bit on or off: read the current GPIF
 * address register, update the cached gpio_status with @bitpos
 * set/cleared per @value, and write it back (latched via GPIF_DOUT).
 */
void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
{
	u32 cur;

	cur = mmread(MANTIS_GPIF_ADDR);
	if (value)
		mantis->gpio_status = cur | (1 << bitpos);
	else
		mantis->gpio_status = cur & (~(1 << bitpos));

	mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
	mmwrite(0x00, MANTIS_GPIF_DOUT);
	udelay(100);	/* let the hardware settle */
}
/*
 * Route the transport stream: direction == 1 sends TS through the CI
 * slot, 0 bypasses it. Bit 2 of register 0x28 is toggled in opposite
 * orders for the two directions (pulse high-then-low vs low-then-high).
 */
void mantis_set_direction(struct mantis_pci *mantis, int direction)
{
	u32 reg;

	reg = mmread(0x28);
	dprintk(verbose, MANTIS_DEBUG, 1, "TS direction setup");
	if (direction == 0x01) {
		/* to CI */
		reg |= 0x04;
		mmwrite(reg, 0x28);
		reg &= 0xff - 0x04;
		mmwrite(reg, 0x28);
	} else {
		reg &= 0xff - 0x04;
		mmwrite(reg, 0x28);
		reg |= 0x04;
		mmwrite(reg, 0x28);
	}
}
| gpl-2.0 |
vredniiy/sprout | arch/unicore32/kernel/irq.c | 8299 | 8524 | /*
* linux/arch/unicore32/kernel/irq.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/syscore_ops.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
#include "setup.h"
/*
* PKUnity GPIO edge detection for IRQs:
* IRQs are generated on Falling-Edge, Rising-Edge, or both.
* Use this instead of directly setting GRER/GFER.
*/
static int GPIO_IRQ_rising_edge;
static int GPIO_IRQ_falling_edge;
static int GPIO_IRQ_mask = 0;
#define GPIO_MASK(irq) (1 << (irq - IRQ_GPIO0))
/*
 * Configure the trigger edges for a GPIO IRQ. Low GPIOs (irq below
 * IRQ_GPIOHIGH) map bit-for-bit to the irq number; high GPIOs use
 * GPIO_MASK(). IRQ_TYPE_PROBE leaves already-configured lines alone and
 * enables both edges otherwise. The rising/falling state is cached and
 * only lines currently enabled in GPIO_IRQ_mask are written to GRER/GFER.
 */
static int puv3_gpio_type(struct irq_data *d, unsigned int type)
{
	unsigned int mask;

	if (d->irq < IRQ_GPIOHIGH)
		mask = 1 << d->irq;
	else
		mask = GPIO_MASK(d->irq);

	if (type == IRQ_TYPE_PROBE) {
		if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & IRQ_TYPE_EDGE_RISING)
		GPIO_IRQ_rising_edge |= mask;
	else
		GPIO_IRQ_rising_edge &= ~mask;
	if (type & IRQ_TYPE_EDGE_FALLING)
		GPIO_IRQ_falling_edge |= mask;
	else
		GPIO_IRQ_falling_edge &= ~mask;

	writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
	writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);

	return 0;
}
/*
* GPIO IRQs must be acknowledged. This is for IRQs from 0 to 7.
*/
/* Ack a low GPIO IRQ (0..7) by clearing its edge-detect status bit. */
static void puv3_low_gpio_ack(struct irq_data *d)
{
	writel((1 << d->irq), GPIO_GEDR);
}

/* Mask/unmask a low GPIO IRQ directly in the interrupt controller. */
static void puv3_low_gpio_mask(struct irq_data *d)
{
	writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
}

static void puv3_low_gpio_unmask(struct irq_data *d)
{
	writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
}

/* Arm (or disarm) the line as a wakeup source in the PM wake register. */
static int puv3_low_gpio_wake(struct irq_data *d, unsigned int on)
{
	if (on)
		writel(readl(PM_PWER) | (1 << d->irq), PM_PWER);
	else
		writel(readl(PM_PWER) & ~(1 << d->irq), PM_PWER);
	return 0;
}

static struct irq_chip puv3_low_gpio_chip = {
	.name		= "GPIO-low",
	.irq_ack	= puv3_low_gpio_ack,
	.irq_mask	= puv3_low_gpio_mask,
	.irq_unmask	= puv3_low_gpio_unmask,
	.irq_set_type	= puv3_gpio_type,
	.irq_set_wake	= puv3_low_gpio_wake,
};
/*
* IRQ8 (GPIO0 through 27) handler. We enter here with the
* irq_controller_lock held, and IRQs disabled. Decode the IRQ
* and call the handler.
*/
/*
 * Chained handler for IRQ_GPIOHIGH: all high GPIO lines (GPIO0..27) are
 * collected into this one parent IRQ. Reads GEDR, acks every pending
 * source up front, dispatches each set bit from IRQ_GPIO0 upward, and
 * re-reads GEDR until no sources remain (new edges may arrive meanwhile).
 */
static void
puv3_gpio_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int mask;

	mask = readl(GPIO_GEDR);
	do {
		/*
		 * clear down all currently active IRQ sources.
		 * We will be processing them all.
		 */
		writel(mask, GPIO_GEDR);

		irq = IRQ_GPIO0;
		do {
			if (mask & 1)
				generic_handle_irq(irq);
			mask >>= 1;
			irq++;
		} while (mask);
		mask = readl(GPIO_GEDR);
	} while (mask);
}
/*
* GPIO0-27 edge IRQs need to be handled specially.
* In addition, the IRQs are all collected up into one bit in the
* interrupt controller registers.
*/
/* Ack a high GPIO IRQ by clearing its GEDR edge-status bit. */
static void puv3_high_gpio_ack(struct irq_data *d)
{
	unsigned int mask = GPIO_MASK(d->irq);

	writel(mask, GPIO_GEDR);
}

/* Mask: drop the line from the enabled set and from both edge registers. */
static void puv3_high_gpio_mask(struct irq_data *d)
{
	unsigned int mask = GPIO_MASK(d->irq);

	GPIO_IRQ_mask &= ~mask;

	writel(readl(GPIO_GRER) & ~mask, GPIO_GRER);
	writel(readl(GPIO_GFER) & ~mask, GPIO_GFER);
}

/* Unmask: re-enable the line and restore its cached edge configuration. */
static void puv3_high_gpio_unmask(struct irq_data *d)
{
	unsigned int mask = GPIO_MASK(d->irq);

	GPIO_IRQ_mask |= mask;

	writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
	writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
}

/* High GPIOs wake as a group via the single PM_PWER_GPIOHIGH bit. */
static int puv3_high_gpio_wake(struct irq_data *d, unsigned int on)
{
	if (on)
		writel(readl(PM_PWER) | PM_PWER_GPIOHIGH, PM_PWER);
	else
		writel(readl(PM_PWER) & ~PM_PWER_GPIOHIGH, PM_PWER);
	return 0;
}

static struct irq_chip puv3_high_gpio_chip = {
	.name		= "GPIO-high",
	.irq_ack	= puv3_high_gpio_ack,
	.irq_mask	= puv3_high_gpio_mask,
	.irq_unmask	= puv3_high_gpio_unmask,
	.irq_set_type	= puv3_gpio_type,
	.irq_set_wake	= puv3_high_gpio_wake,
};
/*
* We don't need to ACK IRQs on the PKUnity unless they're GPIOs
* this is for internal IRQs i.e. from 8 to 31.
*/
/* Mask/unmask an internal (non-GPIO, IRQ 8..31) source in the INTC;
 * these need no explicit ack, so irq_ack aliases the mask operation. */
static void puv3_mask_irq(struct irq_data *d)
{
	writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
}

static void puv3_unmask_irq(struct irq_data *d)
{
	writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
}

/*
 * Apart form GPIOs, only the RTC alarm can be a wakeup event.
 */
static int puv3_set_wake(struct irq_data *d, unsigned int on)
{
	if (d->irq == IRQ_RTCAlarm) {
		if (on)
			writel(readl(PM_PWER) | PM_PWER_RTC, PM_PWER);
		else
			writel(readl(PM_PWER) & ~PM_PWER_RTC, PM_PWER);
		return 0;
	}
	return -EINVAL;
}

static struct irq_chip puv3_normal_chip = {
	.name		= "PKUnity-v3",
	.irq_ack	= puv3_mask_irq,
	.irq_mask	= puv3_mask_irq,
	.irq_unmask	= puv3_unmask_irq,
	.irq_set_wake	= puv3_set_wake,
};
static struct resource irq_resource = {
.name = "irqs",
.start = io_v2p(PKUNITY_INTC_BASE),
.end = io_v2p(PKUNITY_INTC_BASE) + 0xFFFFF,
};
static struct puv3_irq_state {
unsigned int saved;
unsigned int icmr;
unsigned int iclr;
unsigned int iccr;
} puv3_irq_state;
/*
 * Syscore suspend: save the INTC mask/level/control registers, disable
 * all GPIO-based interrupts, restrict the edge-detect registers to the
 * configured wakeup sources, and flush pending GPIO edge status.
 */
static int puv3_irq_suspend(void)
{
	struct puv3_irq_state *st = &puv3_irq_state;

	st->saved = 1;
	st->icmr = readl(INTC_ICMR);
	st->iclr = readl(INTC_ICLR);
	st->iccr = readl(INTC_ICCR);

	/*
	 * Disable all GPIO-based interrupts.
	 */
	writel(readl(INTC_ICMR) & ~(0x1ff), INTC_ICMR);

	/*
	 * Set the appropriate edges for wakeup.
	 */
	writel(readl(PM_PWER) & GPIO_IRQ_rising_edge, GPIO_GRER);
	writel(readl(PM_PWER) & GPIO_IRQ_falling_edge, GPIO_GFER);

	/*
	 * Clear any pending GPIO interrupts.
	 */
	writel(readl(GPIO_GEDR), GPIO_GEDR);

	return 0;
}
/*
 * Syscore resume: restore the saved INTC state and re-apply the cached
 * GPIO edge configuration (only if a suspend actually saved state).
 */
static void puv3_irq_resume(void)
{
	struct puv3_irq_state *st = &puv3_irq_state;

	if (st->saved) {
		writel(st->iccr, INTC_ICCR);
		writel(st->iclr, INTC_ICLR);

		writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
		writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);

		/* restore the mask last so sources re-enable atomically */
		writel(st->icmr, INTC_ICMR);
	}
}
static struct syscore_ops puv3_irq_syscore_ops = {
.suspend = puv3_irq_suspend,
.resume = puv3_irq_resume,
};
/* Hook the suspend/resume ops into the syscore machinery at boot. */
static int __init puv3_irq_init_syscore(void)
{
	register_syscore_ops(&puv3_irq_syscore_ops);
	return 0;
}
/*
 * Board IRQ setup: claim the INTC MMIO resource, quiesce the controller
 * (mask everything, clear edge config and pending status), then install
 * the three irq_chips: low GPIOs (edge), internal sources (level), and
 * high GPIOs (edge) demultiplexed behind the IRQ_GPIOHIGH chained handler.
 */
void __init init_IRQ(void)
{
	unsigned int irq;

	request_resource(&iomem_resource, &irq_resource);

	/* disable all IRQs */
	writel(0, INTC_ICMR);

	/* all IRQs are IRQ, not REAL */
	writel(0, INTC_ICLR);

	/* clear all GPIO edge detects */
	writel(FMASK(8, 0) & ~FIELD(1, 1, GPI_SOFF_REQ), GPIO_GPIR);
	writel(0, GPIO_GFER);
	writel(0, GPIO_GRER);
	writel(0x0FFFFFFF, GPIO_GEDR);

	writel(1, INTC_ICCR);

	/* IRQs 0..IRQ_GPIOHIGH-1: low GPIO lines, edge-triggered */
	for (irq = 0; irq < IRQ_GPIOHIGH; irq++) {
		irq_set_chip(irq, &puv3_low_gpio_chip);
		irq_set_handler(irq, handle_edge_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
			0);
	}

	/* internal sources between the GPIO ranges: level-triggered */
	for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) {
		irq_set_chip(irq, &puv3_normal_chip);
		irq_set_handler(irq, handle_level_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOAUTOEN,
			IRQ_NOPROBE);
	}

	/* GPIO0..27: edge-triggered, behind the chained GPIOHIGH parent */
	for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) {
		irq_set_chip(irq, &puv3_high_gpio_chip);
		irq_set_handler(irq, handle_edge_irq);
		irq_modify_status(irq,
			IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
			0);
	}

	/*
	 * Install handler for GPIO 0-27 edge detect interrupts
	 */
	irq_set_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
	irq_set_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);

#ifdef CONFIG_PUV3_GPIO
	puv3_init_gpio();
#endif
}
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
* own 'handler'
*/
/*
 * Low-level entry for hardware IRQs: bracket the dispatch with
 * irq_enter()/irq_exit(), reject out-of-range irq numbers with a
 * rate-limited warning instead of crashing, and restore the previous
 * irq regs pointer on the way out.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
}
| gpl-2.0 |
vm03/android_kernel_lge_msm8610 | arch/x86/platform/olpc/olpc_ofw.c | 12395 | 2761 | #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/olpc_ofw.h>
/* address of OFW callback interface; will be NULL if OFW isn't found */
static int (*olpc_ofw_cif)(int *);
/* page dir entry containing OFW's pgdir table; filled in by head_32.S */
u32 olpc_ofw_pgd __initdata;
static DEFINE_SPINLOCK(ofw_lock);
#define MAXARGS 10
/*
 * Make OFW's page-directory entry permanent: temporarily map the pgd
 * page OFW left behind (address stashed in olpc_ofw_pgd by head_32.S),
 * copy its PDE into the kernel's swapper_pg_dir, and unmap. If the
 * remap fails, OFW callbacks are disabled entirely.
 */
void __init setup_olpc_ofw_pgd(void)
{
	pgd_t *base, *ofw_pde;

	if (!olpc_ofw_cif)
		return;

	/* fetch OFW's PDE */
	base = early_ioremap(olpc_ofw_pgd, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
	if (!base) {
		printk(KERN_ERR "failed to remap OFW's pgd - disabling OFW!\n");
		olpc_ofw_cif = NULL;
		return;
	}
	ofw_pde = &base[OLPC_OFW_PDE_NR];

	/* install OFW's PDE permanently into the kernel's pgtable */
	set_pgd(&swapper_pg_dir[OLPC_OFW_PDE_NR], *ofw_pde);
	/* implicit optimization barrier here due to uninline function return */
	early_iounmap(base, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
}
/*
 * Invoke an Open Firmware client-interface call named @name with
 * @nr_args input cells and @nr_res result cells. The argument array is
 * laid out as [name, nr_args, nr_res, args..., results...]; on success
 * the result cells are copied out through @res. All calls are
 * serialized under ofw_lock with interrupts off (OFW is not reentrant).
 * Returns -EIO if OFW is unavailable, else the CIF's own return code.
 */
int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
		void **res)
{
	int ofw_args[MAXARGS + 3];
	unsigned long flags;
	int ret, i, *p;

	BUG_ON(nr_args + nr_res > MAXARGS);

	if (!olpc_ofw_cif)
		return -EIO;

	ofw_args[0] = (int)name;
	ofw_args[1] = nr_args;
	ofw_args[2] = nr_res;

	p = &ofw_args[3];
	for (i = 0; i < nr_args; i++, p++)
		*p = (int)args[i];

	/* call into ofw */
	spin_lock_irqsave(&ofw_lock, flags);
	ret = olpc_ofw_cif(ofw_args);
	spin_unlock_irqrestore(&ofw_lock, flags);

	/* p now points just past the args: copy the result cells out */
	if (!ret) {
		for (i = 0; i < nr_res; i++, p++)
			*((int *)res[i]) = *p;
	}

	return ret;
}
bool olpc_ofw_present(void)
{
return olpc_ofw_cif != NULL;
}
EXPORT_SYMBOL_GPL(olpc_ofw_present);
/* OFW cif _should_ be above this address; anything lower is bogus */
#define OFW_MIN 0xff000000

/* OFW starts on a 1MB boundary */
#define OFW_BOUND (1<<20)
/*
 * Early-boot probe for an Open Firmware client interface: validate the
 * header left by the bootloader, sanity-check the callback address and
 * reserve the top of the address space where OFW lives.
 */
void __init olpc_ofw_detect(void)
{
	struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
	unsigned long base;

	/* ensure OFW booted us by checking for "OFW " string */
	if (hdr->ofw_magic != OLPC_OFW_SIG)
		return;

	olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;

	if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
		printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
				(unsigned long)olpc_ofw_cif);
		olpc_ofw_cif = NULL;
		return;
	}

	/* determine where OFW starts in memory */
	base = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
	printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
			(unsigned long)olpc_ofw_cif, (-base) >> 20);
	reserve_top_address(-base);
}
/* Late-init check: true iff the OFW interface survived setup_olpc_ofw_pgd(). */
bool __init olpc_ofw_is_installed(void)
{
	return olpc_ofw_present();
}
| gpl-2.0 |
fishbowlFX/CM11-LGD325ds_kernel | drivers/net/fddi/skfp/hwmtm.c | 12651 | 56623 | /******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
#ifndef lint
static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ;
#endif
#define HWMTM
#ifndef FDDI
#define FDDI
#endif
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"
/*
-------------------------------------------------------------
DOCUMENTATION
-------------------------------------------------------------
BEGIN_MANUAL_ENTRY(DOCUMENTATION)
T B D
END_MANUAL_ENTRY
*/
/*
-------------------------------------------------------------
LOCAL VARIABLES:
-------------------------------------------------------------
*/
#ifdef COMMON_MB_POOL
/* One SMT MBuf pool shared by all adapters instead of one pool per SMC. */
static	SMbuf *mb_start = 0 ;		/* base of the shared MBuf array */
static	SMbuf *mb_free = 0 ;		/* head of the free-MBuf list */
static	int mb_init = FALSE ;		/* TRUE once the pool is built */
static	int call_count = 0 ;		/* # mac_drv_check_space() calls */
#endif
/*
-------------------------------------------------------------
EXTERNE VARIABLES:
-------------------------------------------------------------
*/
#ifdef DEBUG
#ifndef DEBUG_BRD
extern struct smt_debug debug ;
#endif
#endif
#ifdef NDIS_OS2
extern u_char offDepth ;
extern u_char force_irq_pending ;
#endif
/*
-------------------------------------------------------------
LOCAL FUNCTIONS:
-------------------------------------------------------------
*/
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start,
int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);
/*
-------------------------------------------------------------
EXTERNAL FUNCTIONS:
-------------------------------------------------------------
*/
/* The external SMT functions are listed in cmtdef.h */
extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd,
int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd,
int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
volatile struct s_smt_fp_rxd *rxd, int frag_count);
#ifdef USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif
#ifdef ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif
extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);
#ifdef NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
int flag);
#endif
extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
int la_len);
/*
-------------------------------------------------------------
PUBLIC FUNCTIONS:
-------------------------------------------------------------
*/
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
int frame_status);
int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
int frame_status);
u_int mac_drv_check_space(void);
SMbuf* smt_get_mbuf(struct s_smc *smc);
#ifdef DEBUG
void mac_drv_debug_lev(void);
#endif
/*
-------------------------------------------------------------
MACROS:
-------------------------------------------------------------
*/
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
#else
#define UNUSED(x)
#endif
#endif
#ifdef USE_CAN_ADDR
#define MA smc->hw.fddi_canon_addr.a
#define GROUP_ADDR_BIT 0x01
#else
#define MA smc->hw.fddi_home_addr.a
#define GROUP_ADDR_BIT 0x80
#endif
#define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)
#ifdef MB_OUTSIDE_SMC
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
MAX_MBUF*sizeof(SMbuf))
#define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif
/*
* define critical read for 16 Bit drivers
*/
#if defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
#else
#define CR_READ(var) (__le32)(var)
#endif
#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
IS_R1_C | IS_XA_C | IS_XS_C)
/*
-------------------------------------------------------------
INIT- AND SMT FUNCTIONS:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(mac_drv_check_space)
* u_int mac_drv_check_space()
*
* function DOWNCALL (drvsr.c)
* This function calculates the needed non virtual
* memory for MBufs, RxD and TxD descriptors etc.
* needed by the driver.
*
* return u_int memory in bytes
*
* END_MANUAL_ENTRY
*/
/*
 * Report how much non-virtual memory the hardware module will request
 * from the OS layer (descriptor area, and MBuf pool when it lives
 * outside the SMC).  With a shared MBuf pool only the first caller
 * pays for the pool; later callers need the descriptor area only.
 */
u_int mac_drv_check_space(void)
{
#ifdef	MB_OUTSIDE_SMC
#ifdef	COMMON_MB_POOL
	call_count++ ;
	return (call_count == 1) ? EXT_VIRT_MEM : EXT_VIRT_MEM_2;
#else
	return EXT_VIRT_MEM;
#endif
#else
	return 0;
#endif
}
/*
* BEGIN_MANUAL_ENTRY(mac_drv_init)
* void mac_drv_init(smc)
*
* function DOWNCALL (drvsr.c)
* In this function the hardware module allocates it's
* memory.
* The operating system dependent module should call
* mac_drv_init once, after the adatper is detected.
* END_MANUAL_ENTRY
*/
/*
 * Allocate the non-paged memory the hardware module needs: the shared
 * RxD/TxD descriptor area and, depending on configuration, the SMT MBuf
 * pool.  Returns 0 on success, 1 if any allocation fails.
 */
int mac_drv_init(struct s_smc *smc)
{
	/* descriptors are DMA'd by the BMU; their size must stay 16-aligned */
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get the required memory for the RxDs and TxDs
	 */
	/* one extra descriptor so init_fddi_driver() can 16-byte-align it */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space the hwm modul can't work */
	}

	/*
	 * get the memory for the SMT MBufs
	 */
#ifndef	MB_OUTSIDE_SMC
	/* MBufs live inside the SMC structure itself */
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	/* one private MBuf pool per adapter */
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space the hwm modul can't work */
	}
#else
	/* shared MBuf pool for all adapters: allocate on first call only */
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space the hwm modul can't work */
		}
	}
#endif
#endif
	return 0;
}
/*
* BEGIN_MANUAL_ENTRY(init_driver_fplus)
* init_driver_fplus(smc)
*
* Sets hardware modul specific values for the mode register 2
* (e.g. the byte alignment for the received frames, the position of the
* least significant byte etc.)
* END_MANUAL_ENTRY
*/
/*
 * Preset the FORMAC+ mode register 2/3 and frame-select register init
 * values used when the chip is (re)initialized: byte alignment of
 * received frames, parity checking on PCI, and address bit swapping
 * when canonical addresses are in use.
 */
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	/* PCI boards also check/generate parity on the bus interface */
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}
/*
 * Link 'count' descriptors starting at 'start' into a circular ring:
 * each descriptor receives the virtual and physical address of its
 * successor, and the last one points back to 'start'.  The whole ring
 * is then flushed for the device.  Returns the physical address of the
 * first descriptor (where the BMU should begin).
 */
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long	phys ;

	DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
	/* chain descriptor i to its successor i+1 */
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;		/* descr is owned by the host */
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
	/* close the ring: the last descriptor points back to the first */
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	/* make the completed ring visible to the device */
	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}
/*
 * Build the async (QUEUE_A0) and sync (QUEUE_S) transmit descriptor
 * rings directly behind the receive descriptors in the shared area,
 * initialize the queue bookkeeping and load each ring's start address
 * into the corresponding transmit BMU register.
 */
static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;

	/* ds->txd_ntdadr holds the physical address of the next descriptor */
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;

	/* the sync ring follows immediately after the async ring */
	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;
}
/*
 * Build the receive descriptor ring at the start of the descriptor
 * area, initialize the queue bookkeeping and load the ring's start
 * address into the receive BMU register.
 */
static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the receive descriptors
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;

	/* ds->rxd_nrdadr holds the physical address of the next descriptor */
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;
}
/*
* BEGIN_MANUAL_ENTRY(init_fddi_driver)
* void init_fddi_driver(smc,mac_addr)
*
* initializes the driver and it's variables
*
* END_MANUAL_ENTRY
*/
/*
 * One-time driver initialization: bring up the board and FORMAC+,
 * build the free-MBuf list, reset all HWM bookkeeping, 16-byte-align
 * the descriptor area, create the descriptor rings and start the PLC.
 */
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	/* freeing each MBuf once (use_count 1 -> 0) builds the free list */
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb)	;
		mb++ ;
	}
#else
	mb = mb_start ;
	if (!mb_init) {		/* the shared pool is built only once */
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb)	;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the other variables
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start pointer is 16 byte aligned
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN("i = %d",i,0,3) ;
		/* the extra descriptor allocated in mac_drv_init() covers this */
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}
/*
 * Take an SMT MBuf off the free list (per-SMC pool or shared pool,
 * depending on COMMON_MB_POOL) and initialize its header fields.
 * Returns NULL if the pool is exhausted.
 */
SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
		/* unlink from the free list and reset the header */
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
	return mb;	/* May be NULL */
}
/*
 * Drop one reference on an SMT MBuf and put it back on the free list
 * once the last reference is gone.  Passing NULL is a driver bug and
 * triggers an SMT panic.
 */
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{
	if (!mb) {
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
		return ;
	}

	mb->sm_use_count-- ;
	DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;

	/*
	 * If the use_count is != zero the MBuf is queued
	 * more than once and must not queued into the
	 * free MBuf queue
	 */
	if (mb->sm_use_count)
		return ;

	DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
#ifndef	COMMON_MB_POOL
	mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
	smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
	mb->sm_next = mb_free ;
	mb_free = mb ;
#endif
}
/*
* BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
* void mac_drv_repair_descr(smc)
*
* function called from SMT (HWM / hwmtm.c)
* The BMU is idle when this function is called.
* Mac_drv_repair_descr sets up the physical address
* for all receive and transmit queues where the BMU
* should continue.
* It may be that the BMU was reseted during a fragmented
* transfer. In this case there are some fragments which will
* never completed by the BMU. The OWN bit of this fragments
* must be switched to be owned by the host.
*
* Give a start command to the receive BMU.
* Start the transmit BMUs if transmit frames pending.
*
* END_MANUAL_ENTRY
*/
/*
 * Restore the BMU descriptor pointers after the adapter was stopped
 * (see manual entry above): repair both transmit rings and the receive
 * ring, reload the BMU current-descriptor registers, restart transmit
 * only if frames are still pending, and always restart receive.
 * Must only be called while the hardware is in the STOPPED state.
 */
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair rx queues
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}
/*
 * Walk the transmit descriptor ring after a BMU stop: give fragments
 * of an interrupted transfer (OWN set but STF clear) back to the host,
 * and return the physical address of the descriptor where the BMU
 * should resume.
 */
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	/* walk to the predecessor of tx_curr_get: its txd_ntdadr is the
	 * physical address of tx_curr_get — the default resume point */
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				/* start of an untouched frame: resume here */
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}
/*
* Repairs the receive descriptor ring and returns the physical address
* where the BMU should continue working.
*
* o The physical address where the BMU was stopped has to be
* determined. This is the next RxD after rx_curr_get with an OWN
* bit set.
* o The BMU should start working at beginning of the next frame.
* RxDs with an OWN bit set but with a reset STF bit should be
* skipped and owned by the driver (OWN = 0).
*/
/*
 * Walk the receive descriptor ring after a BMU stop (see the comment
 * above): give fragments of an interrupted transfer (OWN set but STF
 * clear) back to the host and return the physical address of the
 * descriptor where the BMU should resume.
 */
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	/* walk to the predecessor of rx_curr_get: its rxd_nrdadr is the
	 * physical address of rx_curr_get — the default resume point */
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				/* start of an untouched frame: resume here */
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
/*
-------------------------------------------------------------
INTERRUPT SERVICE ROUTINE:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(fddi_isr)
* void fddi_isr(smc)
*
* function DOWNCALL (drvsr.c)
* interrupt service routine, handles the interrupt requests
* generated by the FDDI adapter.
*
* NOTE: The operating system dependent module must guarantee that the
* interrupts of the adapter are disabled when it calls fddi_isr.
*
* About the USE_BREAK_ISR mechanismn:
*
* The main requirement of this mechanismn is to force an timer IRQ when
* leaving process_receive() with leave_isr set. process_receive() may
* be called at any time from anywhere!
* To be sure we don't miss such event we set 'force_irq' per default.
* We have to force and Timer IRQ if 'smc->os.hwm.leave_isr' AND
* 'force_irq' are set. 'force_irq' may be reset if a receive complete
* IRQ is pending.
*
* END_MANUAL_ENTRY
*/
/*
 * Interrupt service routine: polls the adapter's interrupt-source
 * register and dispatches PLC, FORMAC+, timer and token events, fast
 * transmit completion and fast receive processing until no source is
 * pending.  The OS layer must have disabled the adapter's interrupts
 * before calling (see the manual entry above for USE_BREAK_ISR).
 */
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* ISR source */
	u_short		stu, stl ;
	SMbuf		*mb ;
#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	/* resume a receive run that was broken off in a previous ISR pass */
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	/* default: force a timer IRQ if we leave the ISR early (see above) */
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN("ISA = 0x%x",is,0,7) ;

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ STU1(U/L) */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN("Slow transmit complete",0,0,6) ;
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ STU2(U/L) */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN("Slow receive complete",0,0,6) ;
				DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ STU3(U/L) */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN("FORMAC Mode Register 3",0,0,6) ;
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* Timer 82C54-2 */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* Restricted Token Monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* Parity error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* Encoding error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* Encoding error async tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* Encoding error sync tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * Fast Tx complete Async/Sync Queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN("Fast tx complete queue",0,0,6) ;
			/*
			 * clear IRQ, Note: no IRQ is lost, because
			 * we always service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * Fast Rx Complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN("Fast receive complete",0,0,6) ;
			/* clear IRQ */
#ifndef USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				/* rx complete IRQ still pending: no need
				 * to force a timer IRQ on early exit */
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		/* hand queued SMT frames up to the LLC layer */
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		/* dispatch queued SMT events */
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
#endif

		/* NOTE: when the isr is left, no rx is pending */
	}	/* end of interrupt source polling loop */

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}
/*
-------------------------------------------------------------
RECEIVE FUNCTIONS:
-------------------------------------------------------------
*/
#ifndef NDIS_OS2
/*
* BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
* void mac_drv_rx_mode(smc,mode)
*
* function DOWNCALL (fplus.c)
* Corresponding to the parameter mode, the operating system
* dependent module can activate several receive modes.
*
* para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts
* = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts"
* = 3: RX_ENABLE_PROMISC enable promiscuous
* = 4: RX_DISABLE_PROMISC disable promiscuous
* = 5: RX_ENABLE_NSA enable rec. of all NSA frames
* (disabled after 'driver reset' & 'set station address')
* = 6: RX_DISABLE_NSA disable rec. of all NSA frames
*
* = 21: RX_ENABLE_PASS_SMT ( see description )
* = 22: RX_DISABLE_PASS_SMT ( " " )
* = 23: RX_ENABLE_PASS_NSA ( " " )
* = 24: RX_DISABLE_PASS_NSA ( " " )
* = 25: RX_ENABLE_PASS_DB ( " " )
* = 26: RX_DISABLE_PASS_DB ( " " )
* = 27: RX_DISABLE_PASS_ALL ( " " )
* = 28: RX_DISABLE_LLC_PROMISC ( " " )
* = 29: RX_ENABLE_LLC_PROMISC ( " " )
*
*
* RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_SMT, the hardware module
* duplicates all SMT frames with the frame control
* FC_SMT_INFO and passes them to the LLC receive channel
* by calling mac_drv_rx_init.
* The SMT Frames which are sent by the local SMT and the NSA
* frames whose A- and C-Indicator is not set are also duplicated
* and passed.
* The receive mode RX_DISABLE_PASS_SMT disables the passing
* of SMT frames.
*
* RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_NSA, the hardware module
* duplicates all NSA frames with frame control FC_SMT_NSA
* and a set A-Indicator and passed them to the LLC
* receive channel by calling mac_drv_rx_init.
* All NSA Frames which are sent by the local SMT
* are also duplicated and passed.
* The receive mode RX_DISABLE_PASS_NSA disables the passing
* of NSA frames with the A- or C-Indicator set.
*
* NOTE: For fear that the hardware module receives NSA frames with
* a reset A-Indicator, the operating system dependent module
* has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
* before activate the RX_ENABLE_PASS_NSA mode and after every
* 'driver reset' and 'set station address'.
*
* RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
*
* If the operating system dependent module activates the
* mode RX_ENABLE_PASS_DB, direct BEACON frames
* (FC_BEACON frame control) are passed to the LLC receive
* channel by mac_drv_rx_init.
* The receive mode RX_DISABLE_PASS_DB disables the passing
* of direct BEACON frames.
*
* RX_DISABLE_PASS_ALL
*
* Disables all special receives modes. It is equal to
* call mac_drv_set_rx_mode successively with the
* parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
* RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
*
* RX_ENABLE_LLC_PROMISC
*
* (default) all received LLC frames and all SMT/NSA/DBEACON
* frames depending on the attitude of the flags
* PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
* LLC layer
*
* RX_DISABLE_LLC_PROMISC
*
* all received SMT/NSA/DBEACON frames depending on the
* attitude of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
* will be delivered to the LLC layer.
* all received LLC frames with a directed address, Multicast
* or Broadcast address will be delivered to the LLC
* layer too.
*
* END_MANUAL_ENTRY
*/
/*
 * Set a receive mode (see the manual entry above for the full list).
 * The pass_* modes only toggle software flags consulted by
 * process_receive(); all other modes are forwarded to the FORMAC via
 * mac_set_rx_mode().
 */
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
	switch(mode) {
	case RX_ENABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = TRUE ;
		break ;
	case RX_DISABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = FALSE ;
		break ;
	case RX_ENABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = TRUE ;
		break ;
	case RX_DISABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = FALSE ;
		break ;
	case RX_ENABLE_PASS_DB:
		smc->os.hwm.pass_DB = TRUE ;
		break ;
	case RX_DISABLE_PASS_DB:
		smc->os.hwm.pass_DB = FALSE ;
		break ;
	case RX_DISABLE_PASS_ALL:
		/* clear every pass flag and the NSA hardware mode at once */
		smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
		smc->os.hwm.pass_DB = FALSE ;
		smc->os.hwm.pass_llc_promisc = TRUE ;
		mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
		break ;
	case RX_DISABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = FALSE ;
		break ;
	case RX_ENABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = TRUE ;
		break ;
	case RX_ENABLE_ALLMULTI:
	case RX_DISABLE_ALLMULTI:
	case RX_ENABLE_PROMISC:
	case RX_DISABLE_PROMISC:
	case RX_ENABLE_NSA:
	case RX_DISABLE_NSA:
	default:
		/* hardware receive modes: program the FORMAC directly */
		mac_set_rx_mode(smc,mode) ;
		break ;
	}
}
#endif /* ifndef NDIS_OS2 */
/*
* process receive queue
*/
void process_receive(struct s_smc *smc)
{
int i ;
int n ;
int frag_count ; /* number of RxDs of the curr rx buf */
int used_frags ; /* number of RxDs of the curr frame */
struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */
struct s_smt_fp_rxd volatile *r ; /* rxd pointer */
struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */
u_long rbctrl ; /* receive buffer control word */
u_long rfsw ; /* receive frame status word */
u_short rx_used ;
u_char far *virt ;
char far *data ;
SMbuf *mb ;
u_char fc ; /* Frame control */
int len ; /* Frame length */
smc->os.hwm.detec_count = 0 ;
queue = smc->hw.fp.rx[QUEUE_R1] ;
NDD_TRACE("RHxB",0,0,0) ;
for ( ; ; ) {
r = queue->rx_curr_get ;
rx_used = queue->rx_used ;
frag_count = 0 ;
#ifdef USE_BREAK_ISR
if (smc->os.hwm.leave_isr) {
goto rx_end ;
}
#endif
#ifdef NDIS_OS2
if (offDepth) {
smc->os.hwm.rx_break = 1 ;
goto rx_end ;
}
smc->os.hwm.rx_break = 0 ;
#endif
#ifdef ODI2
if (smc->os.hwm.rx_break) {
goto rx_end ;
}
#endif
n = 0 ;
do {
DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
if (rbctrl & BMU_OWN) {
NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
DB_RX("End of RxDs",0,0,4) ;
goto rx_end ;
}
/*
* out of RxD detection
*/
if (!rx_used) {
SK_BREAK() ;
SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
/* Either we don't have an RxD or all
* RxDs are filled. Therefore it's allowed
* for to set the STOPPED flag */
smc->hw.hw_state = STOPPED ;
mac_drv_clear_rx_queue(smc) ;
smc->hw.hw_state = STARTED ;
mac_drv_fill_rxd(smc) ;
smc->os.hwm.detec_count = 0 ;
goto rx_end ;
}
rfsw = le32_to_cpu(r->rxd_rfsw) ;
if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
/*
* The BMU_STF bit is deleted, 1 frame is
* placed into more than 1 rx buffer
*
* skip frame by setting the rx len to 0
*
* if fragment count == 0
* The missing STF bit belongs to the
* current frame, search for the
* EOF bit to complete the frame
* else
* the fragment belongs to the next frame,
* exit the loop and process the frame
*/
SK_BREAK() ;
rfsw = 0 ;
if (frag_count) {
break ;
}
}
n += rbctrl & 0xffff ;
r = r->rxd_next ;
frag_count++ ;
rx_used-- ;
} while (!(rbctrl & BMU_EOF)) ;
used_frags = frag_count ;
DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;
/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
/* BMU_ST_BUF will not be changed by the ASIC */
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
DB_RX("Check STF bit in %x",(void *)r,0,5) ;
r = r->rxd_next ;
DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
frag_count++ ;
rx_used-- ;
}
DB_RX("STF bit found",0,0,5) ;
/*
* The received frame is finished for the process receive
*/
rxd = queue->rx_curr_get ;
queue->rx_curr_get = r ;
queue->rx_free += frag_count ;
queue->rx_used = rx_used ;
/*
* ASIC Errata no. 7 (STF - Bit Bug)
*/
rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
}
smc->hw.fp.err_stats.err_valid++ ;
smc->mib.m[MAC0].fddiMACCopied_Ct++ ;
/* the length of the data including the FC */
len = (rfsw & RD_LENGTH) - 4 ;
DB_RX("frame length = %d",len,0,4) ;
/*
* check the frame_length and all error flags
*/
if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
if (rfsw & RD_S_MSRABT) {
DB_RX("Frame aborted by the FORMAC",0,0,2) ;
smc->hw.fp.err_stats.err_abort++ ;
}
/*
* check frame status
*/
if (rfsw & RD_S_SEAC2) {
DB_RX("E-Indicator set",0,0,2) ;
smc->hw.fp.err_stats.err_e_indicator++ ;
}
if (rfsw & RD_S_SFRMERR) {
DB_RX("CRC error",0,0,2) ;
smc->hw.fp.err_stats.err_crc++ ;
}
if (rfsw & RX_FS_IMPL) {
DB_RX("Implementer frame",0,0,2) ;
smc->hw.fp.err_stats.err_imp_frame++ ;
}
goto abort_frame ;
}
if (len > FDDI_RAW_MTU-4) {
DB_RX("Frame too long error",0,0,2) ;
smc->hw.fp.err_stats.err_too_long++ ;
goto abort_frame ;
}
/*
* SUPERNET 3 Bug: FORMAC delivers status words
* of aborded frames to the BMU
*/
if (len <= 4) {
DB_RX("Frame length = 0",0,0,2) ;
goto abort_frame ;
}
if (len != (n-4)) {
DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
smc->os.hwm.rx_len_error++ ;
goto abort_frame ;
}
/*
* Check SA == MA
*/
virt = (u_char far *) rxd->rxd_virt ;
DB_RX("FC = %x",*virt,0,2) ;
if (virt[12] == MA[5] &&
virt[11] == MA[4] &&
virt[10] == MA[3] &&
virt[9] == MA[2] &&
virt[8] == MA[1] &&
(virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
goto abort_frame ;
}
/*
* test if LLC frame
*/
if (rfsw & RX_FS_LLC) {
/*
* if pass_llc_promisc is disable
* if DA != Multicast or Broadcast or DA!=MA
* abort the frame
*/
if (!smc->os.hwm.pass_llc_promisc) {
if(!(virt[1] & GROUP_ADDR_BIT)) {
if (virt[6] != MA[5] ||
virt[5] != MA[4] ||
virt[4] != MA[3] ||
virt[3] != MA[2] ||
virt[2] != MA[1] ||
virt[1] != MA[0]) {
DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
goto abort_frame ;
}
}
}
/*
* LLC frame received
*/
DB_RX("LLC - receive",0,0,4) ;
mac_drv_rx_complete(smc,rxd,frag_count,len) ;
}
else {
if (!(mb = smt_get_mbuf(smc))) {
smc->hw.fp.err_stats.err_no_buf++ ;
DB_RX("No SMbuf; receive terminated",0,0,4) ;
goto abort_frame ;
}
data = smtod(mb,char *) - 1 ;
/*
* copy the frame into a SMT_MBuf
*/
#ifdef USE_OS_CPY
hwm_cpy_rxd2mb(rxd,data,len) ;
#else
for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
memcpy(data,r->rxd_virt,n) ;
data += n ;
}
data = smtod(mb,char *) - 1 ;
#endif
fc = *(char *)mb->sm_data = *data ;
mb->sm_len = len - 1 ; /* len - fc */
data++ ;
/*
* SMT frame received
*/
switch(fc) {
case FC_SMT_INFO :
smc->hw.fp.err_stats.err_smt_frame++ ;
DB_RX("SMT frame received ",0,0,5) ;
if (smc->os.hwm.pass_SMT) {
DB_RX("pass SMT frame ",0,0,5) ;
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX("requeue RxD",0,0,5) ;
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
break ;
case FC_SMT_NSA :
smc->hw.fp.err_stats.err_smt_frame++ ;
DB_RX("SMT frame received ",0,0,5) ;
/* if pass_NSA set pass the NSA frame or */
/* pass_SMT set and the A-Indicator */
/* is not set, pass the NSA frame */
if (smc->os.hwm.pass_NSA ||
(smc->os.hwm.pass_SMT &&
!(rfsw & A_INDIC))) {
DB_RX("pass SMT frame ",0,0,5) ;
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX("requeue RxD",0,0,5) ;
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
break ;
case FC_BEACON :
if (smc->os.hwm.pass_DB) {
DB_RX("pass DB frame ",0,0,5) ;
mac_drv_rx_complete(smc, rxd,
frag_count,len) ;
}
else {
DB_RX("requeue RxD",0,0,5) ;
mac_drv_requeue_rxd(smc,rxd,frag_count);
}
smt_free_mbuf(smc,mb) ;
break ;
default :
/*
 * unknown FC: abort the frame
 */
DB_RX("unknown FC error",0,0,2) ;
smt_free_mbuf(smc,mb) ;
DB_RX("requeue RxD",0,0,5) ;
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
if ((fc & 0xf0) == FC_MAC)
smc->hw.fp.err_stats.err_mac_frame++ ;
else
smc->hw.fp.err_stats.err_imp_frame++ ;
break ;
}
}
DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
continue ;
/*--------------------------------------------------------------------*/
abort_frame:
DB_RX("requeue RxD",0,0,5) ;
mac_drv_requeue_rxd(smc,rxd,frag_count) ;
DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
}
rx_end:
#ifdef ALL_RX_COMPLETE
mac_drv_all_receives_complete(smc) ;
#endif
return ; /* lint bug: needs return detect end of function */
}
/*
 * Deliver a queued SMT MBuf to the LLC layer.
 *
 * The first byte of the MBuf data is the frame control (fc).  The frame
 * is published via the per-adapter receive context (smc->os.hwm.r) and
 * handed to the os-specific mac_drv_rx_init(); the MBuf is then freed.
 */
static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
	char *frame = smtod(mb, char *) ;
	u_char fc = *frame ;		/* frame control is the first byte */

	DB_RX("send a queued frame to the llc layer",0,0,4) ;
	smc->os.hwm.r.len = mb->sm_len ;
	smc->os.hwm.r.mb_pos = frame ;
	(void) mac_drv_rx_init(smc, (int) mb->sm_len, (int) fc,
			       frame, (int) mb->sm_len) ;
	smt_free_mbuf(smc, mb) ;
}
/*
* BEGIN_MANUAL_ENTRY(hwm_rx_frag)
* void hwm_rx_frag(smc,virt,phys,len,frame_status)
*
* function MACRO (hardware module, hwmtm.h)
* This function calls dma_master for preparing the
* system hardware for the DMA transfer and initializes
* the current RxD with the length and the physical and
* virtual address of the fragment. Furthermore, it sets the
* STF and EOF bits depending on the frame status byte,
* switches the OWN flag of the RxD, so that it is owned by the
* adapter and issues an rx_start.
*
* para virt virtual pointer to the fragment
* len the length of the fragment
* frame_status status of the frame, see design description
*
* NOTE: It is possible to call this function with a fragment length
* of zero.
*
* END_MANUAL_ENTRY
*/
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32	rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
	/* Fill the next free RxD of receive queue 1. */
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	/*
	 * Build the buffer control word: the STF/EOF bits are derived from
	 * FIRST_FRAG/LAST_FRAG in @frame_status (see manual entry above);
	 * BMU_OWN hands the descriptor over to the adapter.
	 */
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	/* Flush the descriptor towards the device, then issue rx_start. */
	DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
	/* Account for the consumed descriptor and advance the put pointer. */
	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
/*
* BEGINN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
*
* void mac_drv_clear_rx_queue(smc)
* struct s_smc *smc ;
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_rx_queue is called by the OS-specific module
* after it has issued a card_stop.
* In this case, the frames in the receive queue are obsolete and
* should be removed. For removing mac_drv_clear_rx_queue
* calls dma_master for each RxD and mac_drv_clear_rxd for each
* receive buffer.
*
* NOTE: calling sequence card_stop:
* CLI_FBI(), card_stop(),
* mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
*
* NOTE: The caller is responsible that the BMUs are idle
* when this function is called.
*
* END_MANUAL_ENTRY
*/
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int frag_count ;
	int i ;

	/* Must only be called after card_stop(); see manual entry above. */
	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX("clear_rx_queue",0,0,5) ;

	/*
	 * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		/* Take the first descriptor of the frame back from the BMU. */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		/*
		 * Collect the remaining descriptors of this frame, i.e. up
		 * to the next descriptor with the STF (start of frame) bit
		 * set or up to the put pointer.
		 */
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX("STF bit found",0,0,5) ;
		next_rxd = r ;
		/* Undo the DMA mapping of every fragment of the frame. */
		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		/* Let the OS-specific module free the receive buffer(s). */
		DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
			(void *)queue->rx_curr_get,frag_count,5) ;
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}
/*
-------------------------------------------------------------
SEND FUNCTIONS:
-------------------------------------------------------------
*/
/*
* BEGIN_MANUAL_ENTRY(hwm_tx_init)
* int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
*
* function DOWN_CALL (hardware module, hwmtm.c)
* hwm_tx_init checks if the frame can be sent through the
* corresponding send queue.
*
* para fc the frame control. To determine through which
* send queue the frame should be transmitted.
* 0x50 - 0x57: asynchronous LLC frame
* 0xD0 - 0xD7: synchronous LLC frame
* 0x41, 0x4F: SMT frame to the network
* 0x42: SMT frame to the network and to the local SMT
* 0x43: SMT frame to the local SMT
* frag_count count of the fragments for this frame
* frame_len length of the frame
* frame_status status of the frame, the send queue bit is already
* specified
*
* return frame_status
*
* END_MANUAL_ENTRY
*/
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;
	/* Select the send queue from the queue bit the caller has already
	 * set in @frame_status (see manual entry above). */
	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	smc->os.hwm.tx_len = frame_len ;
	DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
	/* Derive the frame destination(s) from the frame control; the fc
	 * encoding is documented in the manual entry above. */
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
		frame_status |= LAN_TX ;
	}
	else {
		switch (fc) {
		case FC_SMT_INFO :
		case FC_SMT_NSA :
			frame_status |= LAN_TX ;
			break ;
		case FC_SMT_LOC :
			frame_status |= LOC_TX ;
			break ;
		case FC_SMT_LAN_LOC :
			frame_status |= LAN_TX | LOC_TX ;
			break ;
		default :
			SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
		}
	}
	/* No transmission to the LAN while the MAC ring is down. */
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
	}
	/* Check that enough TxDs are available for all fragments; try to
	 * reclaim completed descriptors first. */
	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}
	DB_TX("frame_status = %x",frame_status,0,3) ;
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return frame_status;
}
/*
* BEGIN_MANUAL_ENTRY(hwm_tx_frag)
* void hwm_tx_frag(smc,virt,phys,len,frame_status)
*
* function DOWNCALL (hardware module, hwmtm.c)
* If the frame should be sent to the LAN, this function calls
* dma_master, fills the current TxD with the virtual and the
* physical address, sets the STF and EOF bits dependent on
* the frame status, and requests the BMU to start the
* transmit.
* If the frame should be sent to the local SMT, an SMT_MBuf
* is allocated if the FIRST_FRAG bit is set in the frame_status.
* The fragment of the frame is copied into the SMT MBuf.
* The function smt_received_pack is called if the LAST_FRAG
* bit is set in the frame_status word.
*
* para virt virtual pointer to the fragment
* len the length of the fragment
* frame_status status of the frame, see design description
*
* return nothing returned, no parameter is modified
*
* NOTE: It is possible to invoke this macro with a fragment length
* of zero.
*
* END_MANUAL_ENTRY
*/
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/* Bug fix: AF / May 31 1999 (#missing)
	 * snmpinfo problem reported by IBM is caused by invalid
	 * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
	 * Set: t = queue->tx_curr_put  here !
	 */
	t = queue->tx_curr_put ;

	DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
	if (frame_status & LAN_TX) {
		/* '*t' is already defined */
		DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		/* STF/EOF/EN_IRQ_EOF come from @frame_status; BMU_OWN hands
		 * the descriptor over to the adapter. */
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;
#ifndef	AIX
		/* Flush the descriptor towards the device, then kick the BMU. */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		/* Account for the consumed descriptor, advance put pointer. */
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX("LOC_TX: ",0,0,3) ;
		/* First fragment: allocate the SMT MBuf the frame is copied
		 * into; transmission to local SMT is silently dropped if no
		 * MBuf is available. */
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX("No SMbuf; transmit terminated",0,0,4) ;
			}
			else {
				/* -1: leave room for the frame control byte */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef USE_OS_CPY
#ifdef PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX("copy fragment into MBuf ",0,0,3) ;
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'rxd'
				 * to 'data'.  The virtual pointer of the
				 * os-specific tx-buffer should be written
				 * in the LAST txd.
				 */
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif	/* nPASS_1ST_TXD_2_TX_COMP */
#endif	/* USE_OS_CPY */
				/* Strip the frame control byte off the data
				 * and pass the completed frame to the SMT. */
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
				*(char *)smc->os.hwm.tx_mb->sm_data =
					*smc->os.hwm.tx_data ;
				smc->os.hwm.tx_data++ ;
				smc->os.hwm.tx_mb->sm_len =
					smc->os.hwm.tx_len - 1 ;
				DB_TX("pass LLC frame to SMT ",0,0,3) ;
				smt_received_pack(smc,smc->os.hwm.tx_mb,
					RD_FS_LOCAL) ;
			}
		}
	}
	NDD_TRACE("THfE",t,queue->tx_free,0) ;
}
/*
* queues a receive for later send
*/
static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_rx_frames++ ;
	mb->sm_next = NULL ;
	/* append at the tail of the singly linked llc_rx pipe */
	if (smc->os.hwm.llc_rx_pipe != NULL)
		smc->os.hwm.llc_rx_tail->sm_next = mb ;
	else
		smc->os.hwm.llc_rx_pipe = mb ;
	smc->os.hwm.llc_rx_tail = mb ;
	/*
	 * When not already running in interrupt context, force a timer
	 * IRQ so that the queued frame is picked up and delivered.
	 */
	if (!smc->os.hwm.isr_flag)
		smt_force_irq(smc) ;
}
/*
* get a SMbuf from the llc_rx_queue
*/
static SMbuf *get_llc_rx(struct s_smc *smc)
{
SMbuf *mb ;
if ((mb = smc->os.hwm.llc_rx_pipe)) {
smc->os.hwm.queued_rx_frames-- ;
smc->os.hwm.llc_rx_pipe = mb->sm_next ;
}
DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
return mb;
}
/*
 * queues a transmit SMT MBuf for the time during which the MBuf is
 * queued on the TxD ring
 */
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
{
	DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
	smc->os.hwm.queued_txd_mb++ ;
	mb->sm_next = NULL ;
	/* append at the tail of the singly linked txd_tx pipe */
	if (smc->os.hwm.txd_tx_pipe != NULL)
		smc->os.hwm.txd_tx_tail->sm_next = mb ;
	else
		smc->os.hwm.txd_tx_pipe = mb ;
	smc->os.hwm.txd_tx_tail = mb ;
}
/*
* get a SMbuf from the txd_tx_queue
*/
static SMbuf *get_txd_mb(struct s_smc *smc)
{
SMbuf *mb ;
if ((mb = smc->os.hwm.txd_tx_pipe)) {
smc->os.hwm.queued_txd_mb-- ;
smc->os.hwm.txd_tx_pipe = mb->sm_next ;
}
DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
return mb;
}
/*
* SMT Send function
*/
/*
 * Transmit an SMT MBuf with frame control @fc: the data is split into
 * page-bounded fragments and queued on the TxD ring (LAN_TX) and/or
 * looped back to the local SMT (LOC_TX).
 */
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;

	/* Prepend the frame control byte to the MBuf data. */
	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 * (a new fragment is started at every SMT_PAGESIZE boundary;
	 * the arrays hold 3 entries -- presumably the MBuf data can span
	 * at most two page boundaries, TODO confirm against MBuf size)
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/*
	 * determine the frame status (LAN and/or local destination)
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
		    (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	/* Drop LAN_TX if the ring is down or no TxDs are left; if nothing
	 * remains to be done, free the MBuf and bail out. */
	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
		}
		else {
			DB_TX("Ring is down: terminate transmission",0,0,2) ;
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX("frame_status = 0x%x ",frame_status,0,5) ;

	/* Two consumers of the MBuf -> it must be freed twice. */
	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			/* BMU_OWN hands the descriptor over to the adapter;
			 * BMU_SMT_TX marks it as an SMT buffer. */
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		/* Keep the MBuf until the TxDs have completed. */
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX("pass Mbuf to LLC queue",0,0,5) ;
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMT_MBUFs here, because it may
	 * be that the SMT wants to send more than 1 frame for one down call
	 *
	 * NOTE(review): if LAN_TX was never set, 't' is passed to NDD_TRACE
	 * uninitialized below -- harmless for tracing, but worth confirming.
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}
/* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
* void mac_drv_clear_txd(smc)
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_txd searches in both send queues for TxD's
* which were finished by the adapter. It calls dma_complete
* for each TxD. If the last fragment of an LLC frame is
* reached, it calls mac_drv_tx_complete to release the
* send buffer.
*
* return nothing
*
* END_MANUAL_ENTRY
*/
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	/* Reclaim finished descriptors from both send queues. */
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		/* One iteration of the outer loop handles one frame. */
		for ( ; ; ) {
			frag_count = 0 ;

			/*
			 * Scan forward to the EOF descriptor of the frame.
			 * Stop at the first descriptor still owned by the
			 * adapter (BMU_OWN) or when the queue is empty.
			 */
			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX("End of TxDs queue %d",i,0,4) ;
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			/* Undo the DMA mapping of every fragment; t2 ends up
			 * pointing to the last (EOF) descriptor. */
			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			/* SMT frames free their queued MBuf; LLC frames are
			 * completed via the os-specific callback. */
			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef PASS_1ST_TXD_2_TX_COMP
				DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ;
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX("mac_drv_tx_comp for TxD 0x%x",
					queue->tx_curr_get,0,4) ;
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
/*
* BEGINN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
*
* void mac_drv_clear_tx_queue(smc)
* struct s_smc *smc ;
*
* function DOWNCALL (hardware module, hwmtm.c)
* mac_drv_clear_tx_queue is called from the SMT when
* the RMT state machine has entered the ISOLATE state.
* This function is also called by the os-specific module
* after it has called the function card_stop().
* In this case, the frames in the send queues are obsolete and
* should be removed.
*
* note calling sequence:
* CLI_FBI(), card_stop(),
* mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
*
* NOTE: The caller is responsible that the BMUs are idle
* when this function is called.
*
* END_MANUAL_ENTRY
*/
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int tx_used ;
	int i ;

	/* Must only be called after card_stop(); see manual entry above. */
	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;

		/*
		 * switch the OWN bit of all pending frames to the host
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all TxD's for both send queues
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}
/*
-------------------------------------------------------------
TEST FUNCTIONS:
-------------------------------------------------------------
*/
#ifdef DEBUG
/*
* BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
* void mac_drv_debug_lev(smc,flag,lev)
*
* function DOWNCALL (drvsr.c)
* To get a special debug info the user can assign a debug level
* to any debug flag.
*
* para flag debug flag, possible values are:
* = 0: reset all debug flags (the defined level is
* ignored)
* = 1: debug.d_smtf
* = 2: debug.d_smt
* = 3: debug.d_ecm
* = 4: debug.d_rmt
* = 5: debug.d_cfm
* = 6: debug.d_pcm
*
* = 10: debug.d_os.hwm_rx (hardware module receive path)
* = 11: debug.d_os.hwm_tx(hardware module transmit path)
* = 12: debug.d_os.hwm_gen(hardware module general flag)
*
* lev debug level
*
* END_MANUAL_ENTRY
*/
/*
 * Assign debug level @lev to debug flag @flag; @flag == 0 resets all
 * debug flags (and ignores @lev).  See the manual entry above for the
 * flag encoding.
 */
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
	switch(flag) {
	case 0:
		/*
		 * Reset all debug flags.  A plain integer constant is used
		 * here instead of the former "(int)NULL": NULL is a pointer
		 * constant and casting it for use as a case label is
		 * non-portable and triggers compiler warnings.
		 */
		DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
		DB_P.d_cfm = 0 ;
		DB_P.d_pcm = 0 ;	/* previously missed by reset-all */
		DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef	SBA
		DB_P.d_sba = 0 ;
#endif
#ifdef	ESS
		DB_P.d_ess = 0 ;
#endif
		break ;
	case DEBUG_SMTF:
		DB_P.d_smtf = lev ;
		break ;
	case DEBUG_SMT:
		DB_P.d_smt = lev ;
		break ;
	case DEBUG_ECM:
		DB_P.d_ecm = lev ;
		break ;
	case DEBUG_RMT:
		DB_P.d_rmt = lev ;
		break ;
	case DEBUG_CFM:
		DB_P.d_cfm = lev ;
		break ;
	case DEBUG_PCM:
		DB_P.d_pcm = lev ;
		break ;
	case DEBUG_SBA:
#ifdef	SBA
		DB_P.d_sba = lev ;
#endif
		break ;
	case DEBUG_ESS:
#ifdef	ESS
		DB_P.d_ess = lev ;
#endif
		break ;
	case DB_HWM_RX:
		DB_P.d_os.hwm_rx = lev ;
		break ;
	case DB_HWM_TX:
		DB_P.d_os.hwm_tx = lev ;
		break ;
	case DB_HWM_GEN:
		DB_P.d_os.hwm_gen = lev ;
		break ;
	default:
		break ;
	}
}
#endif
| gpl-2.0 |
jdlfg/Mecha-kernel-jdlfg | fs/ntfs/bitmap.c | 14443 | 5603 | /*
* bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef NTFS_RW
#include <linux/pagemap.h>
#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"
/**
* __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
* @vi: vfs inode describing the bitmap
* @start_bit: first bit to set
* @count: number of bits to set
* @value: value to set the bits to (i.e. 0 or 1)
* @is_rollback: if 'true' this is a rollback operation
*
* Set @count bits starting at bit @start_bit in the bitmap described by the
* vfs inode @vi to @value, where @value is either 0 or 1.
*
* @is_rollback should always be 'false', it is for internal use to rollback
* errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
		const s64 count, const u8 value, const bool is_rollback)
{
	s64 cnt = count;	/* number of bits still to be modified */
	pgoff_t index, end_index;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	int pos, len;
	u8 bit;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
			(unsigned long long)cnt, (unsigned int)value,
			is_rollback ? " (rollback)" : "");
	BUG_ON(start_bit < 0);
	BUG_ON(cnt < 0);
	BUG_ON(value > 1);
	/*
	 * Calculate the indices for the pages containing the first and last
	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
	 * Each page cache page holds PAGE_CACHE_SIZE << 3 bits, hence the
	 * shift by (3 + PAGE_CACHE_SHIFT).
	 */
	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
	/* Get the page containing the first bit (@start_bit). */
	mapping = vi->i_mapping;
	page = ntfs_map_page(mapping, index);
	if (IS_ERR(page)) {
		if (!is_rollback)
			ntfs_error(vi->i_sb, "Failed to map first page (error "
					"%li), aborting.", PTR_ERR(page));
		return PTR_ERR(page);
	}
	kaddr = page_address(page);
	/* Set @pos to the position of the byte containing @start_bit. */
	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
	/* Calculate the position of @start_bit in the first byte. */
	bit = start_bit & 7;
	/* If the first byte is partial, modify the appropriate bits in it. */
	if (bit) {
		u8 *byte = kaddr + pos;
		/* Stop at the byte boundary (@bit wraps to 8) or when done. */
		while ((bit & 7) && cnt) {
			cnt--;
			if (value)
				*byte |= 1 << bit++;
			else
				*byte &= ~(1 << bit++);
		}
		/* If we are done, unmap the page and return success. */
		if (!cnt)
			goto done;
		/* Update @pos to the new position. */
		pos++;
	}
	/*
	 * Depending on @value, modify all remaining whole bytes in the page up
	 * to @cnt.
	 */
	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
	memset(kaddr + pos, value ? 0xff : 0, len);
	cnt -= len << 3;
	/* Update @len to point to the first not-done byte in the page. */
	if (cnt < 8)
		len += pos;
	/* If we are not in the last page, deal with all subsequent pages. */
	while (index < end_index) {
		BUG_ON(cnt <= 0);
		/* Update @index and get the next page. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		page = ntfs_map_page(mapping, ++index);
		if (IS_ERR(page))
			goto rollback;
		kaddr = page_address(page);
		/*
		 * Depending on @value, modify all remaining whole bytes in the
		 * page up to @cnt.
		 */
		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
		memset(kaddr, value ? 0xff : 0, len);
		cnt -= len << 3;
	}
	/*
	 * The currently mapped page is the last one. If the last byte is
	 * partial, modify the appropriate bits in it. Note, @len is the
	 * position of the last byte inside the page.
	 */
	if (cnt) {
		u8 *byte;

		BUG_ON(cnt > 7);
		bit = cnt;
		byte = kaddr + len;
		/* Set/clear the low @cnt bits of the final partial byte. */
		while (bit--) {
			if (value)
				*byte |= 1 << bit;
			else
				*byte &= ~(1 << bit);
		}
	}
done:
	/* We are done. Unmap the page and return success. */
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	ntfs_debug("Done.");
	return 0;
rollback:
	/*
	 * Current state:
	 *	- no pages are mapped
	 *	- @count - @cnt is the number of bits that have been modified
	 * The already-modified bits are undone by a recursive call with the
	 * inverse @value; @pos is reused to hold the rollback error code.
	 */
	if (is_rollback)
		return PTR_ERR(page);
	if (count != cnt)
		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
				value ? 0 : 1, true);
	else
		pos = 0;
	if (!pos) {
		/* Rollback was successful. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li), aborting.", PTR_ERR(page));
	} else {
		/* Rollback failed. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li) and rollback failed (error %i). "
				"Aborting and leaving inconsistent metadata. "
				"Unmount and run chkdsk.", PTR_ERR(page), pos);
		NVolSetErrors(NTFS_SB(vi->i_sb));
	}
	return PTR_ERR(page);
}
#endif /* NTFS_RW */
| gpl-2.0 |
ptmr3/GalaxyNote3-Kernel-kk- | fs/ntfs/bitmap.c | 14443 | 5603 | /*
* bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project.
*
* Copyright (c) 2004-2005 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the Linux-NTFS
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef NTFS_RW
#include <linux/pagemap.h>
#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"
/**
* __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
* @vi: vfs inode describing the bitmap
* @start_bit: first bit to set
* @count: number of bits to set
* @value: value to set the bits to (i.e. 0 or 1)
* @is_rollback: if 'true' this is a rollback operation
*
* Set @count bits starting at bit @start_bit in the bitmap described by the
* vfs inode @vi to @value, where @value is either 0 or 1.
*
* @is_rollback should always be 'false', it is for internal use to rollback
* errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
*
* Return 0 on success and -errno on error.
*/
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
		const s64 count, const u8 value, const bool is_rollback)
{
	s64 cnt = count;	/* number of bits still to be modified */
	pgoff_t index, end_index;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	int pos, len;
	u8 bit;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
			(unsigned long long)cnt, (unsigned int)value,
			is_rollback ? " (rollback)" : "");
	BUG_ON(start_bit < 0);
	BUG_ON(cnt < 0);
	BUG_ON(value > 1);
	/*
	 * Calculate the indices for the pages containing the first and last
	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
	 * Each page cache page holds PAGE_CACHE_SIZE << 3 bits, hence the
	 * shift by (3 + PAGE_CACHE_SHIFT).
	 */
	index = start_bit >> (3 + PAGE_CACHE_SHIFT);
	end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT);
	/* Get the page containing the first bit (@start_bit). */
	mapping = vi->i_mapping;
	page = ntfs_map_page(mapping, index);
	if (IS_ERR(page)) {
		if (!is_rollback)
			ntfs_error(vi->i_sb, "Failed to map first page (error "
					"%li), aborting.", PTR_ERR(page));
		return PTR_ERR(page);
	}
	kaddr = page_address(page);
	/* Set @pos to the position of the byte containing @start_bit. */
	pos = (start_bit >> 3) & ~PAGE_CACHE_MASK;
	/* Calculate the position of @start_bit in the first byte. */
	bit = start_bit & 7;
	/* If the first byte is partial, modify the appropriate bits in it. */
	if (bit) {
		u8 *byte = kaddr + pos;
		/* Stop at the byte boundary (@bit wraps to 8) or when done. */
		while ((bit & 7) && cnt) {
			cnt--;
			if (value)
				*byte |= 1 << bit++;
			else
				*byte &= ~(1 << bit++);
		}
		/* If we are done, unmap the page and return success. */
		if (!cnt)
			goto done;
		/* Update @pos to the new position. */
		pos++;
	}
	/*
	 * Depending on @value, modify all remaining whole bytes in the page up
	 * to @cnt.
	 */
	len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos);
	memset(kaddr + pos, value ? 0xff : 0, len);
	cnt -= len << 3;
	/* Update @len to point to the first not-done byte in the page. */
	if (cnt < 8)
		len += pos;
	/* If we are not in the last page, deal with all subsequent pages. */
	while (index < end_index) {
		BUG_ON(cnt <= 0);
		/* Update @index and get the next page. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		page = ntfs_map_page(mapping, ++index);
		if (IS_ERR(page))
			goto rollback;
		kaddr = page_address(page);
		/*
		 * Depending on @value, modify all remaining whole bytes in the
		 * page up to @cnt.
		 */
		len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE);
		memset(kaddr, value ? 0xff : 0, len);
		cnt -= len << 3;
	}
	/*
	 * The currently mapped page is the last one. If the last byte is
	 * partial, modify the appropriate bits in it. Note, @len is the
	 * position of the last byte inside the page.
	 */
	if (cnt) {
		u8 *byte;

		BUG_ON(cnt > 7);
		bit = cnt;
		byte = kaddr + len;
		/* Set/clear the low @cnt bits of the final partial byte. */
		while (bit--) {
			if (value)
				*byte |= 1 << bit;
			else
				*byte &= ~(1 << bit);
		}
	}
done:
	/* We are done. Unmap the page and return success. */
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	ntfs_debug("Done.");
	return 0;
rollback:
	/*
	 * Current state:
	 *	- no pages are mapped
	 *	- @count - @cnt is the number of bits that have been modified
	 * The already-modified bits are undone by a recursive call with the
	 * inverse @value; @pos is reused to hold the rollback error code.
	 */
	if (is_rollback)
		return PTR_ERR(page);
	if (count != cnt)
		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
				value ? 0 : 1, true);
	else
		pos = 0;
	if (!pos) {
		/* Rollback was successful. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li), aborting.", PTR_ERR(page));
	} else {
		/* Rollback failed. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li) and rollback failed (error %i). "
				"Aborting and leaving inconsistent metadata. "
				"Unmount and run chkdsk.", PTR_ERR(page), pos);
		NVolSetErrors(NTFS_SB(vi->i_sb));
	}
	return PTR_ERR(page);
}
#endif /* NTFS_RW */
| gpl-2.0 |
Motorhead1991/android_kernel_lge_l35g | drivers/s390/cio/ccwgroup.c | 108 | 16872 | /*
* bus driver for ccwgroup
*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#define CCW_BUS_ID_SIZE 20
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
/* a device matches a driver if all its slave devices match the same
* entry of the driver */
/*
 * A ccwgroup device matches a ccwgroup driver iff the driver's id is the
 * one recorded as the device's creator when the group was formed.
 */
static int
ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);

	return (gdev->creator_id == gdrv->driver_id) ? 1 : 0;
}
/*
 * Uevent callback for the ccwgroup bus.  No additional environment
 * variables are exported yet (left as TODO by the original author).
 */
static int
ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	/* TODO */
	return 0;
}
static struct bus_type ccwgroup_bus_type;
/*
 * Remove the sysfs links between a group device and its slaves:
 * "cdevN" links under the group device and the "group_device" link
 * under each slave ccw device.
 */
static void
__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
	int i;
	/* Holds "cdevN"; assumes gdev->count stays below 1000 so the
	 * name fits in 8 bytes -- TODO confirm upper bound on count. */
	char str[8];

	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		sysfs_remove_link(&gdev->dev.kobj, str);
		sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
	}
}
/*
* Remove references from ccw devices to ccw group device and from
* ccw group device to ccw devices.
*/
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
	struct ccw_device *cdev;
	int i;

	for (i = 0; i < gdev->count; i++) {
		cdev = gdev->cdev[i];
		if (!cdev)
			continue;
		/* Clear the slave's back-pointer under its ccw lock so
		 * readers (e.g. ccwgroup_remove_ccwdev) see a consistent
		 * value. */
		spin_lock_irq(cdev->ccwlock);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irq(cdev->ccwlock);
		gdev->cdev[i] = NULL;
		/* Drop the reference taken when the device was grouped. */
		put_device(&cdev->dev);
	}
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentially created. Saves memory :)
*/
/*
 * Deferred-callback body that actually unregisters a group device.
 * Runs outside attribute-method context (see ccwgroup_ungroup_store).
 * The reg_mutex serializes against concurrent unregistration paths.
 */
static void ccwgroup_ungroup_callback(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
}
/*
 * sysfs "ungroup" attribute: writing to it schedules removal of the
 * group device.  Only allowed while the group is offline.  Returns
 * @count on success or a negative errno.
 */
static ssize_t
ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	int rc;

	gdev = to_ccwgroupdev(dev);
	/* Prevent concurrent online/offline processing and ungrouping. */
	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state != CCWGROUP_OFFLINE) {
		rc = -EINVAL;
		goto out;
	}
	/* Note that we cannot unregister the device from one of its
	 * attribute methods, so we have to use this roundabout approach.
	 */
	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
	if (rc) {
		if (rc != -EAGAIN)
			/* Release onoff "lock" when ungrouping failed. */
			atomic_set(&gdev->onoff, 0);
		return rc;
	}
	return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
/*
 * Release callback for a group device: free the ccwgroup_device that
 * embeds the struct device once the last reference is dropped.
 */
static void
ccwgroup_release (struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

	kfree(gdev);
}
/*
 * Create the sysfs links between a group device and its slaves
 * (the inverse of __ccwgroup_remove_symlinks).  On any failure all
 * links created so far are torn down again.  Returns 0 or a negative
 * errno from sysfs_create_link().
 */
static int
__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
	char str[8];	/* "cdevN" buffer, see __ccwgroup_remove_symlinks */
	int i, rc;

	/* First pass: "group_device" link under each slave. */
	for (i = 0; i < gdev->count; i++) {
		rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
				       "group_device");
		if (rc) {
			/* Undo the links created before the failure. */
			for (--i; i >= 0; i--)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	/* Second pass: "cdevN" links under the group device. */
	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
				       str);
		if (rc) {
			/* Undo this pass, then the whole first pass. */
			for (--i; i >= 0; i--) {
				sprintf(str, "cdev%d", i);
				sysfs_remove_link(&gdev->dev.kobj, str);
			}
			for (i = 0; i < gdev->count; i++)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	return 0;
}
/*
 * Parse the next comma-separated bus id from *@buf into @bus_id and
 * advance *@buf past it (to NULL for the last entry).  A trailing
 * newline on the last entry is stripped.  Returns 0 on success or
 * -EINVAL if the id does not fit into CCW_BUS_ID_SIZE.
 */
static int __get_next_bus_id(const char **buf, char *bus_id)
{
	int rc, len;
	char *start, *end;

	start = (char *)*buf;
	end = strchr(start, ',');
	if (!end) {
		/* Last entry. Strip trailing newline, if applicable. */
		end = strchr(start, '\n');
		if (end)
			*end = '\0';
		len = strlen(start) + 1;
	} else {
		len = end - start + 1;
		end++;
	}
	if (len < CCW_BUS_ID_SIZE) {
		/* len includes the terminator; strlcpy truncates there. */
		strlcpy(bus_id, start, len);
		rc = 0;
	} else
		rc = -EINVAL;
	/* Advance the caller's cursor even on error. */
	*buf = end;
	return rc;
}
/*
 * Validate a ccw bus id of the form "%x.%1x.%04x" (e.g. "0.0.0190").
 * Returns 1 if @bus_id is well formed, 0 otherwise.
 *
 * Fix: the original sscanf() check accepted trailing garbage after a
 * syntactically valid prefix (e.g. "0.0.0190junk"), contradicting the
 * "Must be of form" contract.  Use %n to require that the whole string
 * was consumed.
 */
static int __is_valid_bus_id(const char *bus_id)
{
	int cssid, ssid, devno, end = 0;

	/* Must be of form %x.%x.%04x with nothing following. */
	if (sscanf(bus_id, "%x.%1x.%04x%n", &cssid, &ssid, &devno, &end) != 3)
		return 0;
	if (bus_id[end] != '\0')
		return 0;
	return 1;
}
/**
* ccwgroup_create_from_string() - create and register a ccw group device
* @root: parent device for the new device
* @creator_id: identifier of creating driver
* @cdrv: ccw driver of slave devices
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
* Create and register a new ccw group device as a child of @root. Slave
* devices are obtained from the list of bus ids given in @buf and must all
* belong to @cdrv.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
				struct ccw_driver *cdrv, int num_devices,
				const char *buf)
{
	struct ccwgroup_device *gdev;
	int rc, i;
	char tmp_bus_id[CCW_BUS_ID_SIZE];
	const char *curr_buf;

	/* Allocate the group device plus its flexible array of slaves. */
	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
		       GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	atomic_set(&gdev->onoff, 0);
	mutex_init(&gdev->reg_mutex);
	/* Hold reg_mutex for the whole registration to serialize against
	 * ungrouping / driver removal. */
	mutex_lock(&gdev->reg_mutex);
	gdev->creator_id = creator_id;
	gdev->count = num_devices;
	gdev->dev.bus = &ccwgroup_bus_type;
	gdev->dev.parent = root;
	gdev->dev.release = ccwgroup_release;
	device_initialize(&gdev->dev);

	/* Look up each slave by bus id and claim it for this group. */
	curr_buf = buf;
	for (i = 0; i < num_devices && curr_buf; i++) {
		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
		if (rc != 0)
			goto error;
		if (!__is_valid_bus_id(tmp_bus_id)) {
			rc = -EINVAL;
			goto error;
		}
		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
		/*
		 * All devices have to be of the same type in
		 * order to be grouped.
		 */
		if (!gdev->cdev[i]
		    || gdev->cdev[i]->id.driver_info !=
		    gdev->cdev[0]->id.driver_info) {
			rc = -EINVAL;
			goto error;
		}
		/* Don't allow a device to belong to more than one group. */
		spin_lock_irq(gdev->cdev[i]->ccwlock);
		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			rc = -EINVAL;
			goto error;
		}
		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
		spin_unlock_irq(gdev->cdev[i]->ccwlock);
	}
	/* Check for sufficient number of bus ids. */
	if (i < num_devices && !curr_buf) {
		rc = -EINVAL;
		goto error;
	}
	/* Check for trailing stuff. */
	if (i == num_devices && strlen(curr_buf) > 0) {
		rc = -EINVAL;
		goto error;
	}

	/* The group device is named after its first slave. */
	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));

	rc = device_add(&gdev->dev);
	if (rc)
		goto error;
	get_device(&gdev->dev);
	rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
	if (rc) {
		device_unregister(&gdev->dev);
		goto error;
	}
	rc = __ccwgroup_create_symlinks(gdev);
	if (!rc) {
		mutex_unlock(&gdev->reg_mutex);
		put_device(&gdev->dev);
		return 0;
	}
	device_remove_file(&gdev->dev, &dev_attr_ungroup);
	device_unregister(&gdev->dev);
error:
	/* Release every slave claimed above and clear its back-pointer
	 * (only if it still points at us). */
	for (i = 0; i < num_devices; i++)
		if (gdev->cdev[i]) {
			spin_lock_irq(gdev->cdev[i]->ccwlock);
			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			put_device(&gdev->cdev[i]->dev);
			gdev->cdev[i] = NULL;
		}
	mutex_unlock(&gdev->reg_mutex);
	put_device(&gdev->dev);
	return rc;
}
EXPORT_SYMBOL(ccwgroup_create_from_string);
/* Bus notifier used to ungroup devices when their driver is unbound;
 * see ccwgroup_notifier() below for the implementation. */
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data);

static struct notifier_block ccwgroup_nb = {
	.notifier_call = ccwgroup_notifier
};
/*
 * Module init: register the ccwgroup bus type and attach the unbind
 * notifier.  The bus is unregistered again if the notifier cannot be
 * installed.
 */
static int __init init_ccwgroup(void)
{
	int rc;

	rc = bus_register(&ccwgroup_bus_type);
	if (rc)
		return rc;
	rc = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	if (rc)
		bus_unregister(&ccwgroup_bus_type);
	return rc;
}
/* Module exit: tear down the notifier and the bus in reverse order. */
static void __exit cleanup_ccwgroup(void)
{
	bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
/*
 * Bring a group device online via its driver's set_online callback.
 * The onoff atomic acts as a simple "operation in progress" lock.
 * Returns 0 on success (or if already online), -EAGAIN if another
 * online/offline operation is running, -EINVAL without a bound driver,
 * or the callback's error code.
 */
static int
ccwgroup_set_online(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret = 0;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_ONLINE)
		goto out;
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv(gdev->dev.driver);
	ret = gdrv->set_online ? gdrv->set_online(gdev) : 0;
	if (!ret)
		gdev->state = CCWGROUP_ONLINE;
out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}
/*
 * Take a group device offline via its driver's set_offline callback.
 * Mirror image of ccwgroup_set_online(); same locking and return
 * conventions.
 */
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret = 0;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_OFFLINE)
		goto out;
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv(gdev->dev.driver);
	ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0;
	if (!ret)
		gdev->state = CCWGROUP_OFFLINE;
out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}
/*
 * sysfs "online" attribute writer: "1" sets the group online, "0"
 * offline.  Pins the driver module for the duration of the callback.
 */
static ssize_t
ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;
	unsigned long value;
	int ret;

	if (!dev->driver)
		return -ENODEV;
	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);
	if (!try_module_get(gdrv->owner))
		return -EINVAL;

	ret = strict_strtoul(buf, 0, &value);
	if (ret)
		goto out;
	if (value == 1)
		ret = ccwgroup_set_online(gdev);
	else if (value == 0)
		ret = ccwgroup_set_offline(gdev);
	else
		ret = -EINVAL;
out:
	module_put(gdrv->owner);
	return (ret == 0) ? count : ret;
}
/* sysfs "online" attribute reader: "1\n" if online, else "0\n". */
static ssize_t
ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	if (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE)
		return sprintf(buf, "1\n");
	return sprintf(buf, "0\n");
}
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
/*
 * Bus probe: expose the "online" attribute, then hand the device to the
 * group driver's probe callback.  The attribute is removed again if the
 * driver rejects the device.
 */
static int
ccwgroup_probe (struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
	int ret;

	ret = device_create_file(dev, &dev_attr_online);
	if (ret)
		return ret;
	ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
	if (ret)
		device_remove_file(dev, &dev_attr_online);
	return ret;
}
/*
 * Bus remove: drop the sysfs attributes first, then let the group
 * driver clean up.  The attributes are removed even when no driver is
 * bound any more.
 */
static int
ccwgroup_remove (struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	device_remove_file(dev, &dev_attr_online);
	device_remove_file(dev, &dev_attr_ungroup);

	if (!dev->driver)
		return 0;
	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);
	if (gdrv->remove)
		gdrv->remove(gdev);
	return 0;
}
/* Bus shutdown: forward to the bound group driver, if any. */
static void ccwgroup_shutdown(struct device *dev)
{
	struct ccwgroup_driver *gdrv;

	if (!dev->driver)
		return;
	gdrv = to_ccwgroupdrv(dev->driver);
	if (gdrv->shutdown)
		gdrv->shutdown(to_ccwgroupdev(dev));
}
/*
 * PM prepare: refuse to suspend while an online/offline transition is
 * in flight; otherwise forward to the driver for online devices.
 */
static int ccwgroup_pm_prepare(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	/* Fail while device is being set online/offline. */
	if (atomic_read(&gdev->onoff))
		return -EAGAIN;

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}
/* PM complete: forward to the driver for bound, online devices. */
static void ccwgroup_pm_complete(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return;

	if (gdrv->complete)
		gdrv->complete(gdev);
}
/* PM freeze: forward to the driver for bound, online devices. */
static int ccwgroup_pm_freeze(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}
/* PM thaw: forward to the driver for bound, online devices. */
static int ccwgroup_pm_thaw(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}
/* PM restore: forward to the driver for bound, online devices. */
static int ccwgroup_pm_restore(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->restore ? gdrv->restore(gdev) : 0;
}
/* Power-management operations shared by all ccwgroup devices. */
static const struct dev_pm_ops ccwgroup_pm_ops = {
	.prepare = ccwgroup_pm_prepare,
	.complete = ccwgroup_pm_complete,
	.freeze = ccwgroup_pm_freeze,
	.thaw = ccwgroup_pm_thaw,
	.restore = ccwgroup_pm_restore,
};

/* The "ccwgroup" bus type itself (forward-declared near the top). */
static struct bus_type ccwgroup_bus_type = {
	.name   = "ccwgroup",
	.match  = ccwgroup_bus_match,
	.uevent = ccwgroup_uevent,
	.probe  = ccwgroup_probe,
	.remove = ccwgroup_remove,
	.shutdown = ccwgroup_shutdown,
	.pm = &ccwgroup_pm_ops,
};
/*
 * Bus notifier callback: when a driver is unbound from a group device,
 * schedule the device for ungrouping (cannot unregister synchronously
 * from notifier context).
 */
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBIND_DRIVER)
		device_schedule_callback(dev, ccwgroup_ungroup_callback);

	return NOTIFY_OK;
}
/**
 * ccwgroup_driver_register() - register a ccw group driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * It fills in the embedded struct device_driver (bus, name, owner)
 * before handing it to the driver core.
 */
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
	/* register our new driver with the core */
	cdriver->driver.bus = &ccwgroup_bus_type;
	cdriver->driver.name = cdriver->name;
	cdriver->driver.owner = cdriver->owner;

	return driver_register(&cdriver->driver);
}
/* driver_find_device() callback that accepts every device on the bus. */
static int
__ccwgroup_match_all(struct device *dev, void *data)
{
	return 1;
}
/**
 * ccwgroup_driver_unregister() - deregister a ccw group driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 * Before deregistering, every group device bound to the driver is
 * unregistered, since group devices must not outlive their driver.
 */
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
	struct device *dev;

	/* We don't want ccwgroup devices to live longer than their driver. */
	get_driver(&cdriver->driver);
	/* driver_find_device() returns each device with a reference held;
	 * drop it with put_device() after unregistering. */
	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
					 __ccwgroup_match_all))) {
		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

		mutex_lock(&gdev->reg_mutex);
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
		mutex_unlock(&gdev->reg_mutex);
		put_device(dev);
	}
	put_driver(&cdriver->driver);
	driver_unregister(&cdriver->driver);
}
/**
 * ccwgroup_probe_ccwdev() - probe function for slave devices
 * @cdev: ccw device to be probed
 *
 * This is a dummy probe function for ccw devices that are slave devices in
 * a ccw group device.
 * Returns:
 *  always %0
 */
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
	return 0;
}
/**
 * ccwgroup_remove_ccwdev() - remove function for slave devices
 * @cdev: ccw device to be removed
 *
 * This is a remove function for ccw devices that are slave devices in a ccw
 * group device. It sets the ccw device offline and also deregisters the
 * embedding ccw group device.
 */
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
	struct ccwgroup_device *gdev;

	/* Ignore offlining errors, device is gone anyway. */
	ccw_device_set_offline(cdev);
	/* If one of its devices is gone, the whole group is done for. */
	spin_lock_irq(cdev->ccwlock);
	gdev = dev_get_drvdata(&cdev->dev);
	if (!gdev) {
		/* Not (or no longer) part of a group - nothing to do. */
		spin_unlock_irq(cdev->ccwlock);
		return;
	}
	/* Get ccwgroup device reference for local processing. */
	get_device(&gdev->dev);
	spin_unlock_irq(cdev->ccwlock);
	/* Unregister group device. */
	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(&gdev->dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
	/* Release ccwgroup device reference for local processing. */
	put_device(&gdev->dev);
}
/* Public interface of the ccwgroup bus driver. */
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccwgroup_driver_register);
EXPORT_SYMBOL(ccwgroup_driver_unregister);
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
| gpl-2.0 |
sgstreet/linux-socfpga | arch/arm/mach-s5pv210/common.c | 108 | 6673 | /*
* Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Common Codes for S5PV210
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <clocksource/samsung_pwm.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/serial_core.h>
#include <linux/serial_s3c.h>
#include <asm/proc-fns.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/sdhci.h>
#include <plat/adc-core.h>
#include <plat/ata-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/keypad-core.h>
#include <plat/pwm-core.h>
#include <plat/tv-core.h>
#include <plat/spi-core.h>
#include "common.h"
/* CPU name reported for this SoC family. */
static const char name_s5pv210[] = "S5PV210/S5PC110";

/* CPU detection table, matched against the chip ID by s3c_init_cpu(). */
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= S5PV210_CPU_ID,
		.idmask		= S5PV210_CPU_MASK,
		.map_io		= s5pv210_map_io,
		.init_clocks	= s5pv210_init_clocks,
		.init_uarts	= s5pv210_init_uarts,
		.init		= s5pv210_init,
		.name		= name_s5pv210,
	},
};
/* Initial IO mappings */
/* Static virtual-to-physical mappings registered before the MMU page
 * tables are generally available (consumed by iotable_init()). */
static struct map_desc s5pv210_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(S5PV210_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(S5PV210_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(S5PV210_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(S5PV210_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(S5PV210_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(S5PV210_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO,
		.pfn		= __phys_to_pfn(S5PV210_PA_GPIO),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		/* The four vectored interrupt controllers. */
		.virtual	= (unsigned long)VA_VIC0,
		.pfn		= __phys_to_pfn(S5PV210_PA_VIC0),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)VA_VIC1,
		.pfn		= __phys_to_pfn(S5PV210_PA_VIC1),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)VA_VIC2,
		.pfn		= __phys_to_pfn(S5PV210_PA_VIC2),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)VA_VIC3,
		.pfn		= __phys_to_pfn(S5PV210_PA_VIC3),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(S3C_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		/* DRAM memory controllers. */
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(S5PV210_PA_DMC0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(S5PV210_PA_DMC1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(S5PV210_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}
};
/* Reboot the SoC by writing to the software reset register. */
void s5pv210_restart(enum reboot_mode mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}
/* PWM block description for the samsung_pwm clocksource driver;
 * output_mask is filled in by samsung_set_timer_source(). */
static struct samsung_pwm_variant s5pv210_pwm_variant = {
	.bits		= 32,
	.div_base	= 0,
	.has_tint_cstat	= true,
	.tclk_mask	= (1 << 5),
};
/*
 * Record which PWM channels are claimed as clockevent (@event) and
 * clocksource (@source); all remaining channels stay available as
 * outputs via output_mask.
 */
void __init samsung_set_timer_source(unsigned int event, unsigned int source)
{
	unsigned int mask = BIT(SAMSUNG_PWM_NUM) - 1;

	mask &= ~(BIT(event) | BIT(source));
	s5pv210_pwm_variant.output_mask = mask;
}
/* Hook up the PWM timer block as system clocksource/clockevent. */
void __init samsung_timer_init(void)
{
	/* One VIC interrupt per PWM timer channel. */
	unsigned int timer_irqs[SAMSUNG_PWM_NUM] = {
		IRQ_TIMER0_VIC, IRQ_TIMER1_VIC, IRQ_TIMER2_VIC,
		IRQ_TIMER3_VIC, IRQ_TIMER4_VIC,
	};

	samsung_pwm_clocksource_init(S3C_VA_TIMER,
					timer_irqs, &s5pv210_pwm_variant);
}
/*
* s5pv210_map_io
*
* register the standard cpu IO areas
*/
void __init s5pv210_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(s5pv210_iodesc, ARRAY_SIZE(s5pv210_iodesc));
	/* Board-specific mappings, if the machine supplied any. */
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);
	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));

	samsung_pwm_set_platdata(&s5pv210_pwm_variant);
}
/*
 * Rename/configure the common Samsung platform devices with the
 * S5PV210-specific device names before the drivers bind.
 */
void __init s5pv210_map_io(void)
{
	/* initialise device information early */
	s5pv210_default_sdhci0();
	s5pv210_default_sdhci1();
	s5pv210_default_sdhci2();
	s5pv210_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_cfcon_setname("s5pv210-pata");

	s3c_fimc_setname(0, "s5pv210-fimc");
	s3c_fimc_setname(1, "s5pv210-fimc");
	s3c_fimc_setname(2, "s5pv210-fimc");

	/* the i2c devices are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s3c_fb_setname("s5pv210-fb");

	/* Use s5pv210-keypad instead of samsung-keypad */
	samsung_keypad_setname("s5pv210-keypad");

	/* setup TV devices */
	s5p_hdmi_setname("s5pv210-hdmi");

	s3c64xx_spi_setname("s5pv210-spi");
}
/* Register base, common-S5P and SoC clocks, then compute rates. */
void __init s5pv210_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);
	s5pv210_register_clocks();
	s5pv210_setup_clocks();
}
/* Initialize the interrupt controllers of the SoC. */
void __init s5pv210_init_irq(void)
{
	/* S5PV210 supports 4 VICs; all of them are fully populated, so
	 * every valid-source bit is set. */
	u32 vic[4] = { ~0u, ~0u, ~0u, ~0u };

	s5p_init_irq(vic, ARRAY_SIZE(vic));
}
/* sysfs subsystem node for SoC-core attributes (PM hooks etc.). */
struct bus_type s5pv210_subsys = {
	.name		= "s5pv210-core",
	.dev_name	= "s5pv210-core",
};

/* Singleton device registered on the subsystem by s5pv210_init(). */
static struct device s5pv210_dev = {
	.bus	= &s5pv210_subsys,
};
/* Register the core subsystem early (core_initcall) so later initcalls
 * can attach devices to it. */
static int __init s5pv210_core_init(void)
{
	return subsys_system_register(&s5pv210_subsys, NULL);
}
core_initcall(s5pv210_core_init);
/* SoC init hook from the cpu table: register the core device. */
int __init s5pv210_init(void)
{
	printk(KERN_INFO "S5PV210: Initializing architecture\n");
	return device_register(&s5pv210_dev);
}
/* uart registration process */
/* Register the SoC UART platform devices from the board's cfg table. */
void __init s5pv210_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
}
| gpl-2.0 |
ouyangshiliang/hg255 | package/switch/src/switch-adm.c | 108 | 13371 | /*
* ADMTEK Adm6996 switch configuration module
*
* Copyright (C) 2005 Felix Fietkau <nbd@nbd.name>
*
* Partially based on Broadcom Home Networking Division 10/100 Mbit/s
* Ethernet Device Driver (from Montavista 2.4.20_mvl31 Kernel).
* Copyright (C) 2004 Broadcom Corporation
*
* adm_rreg function from adm6996
* Copyright (C) 2004 Nikki Chumakov <nikki@gattaca.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/sockios.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include "switch-core.h"
#include "gpio.h"
#ifdef CONFIG_BCM47XX
#include <nvram.h>
#endif
#define DRIVER_NAME "adm6996"
#define DRIVER_VERSION "0.01"

/* GPIO pin masks for the bit-banged EEPROM-style interface (chip
 * select, clock, data, reset).  Settable as module parameters;
 * auto-detected from nvram on BCM47xx boards in detect_adm(). */
static int eecs = 0;
static int eesk = 0;
static int eedi = 0;
static int eerc = 0;
static int force = 0;	/* force driver load even without matching boardflags */

MODULE_AUTHOR("Felix Fietkau <openwrt@nbd.name>");
MODULE_LICENSE("GPL");
module_param(eecs, int, 0);
module_param(eesk, int, 0);
module_param(eedi, int, 0);
module_param(eerc, int, 0);
module_param(force, int, 0);

/* Minimum timing constants */
#define EECK_EDGE_TIME 3 /* 3us - max(adm 2.5us, 93c 1us) */
#define EEDI_SETUP_TIME 1 /* 1us - max(adm 10ns, 93c 400ns) */
#define EECS_SETUP_TIME 1 /* 1us - max(adm no, 93c 200ns) */

/* Handy macros for writing fixed length values */
#define adm_write8(cs, b) { __u8 val = (__u8) (b); adm_write(cs, &val, sizeof(val)*8); }
#define adm_write16(cs, w) { __u16 val = hton16(w); adm_write(cs, (__u8 *)&val, sizeof(val)*8); }
#define adm_write32(cs, i) { uint32 val = hton32(i); adm_write(cs, (__u8 *)&val, sizeof(val)*8); }

#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
#ifdef CONFIG_BCM47XX
/* Return gpio pin number assigned to the named pin */
/*
* Variable should be in format:
*
* gpio<N>=pin_name
*
* 'def_pin' is returned if there is no such variable found.
*/
static unsigned int get_gpiopin(char *pin_name, unsigned int def_pin)
{
char name[] = "gpioXXXX";
char val[10];
unsigned int pin;
/* Go thru all possibilities till a match in pin name */
for (pin = 0; pin < 16; pin ++) {
sprintf(name, "gpio%d", pin);
if (nvram_getenv(name, val, sizeof(val)) >= 0) {
if (!strcmp(val, pin_name))
return pin;
}
}
return def_pin;
}
#endif
/*
 * Bit-bang @bits bits from @buf (MSB first) out on the EEPROM-style
 * serial interface; data is latched by the chip on the rising clock
 * edge.  @cs selects whether chip select is asserted for the transfer.
 */
static void adm_write(int cs, char *buf, unsigned int bits)
{
	int i, len = (bits + 7) / 8;
	__u8 mask;

	bcm47xx_gpio_out(eecs, (cs ? eecs : 0));
	udelay(EECK_EDGE_TIME);

	/* Byte assemble from MSB to LSB */
	for (i = 0; i < len; i++) {
		/* Bit bang from MSB to LSB */
		for (mask = 0x80; mask && bits > 0; mask >>= 1, bits --) {
			/* Clock low */
			bcm47xx_gpio_out(eesk, 0);
			udelay(EECK_EDGE_TIME);

			/* Output on rising edge */
			bcm47xx_gpio_out(eedi, ((mask & buf[i]) ? eedi : 0));
			udelay(EEDI_SETUP_TIME);

			/* Clock high */
			bcm47xx_gpio_out(eesk, eesk);
			udelay(EECK_EDGE_TIME);
		}
	}

	/* Clock low */
	bcm47xx_gpio_out(eesk, 0);
	udelay(EECK_EDGE_TIME);

	if (cs)
		bcm47xx_gpio_out(eecs, 0);
}
/*
 * Bit-bang @bits bits (MSB first) from the chip into @buf; data is
 * sampled on the rising clock edge.  Mirror image of adm_write().
 */
static void adm_read(int cs, char *buf, unsigned int bits)
{
	int i, len = (bits + 7) / 8;
	__u8 mask;

	bcm47xx_gpio_out(eecs, (cs ? eecs : 0));
	udelay(EECK_EDGE_TIME);

	/* Byte assemble from MSB to LSB */
	for (i = 0; i < len; i++) {
		__u8 byte;

		/* Bit bang from MSB to LSB */
		for (mask = 0x80, byte = 0; mask && bits > 0; mask >>= 1, bits --) {
			__u8 gp;

			/* Clock low */
			bcm47xx_gpio_out(eesk, 0);
			udelay(EECK_EDGE_TIME);

			/* Input on rising edge */
			gp = bcm47xx_gpio_in(~0);
			if (gp & eedi)
				byte |= mask;

			/* Clock high */
			bcm47xx_gpio_out(eesk, eesk);
			udelay(EECK_EDGE_TIME);
		}

		*buf++ = byte;
	}

	/* Clock low */
	bcm47xx_gpio_out(eesk, 0);
	udelay(EECK_EDGE_TIME);

	if (cs)
		bcm47xx_gpio_out(eecs, 0);
}
/* Enable outputs with specified value to the chip */
/* Drive the GPIO pins in @pins as outputs with initial level @val. */
static void adm_enout(__u8 pins, __u8 val)
{
	/* Prepare GPIO output value */
	bcm47xx_gpio_out(pins, val);

	/* Enable GPIO outputs */
	bcm47xx_gpio_outen(pins, pins);
	udelay(EECK_EDGE_TIME);
}
/* Disable outputs to the chip */
/* Tri-state the GPIO pins in @pins (disable their output drivers). */
static void adm_disout(__u8 pins)
{
	/* Disable GPIO outputs */
	bcm47xx_gpio_outen(pins, 0);
	udelay(EECK_EDGE_TIME);
}
/* Advance clock(s) */
/* Toggle the serial clock @clocks times without driving data. */
static void adm_adclk(int clocks)
{
	int i;

	for (i = 0; i < clocks; i++) {
		/* Clock high */
		bcm47xx_gpio_out(eesk, eesk);
		udelay(EECK_EDGE_TIME);

		/* Clock low */
		bcm47xx_gpio_out(eesk, 0);
		udelay(EECK_EDGE_TIME);
	}
}
/*
 * Read chip register @addr from register table @table (0 = 16-bit
 * configuration/EEPROM registers, 1 = 32-bit counter registers) and
 * return its value.
 */
static __u32 adm_rreg(__u8 table, __u8 addr)
{
	/* cmd: 01 10 T DD R RRRRRR */
	__u8 bits[6] = {
		0xFF, 0xFF, 0xFF, 0xFF,
		(0x06 << 4) | ((table & 0x01) << 3 | (addr&64)>>6),
		((addr&63)<<2)
	};

	__u8 rbits[4];

	/* Enable GPIO outputs with all pins to 0 */
	adm_enout((__u8)(eecs | eesk | eedi), 0);

	adm_write(0, bits, 46);
	/* Turn the data line around before clocking the reply in. */
	adm_disout((__u8)(eedi));
	adm_adclk(2);
	adm_read (0, rbits, 32);

	/* Extra clock(s) required per datasheet */
	adm_adclk(2);

	/* Disable GPIO outputs */
	adm_disout((__u8)(eecs | eesk));

	if (!table) /* EEPROM has 16-bit registers, but pumps out two registers in one request */
		return (addr & 0x01 ? (rbits[0]<<8) | rbits[1] : (rbits[2]<<8) | (rbits[3]));
	else
		return (rbits[0]<<24) | (rbits[1]<<16) | (rbits[2]<<8) | rbits[3];
}
/* Write chip configuration register */
/* Follow 93c66 timing and chip's min EEPROM timing requirement */
/*
 * Write @val into 16-bit configuration register @addr.
 * Follows 93c66 timing and the chip's minimum EEPROM timing.
 */
void
adm_wreg(__u8 addr, __u16 val)
{
	/* cmd(27bits): sb(1) + opc(01) + addr(bbbbbbbb) + data(bbbbbbbbbbbbbbbb) */
	__u8 bits[4] = {
		(0x05 << 5) | (addr >> 3),
		(addr << 5) | (__u8)(val >> 11),
		(__u8)(val >> 3),
		(__u8)(val << 5)
	};

	/* Enable GPIO outputs with all pins to 0 */
	adm_enout((__u8)(eecs | eesk | eedi), 0);

	/* Write cmd. Total 27 bits */
	adm_write(1, bits, 27);

	/* Extra clock(s) required per datasheet */
	adm_adclk(2);

	/* Disable GPIO outputs */
	adm_disout((__u8)(eecs | eesk | eedi));
}
/* Port configuration registers */
/* Register address of the per-port config register, indexed by port. */
static int port_conf[] = { 0x01, 0x03, 0x05, 0x07, 0x08, 0x09 };

/* Bits in VLAN port mapping */
/* Bit used for each port in the VLAN membership registers. */
static int vlan_ports[] = { 1 << 0, 1 << 2, 1 << 4, 1 << 6, 1 << 7, 1 << 8 };
/*
 * Format the member ports of VLAN group @nr into @buf.  Each port is
 * printed as its number, with 't' appended for tagged membership
 * (plus '*' if its PVID equals @nr) and 'u' for the untagged CPU port.
 * Returns the number of bytes written, 0 for an out-of-range @nr.
 */
static int handle_vlan_port_read(void *driver, char *buf, int nr)
{
	int ports, i, c, len = 0;

	if ((nr < 0) || (nr > 15))
		return 0;

	/* Get VLAN port map */
	ports = adm_rreg(0, 0x13 + nr);

	for (i = 0; i <= 5; i++) {
		if (ports & vlan_ports[i]) {
			c = adm_rreg(0, port_conf[i]);

			len += sprintf(buf + len, "%d", i);
			if (c & (1 << 4)) {
				/* Tagged port; '*' marks PVID == nr. */
				buf[len++] = 't';
				if (((c & (0xf << 10)) >> 10) == nr)
					buf[len++] = '*';
			} else if (i == 5)
				buf[len++] = 'u';

			buf[len++] = '\t';
		}
	}
	len += sprintf(buf + len, "\n");

	return len;
}
/*
 * Update the port membership of VLAN group @nr from the textual
 * configuration in @buf (parsed by switch_parse_vlan).  Member ports
 * get their tagging bit and PVID adjusted; all others are removed from
 * the group.  Returns 0 on success, -1 on bad input.
 *
 * Fix: unlike handle_vlan_port_read(), the original did not bounds
 * check @nr; an out-of-range group number would read/write a register
 * outside the VLAN map (0x13 + nr).
 */
static int handle_vlan_port_write(void *driver, char *buf, int nr)
{
	int i, cfg, ports;
	switch_driver *d = (switch_driver *) driver;
	switch_vlan_config *c;

	/* The chip has 16 VLAN groups; mirror the read handler's check. */
	if ((nr < 0) || (nr > 15))
		return -1;

	c = switch_parse_vlan(d, buf);
	if (c == NULL)
		return -1;

	/* Current membership bitmap of this VLAN group. */
	ports = adm_rreg(0, 0x13 + nr);
	for (i = 0; i < d->ports; i++) {
		if (c->port & (1 << i)) {
			ports |= vlan_ports[i];

			cfg = adm_rreg(0, port_conf[i]);

			/* Tagging */
			if (c->untag & (1 << i))
				cfg &= ~(1 << 4);
			else
				cfg |= (1 << 4);

			/* Set the port's PVID for untagged/pvid members. */
			if ((c->untag | c->pvid) & (1 << i))
				cfg = (cfg & ~(0xf << 10)) | (nr << 10);

			adm_wreg(port_conf[i], (__u16) cfg);
		} else {
			ports &= ~(vlan_ports[i]);
		}
	}
	adm_wreg(0x13 + nr, (__u16) ports);

	kfree(c);
	return 0;
}
/* Print "1\n" if port @nr is enabled (disable bit 5 clear), else "0\n". */
static int handle_port_enable_read(void *driver, char *buf, int nr)
{
	return sprintf(buf, "%d\n", ((adm_rreg(0, port_conf[nr]) & (1 << 5)) ? 0 : 1));
}
/*
 * Enable ('1') or disable ('0') port @nr by updating the disable bit
 * (bit 5) in its config register.  Returns 0, or -1 for other input.
 */
static int handle_port_enable_write(void *driver, char *buf, int nr)
{
	int reg = adm_rreg(0, port_conf[nr]);

	switch (buf[0]) {
	case '0':
		reg |= (1 << 5);
		break;
	case '1':
		reg &= ~(1 << 5);
		break;
	default:
		return -1;
	}

	adm_wreg(port_conf[nr], (__u16) reg);
	return 0;
}
/*
 * Report the link settings of port @nr (auto-negotiation, 100 Mbit,
 * full duplex) as text via switch_print_media().
 */
static int handle_port_media_read(void *driver, char *buf, int nr)
{
	int len;
	int media = 0;
	int reg = adm_rreg(0, port_conf[nr]);

	/* Translate config-register bits into SWITCH_MEDIA_* flags. */
	if (reg & (1 << 1))
		media |= SWITCH_MEDIA_AUTO;
	if (reg & (1 << 2))
		media |= SWITCH_MEDIA_100;
	if (reg & (1 << 3))
		media |= SWITCH_MEDIA_FD;

	len = switch_print_media(buf, media);
	return len + sprintf(buf + len, "\n");
}
/*
 * Set the link settings of port @nr from the textual description in
 * @buf (inverse of handle_port_media_read).  Returns 0 or -1 on a
 * parse error.
 */
static int handle_port_media_write(void *driver, char *buf, int nr)
{
	int media = switch_parse_media(buf);
	int reg = adm_rreg(0, port_conf[nr]);

	if (media < 0)
		return -1;

	/* Clear, then re-apply the auto/100/FD bits. */
	reg &= ~((1 << 1) | (1 << 2) | (1 << 3));
	if (media & SWITCH_MEDIA_AUTO)
		reg |= 1 << 1;
	if (media & SWITCH_MEDIA_100)
		reg |= 1 << 2;
	if (media & SWITCH_MEDIA_FD)
		reg |= 1 << 3;

	adm_wreg(port_conf[nr], reg);

	return 0;
}
static int handle_vlan_enable_read(void *driver, char *buf, int nr)
{
return sprintf(buf, "%d\n", ((adm_rreg(0, 0x11) & (1 << 5)) ? 1 : 0));
}
/*
 * Globally enable ('1') or disable ('0') VLAN mode via register 0x11,
 * bit 5.  Returns 0, or -1 for any other input character.
 */
static int handle_vlan_enable_write(void *driver, char *buf, int nr)
{
	int reg = adm_rreg(0, 0x11);

	if (buf[0] == '1')
		reg |= (1 << 5);
	else if (buf[0] == '0')
		reg &= ~(1 << 5);
	else return -1;

	adm_wreg(0x11, (__u16) reg);
	return 0;
}
/*
 * Reset the switch (if a reset GPIO is configured) and restore the
 * default configuration: CPU port tagged with PVID 0xf, VLAN mode on.
 * Always returns 0.
 */
static int handle_reset(void *driver, char *buf, int nr)
{
	int i;
	u32 cfg;

	/*
	 * Reset sequence: RC high->low(100ms)->high(30ms)
	 *
	 * WAR: Certain boards don't have the correct power on
	 * reset logic therefore we must explicitly perform the
	 * sequence in software.
	 */
	if (eerc) {
		/* Keep RC high for at least 20ms */
		adm_enout(eerc, eerc);
		for (i = 0; i < 20; i ++)
			udelay(1000);
		/* Keep RC low for at least 100ms */
		adm_enout(eerc, 0);
		for (i = 0; i < 100; i++)
			udelay(1000);
		/* Set default configuration */
		adm_enout((__u8)(eesk | eedi), eesk);
		/* Keep RC high for at least 30ms */
		adm_enout(eerc, eerc);
		for (i = 0; i < 30; i++)
			udelay(1000);
		/* Leave RC high and disable GPIO outputs */
		adm_disout((__u8)(eecs | eesk | eedi));
	}

	/* set up initial configuration for cpu port */
	cfg = (0x8000 |  /* Auto MDIX */
	       (0xf << 10) | /* PVID */
	       (1 << 4) | /* Tagging */
	       0xf); /* full duplex, 100Mbps, auto neg, flow ctrl */
	adm_wreg(port_conf[5], cfg);

	/* vlan mode select register (0x11): vlan on, mac clone */
	adm_wreg(0x11, 0xff30);

	return 0;
}
/* switchfs "registers" read: hex dump of configuration registers
 * 0x00 through 0x33, one per line. */
static int handle_registers(void *driver, char *buf, int nr)
{
	int reg, len = 0;

	for (reg = 0; reg <= 0x33; reg++)
		len += sprintf(buf + len, "0x%02x: 0x%04x\n",
			       reg, adm_rreg(0, reg));

	return len;
}
/* switchfs "counters" read: hex dump of counter registers (table 1)
 * 0x00 through 0x3c, one per line. */
static int handle_counters(void *driver, char *buf, int nr)
{
	int reg, len = 0;

	for (reg = 0; reg <= 0x3c; reg++)
		len += sprintf(buf + len, "0x%02x: 0x%08x\n",
			       reg, adm_rreg(1, reg));

	return len;
}
/*
 * Detect the ADM switch and work out which GPIO lines drive its
 * configuration interface (EECS/EESK/EEDI) and its reset line (EERC).
 *
 * On BCM47xx the decision is driven by nvram board identifiers, with
 * pin numbers optionally overridden through nvram variables; on other
 * platforms the switch is simply assumed to be present.
 *
 * Returns 1 if a switch is (assumed) present, 0 otherwise.
 */
static int detect_adm(void)
{
	int ret = 0;

#ifdef CONFIG_BCM47XX
	char buf[20];
	int boardflags = 0;
	int boardnum = 0;

	if (nvram_getenv("boardflags", buf, sizeof(buf)) >= 0)
		boardflags = simple_strtoul(buf, NULL, 0);
	if (nvram_getenv("boardnum", buf, sizeof(buf)) >= 0)
		boardnum = simple_strtoul(buf, NULL, 0);

	if ((boardnum == 44) && (boardflags == 0x0388)) {  /* Trendware TEW-411BRP+ */
		ret = 1;
		eecs = get_gpiopin("adm_eecs", 2);
		eesk = get_gpiopin("adm_eesk", 3);
		eedi = get_gpiopin("adm_eedi", 4);
		eerc = get_gpiopin("adm_rc", 5);
	} else if ((boardflags & 0x80) || force) {
		/* Generic "ADM switch present" board flag, or presence
		 * forced via the module parameter. */
		ret = 1;
		eecs = get_gpiopin("adm_eecs", 2);
		eesk = get_gpiopin("adm_eesk", 3);
		eedi = get_gpiopin("adm_eedi", 4);
		eerc = get_gpiopin("adm_rc", 0);
	} else if (nvram_getenv("boardtype", buf, sizeof(buf)) >= 0) {
		if (strcmp(buf, "bcm94710dev") == 0) {
			if (nvram_getenv("boardnum", buf, sizeof(buf)) >= 0) {
				if (strncmp(buf, "42", 2) == 0) {
					/* WRT54G v1.1 hack */
					eecs = 2;
					eesk = 3;
					eedi = 5;
					ret = 1;
				}
			}
		}
	}

	/* Convert GPIO pin numbers into single-bit masks. */
	if (eecs)
		eecs = (1 << eecs);
	if (eesk)
		eesk = (1 << eesk);
	if (eedi)
		eedi = (1 << eedi);
	if (eerc)
		eerc = (1 << eerc);
#else
	ret = 1;
#endif

	return ret;
}
/*
 * Module init: probe for the ADM switch and register the switchfs
 * driver interface.
 *
 * NOTE(review): the handler tables and the driver struct live on the
 * stack here — this assumes switch_register_driver() copies them;
 * confirm against the switchfs core.
 */
static int __init adm_init(void)
{
	switch_config cfg[] = {
		{"registers", handle_registers, NULL},
		{"counters", handle_counters, NULL},
		{"reset", NULL, handle_reset},
		{"enable_vlan", handle_vlan_enable_read, handle_vlan_enable_write},
		{NULL, NULL, NULL}
	};
	switch_config port[] = {
		{"enable", handle_port_enable_read, handle_port_enable_write},
		{"media", handle_port_media_read, handle_port_media_write},
		{NULL, NULL, NULL}
	};
	switch_config vlan[] = {
		{"ports", handle_vlan_port_read, handle_vlan_port_write},
		{NULL, NULL, NULL}
	};
	/* C99 designated initializers; the obsolete GNU "field:" form
	 * was used previously. */
	switch_driver driver = {
		.name = DRIVER_NAME,
		.version = DRIVER_VERSION,
		.interface = "eth0",
		.ports = 6,
		.cpuport = 5,
		.vlans = 16,
		.driver_handlers = cfg,
		.port_handlers = port,
		.vlan_handlers = vlan,
	};

	if (!detect_adm())
		return -ENODEV;

	return switch_register_driver(&driver);
}
/* Module exit: unregister the switchfs driver interface. */
static void __exit adm_exit(void)
{
	switch_unregister_driver(DRIVER_NAME);
}
module_init(adm_init);
module_exit(adm_exit);
| gpl-2.0 |
chen2011521/xt560_kernel | drivers/net/ll_temac_main.c | 108 | 29792 | /*
* Driver for Xilinx TEMAC Ethernet device
*
* Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
* Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
*
* This is a driver for the Xilinx ll_temac ipcore which is often used
* in the Virtex and Spartan series of chips.
*
* Notes:
* - The ll_temac hardware uses indirect access for many of the TEMAC
* registers, include the MDIO bus. However, indirect access to MDIO
* registers take considerably more clock cycles than to TEMAC registers.
* MDIO accesses are long, so threads doing them should probably sleep
* rather than busywait. However, since only one indirect access can be
* in progress at any given time, that means that *all* indirect accesses
* could end up sleeping (to wait for an MDIO access to complete).
* Fortunately none of the indirect accesses are on the 'hot' path for tx
* or rx, so this should be okay.
*
* TODO:
* - Factor out locallink DMA code into separate driver
* - Fix multicast assignment.
* - Fix support for hardware checksumming.
* - Testing. Lots and lots of testing.
*
*/
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>

#include "ll_temac.h"
#define TX_BD_NUM 64
#define RX_BD_NUM 128
/* ---------------------------------------------------------------------
* Low level register access functions
*/
/* Read a 32-bit TEMAC register at @offset via big-endian MMIO. */
u32 temac_ior(struct temac_local *lp, int offset)
{
	return in_be32((u32 *)(lp->regs + offset));
}
/* Write @value to the 32-bit TEMAC register at @offset (big-endian MMIO). */
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
	out_be32((u32 *) (lp->regs + offset), value);
}
/*
 * temac_indirect_busywait - wait for the indirect register interface
 * to report ready.
 *
 * The original computed "end - jiffies <= 0" with a signed 'end';
 * the subtraction is promoted to unsigned long, so the condition was
 * true only when the two values were exactly equal, and the timeout
 * could be skipped entirely if msleep() overslept a jiffy.  Use the
 * wrap-safe jiffies comparison macro instead.
 *
 * Return: 0 when the hardware is ready, -ETIMEDOUT after ~2 jiffies.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	unsigned long end = jiffies + 2;

	while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
		if (time_after_eq(jiffies, end)) {
			WARN_ON(1);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
/**
 * temac_indirect_in32 - read a TEMAC register through the indirect
 * access interface (CTL0 selects the register, LSW0 returns the data).
 *
 * lp->indirect_mutex must be held when calling this function.
 *
 * NOTE(review): on timeout this returns -ETIMEDOUT cast to u32, which
 * is indistinguishable from a register legitimately reading that
 * value — confirm callers tolerate this.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	u32 val;

	/* Wait for idle, latch the register number, wait for the read
	 * data to become valid, then fetch it. */
	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	val = temac_ior(lp, XTE_LSW0_OFFSET);

	return val;
}
/**
 * temac_indirect_out32 - write a TEMAC register through the indirect
 * access interface (data to LSW0, then register number + write-enable
 * to CTL0).
 *
 * lp->indirect_mutex must be held when calling this function.
 * A busywait timeout silently drops the write.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	if (temac_indirect_busywait(lp))
		return;
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
}
/**
 * temac_dma_in32 - Memory mapped DMA read, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
	/* reg << 2: DCR word index to byte offset */
	return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
/**
 * temac_dma_out32 - Memory mapped DMA write, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses
 */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
	/* reg << 2: DCR word index to byte offset */
	out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
}
/* DMA register access functions can be DCR based or memory mapped.
* The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
* memory mapped.
*/
#ifdef CONFIG_PPC_DCR
/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}
/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}
/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 *
 * Returns 0 when a DCR resource was found and mapped, -1 otherwise
 * (the caller then falls back to memory-mapped DMA registers).
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}
#else
/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze; always reports failure so the caller
 * falls back to memory-mapped DMA registers.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	return -1;
}
#endif
/**
 * temac_dma_bd_release - Release buffer descriptor rings
 *
 * Frees the pre-allocated RX skbs (unmapping their DMA buffers
 * first), then the coherent descriptor rings and the skb pointer
 * array.  Safe to call from the temac_dma_bd_init() error path,
 * where any of the allocations may have failed.
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* lp->rx_skb is NULL if its allocation failed; the original
	 * code dereferenced it unconditionally. */
	if (lp->rx_skb) {
		for (i = 0; i < RX_BD_NUM; i++) {
			if (!lp->rx_skb[i])
				break;
			dma_unmap_single(ndev->dev.parent,
					 lp->rx_bd_v[i].phys,
					 XTE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_skb[i]);
		}
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v, lp->tx_bd_p);
	/* kfree(NULL) is a no-op, so no guard is needed. */
	kfree(lp->rx_skb);
}
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 *
 * Allocates the TX/RX descriptor rings as coherent DMA memory, links
 * each descriptor circularly to the next, pre-loads every RX
 * descriptor with a freshly mapped skb, and programs the DMA channel
 * control and descriptor pointer registers.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (partially
 * allocated resources are released via temac_dma_bd_release()).
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	/* kcalloc() zeroes the array and checks the size computation
	 * for overflow, unlike the open-coded kzalloc(a * b). */
	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb) {
		dev_err(&ndev->dev,
				"can't allocate memory for DMA RX buffer\n");
		goto out;
	}
	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev,
				"unable to allocate DMA TX buffer descriptors");
		goto out;
	}
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev,
				"unable to allocate DMA RX buffer descriptors");
		goto out;
	}

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);

		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	/* Channel control: coalesce/delay thresholds plus interrupt
	 * enables (magic values preserved from the original). */
	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
/* ---------------------------------------------------------------------
* net_device_ops
*/
/*
 * Program the unicast address filter (UAW0/UAW1) from @address, or
 * from a randomly generated address when none is supplied or the
 * current one is invalid.  Takes the indirect-access mutex itself.
 * Always returns 0.
 */
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* set up unicast MAC address filter set its mac address */
	mutex_lock(&lp->indirect_mutex);
	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
			     (ndev->dev_addr[0]) |
			     (ndev->dev_addr[1] << 8) |
			     (ndev->dev_addr[2] << 16) |
			     (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1
	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
			     (ndev->dev_addr[4] & 0x000000ff) |
			     (ndev->dev_addr[5] << 8));
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
/* net_device_ops adapter: unwrap the sockaddr and delegate to the
 * common MAC-address setter. */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	return temac_set_mac_address(ndev, ((struct sockaddr *)p)->sa_data);
}
/*
 * Program the hardware address filter: enable promiscuous mode when
 * requested (or when the multicast list exceeds the CAM capacity),
 * otherwise load up to MULTICAST_CAM_TABLE_NUM multicast addresses
 * into the CAM, or clear the CAM and promiscuous bit when the list
 * is empty.
 */
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw, val;
	int i;

	mutex_lock(&lp->indirect_mutex);
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
		/*
		 *	We must make the kernel realise we had to move
		 *	into promisc mode or we start all out war on
		 *	the cable. If it was a promisc request the
		 *	flag is already set. If not we assert it.
		 */
		ndev->flags |= IFF_PROMISC;
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= MULTICAST_CAM_TABLE_NUM)
				break;
			/* MAW0 takes bytes 0-3, MAW1 takes bytes 4-5
			 * plus the CAM entry index in bits 16+. */
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
					     multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
					     multi_addr_lsw);
			i++;
		}
	} else {
		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
		temac_indirect_out32(lp, XTE_AFM_OFFSET,
				     val & ~XTE_AFM_EPPRM_MASK);
		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
	mutex_unlock(&lp->indirect_mutex);
}
/*
 * Table mapping XTE_OPTION_* flags to the TEMAC register and mask
 * bits implementing each option; walked by temac_setoptions().
 * Terminated by an all-zero entry.
 */
struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver? */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};
/**
 * temac_setoptions - apply the option bits in @options to the hardware
 *
 * Walks the temac_options table and, for every entry, clears its mask
 * bits and re-sets them only when the corresponding option is
 * requested.  The accepted options are accumulated in lp->options.
 * Always returns 0.
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp;
	int reg;

	mutex_lock(&lp->indirect_mutex);
	for (tp = temac_options; tp->opt; tp++) {
		reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt)
			reg |= tp->m_or;
		temac_indirect_out32(lp, tp->reg, reg);
	}
	lp->options |= options;
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
/* Initialize temac */
/*
 * Bring the MAC and its LocalLink DMA engine to a known state:
 * software-reset the receiver, transmitter and DMA channel (each
 * with a bounded busy-wait), rebuild the descriptor rings, then
 * re-apply the cached options, MAC address and multicast filter.
 */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	/* Clear the control registers, then re-enable RX flow control */
	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	mutex_unlock(&lp->indirect_mutex);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled. */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_set_mac_address(ndev, NULL);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	ndev->trans_start = jiffies; /* prevent tx timeout */
}
/*
 * PHY state-change callback: when the combined link state (speed,
 * duplex, link) changes, write the new speed into the TEMAC EMCFG
 * register and log the new state.
 */
void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	u32 mii_speed;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
		lp->last_link = link_state;
		phy_print_status(phy);
	}
	mutex_unlock(&lp->indirect_mutex);
}
/*
 * Reclaim completed TX descriptors starting at tx_bd_ci: unmap each
 * finished buffer, free the skb stored in app4 (set only on the
 * first descriptor of a frame), update stats, then wake the transmit
 * queue.  Called from the TX interrupt handler.
 */
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = cur_p->app0;

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += cur_p->len;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = cur_p->app0;
	}

	netif_wake_queue(ndev);
}
/* Return 0 if num_frag + 1 descriptors are free starting at the
 * current tail (app0 == 0 means free), else NETDEV_TX_BUSY. */
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	int slot = lp->tx_bd_tail;
	int needed;

	for (needed = num_frag + 1; needed > 0; needed--) {
		if (lp->tx_bd_v[slot].app0)
			return NETDEV_TX_BUSY;
		if (++slot >= TX_BD_NUM)
			slot = 0;
	}
	return 0;
}
/*
 * temac_start_xmit - queue an skb on the TX descriptor ring
 *
 * Fills one descriptor for the linear head of the skb and one per
 * page fragment, then kicks the DMA engine by advancing the tail
 * descriptor pointer.
 *
 * Return: NETDEV_TX_OK on success, NETDEV_TX_BUSY (with the queue
 * stopped) when the ring lacks enough free descriptors.
 */
static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t start_p, tail_p;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}
		return NETDEV_TX_BUSY;
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= 1; /* TX Checksum Enabled */
		cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= STS_CTRL_APP0_SOP;
	cur_p->len = skb_headlen(skb);
	/* Map only the linear head of the skb.  The original mapped
	 * skb->len bytes, which over-maps when fragments are present
	 * and does not match the length (cur_p->len) used when the
	 * buffer is unmapped in temac_start_xmit_done(). */
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->app4 = (unsigned long)skb;

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail++;
		if (lp->tx_bd_tail >= TX_BD_NUM)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     (void *)page_address(frag->page) +
					     frag->page_offset,
					     frag->size, DMA_TO_DEVICE);
		cur_p->len = frag->size;
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= STS_CTRL_APP0_EOP;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= TX_BD_NUM)
		lp->tx_bd_tail = 0;

	/* Kick off the transfer */
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}
/*
 * Receive path: drain completed RX descriptors under rx_lock.
 *
 * For each completed descriptor the attached skb is unmapped, handed
 * to the stack via netif_rx() and replaced by a freshly allocated,
 * freshly mapped skb; finally the RX tail pointer is advanced so the
 * DMA engine can reuse the reclaimed descriptors.
 */
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p;
	int length;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = cur_p->app0;
	while ((bdstat & STS_CTRL_APP0_CMPLT)) {

		skb = lp->rx_skb[lp->rx_bd_ci];
		/* received frame length lives in the low 14 bits of app4 */
		length = cur_p->app4 & 0x3FFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
			(skb->protocol == __constant_htons(ETH_P_IP)) &&
			(skb->len > 64)) {

			/* NOTE(review): a 16-bit value from app3 is
			 * reported as CHECKSUM_COMPLETE here - confirm
			 * the hardware checksum covers what the stack
			 * expects for that setting. */
			skb->csum = cur_p->app3 & 0xFFFF;
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);

		if (new_skb == 0) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     XTE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = cur_p->app0;
	}
	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/*
 * TX DMA interrupt: read then write back the status register to
 * acknowledge the cause bits, reap completed descriptors on
 * coalesce/delay events, and report DMA errors (bit 0x080).
 */
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & 0x080)
		dev_err(&ndev->dev, "DMA error 0x%x\n", status);

	return IRQ_HANDLED;
}
/*
 * RX DMA interrupt: acknowledge the cause bits, then drain completed
 * receive descriptors on coalesce/delay events.
 */
static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);

	return IRQ_HANDLED;
}
/*
 * ndo_open: connect and start the PHY (when one is specified in the
 * device tree), request both DMA interrupts, then reset the device.
 * On failure, all partially acquired resources are released in
 * reverse order.
 */
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     temac_adjust_link, 0, 0);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}

		phy_start(lp->phy_dev);
	}

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	temac_device_reset(ndev);
	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}
/*
 * ndo_stop: free both DMA interrupts, disconnect the PHY and release
 * the descriptor rings.  Always returns 0.
 */
static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "temac_close()\n");

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	temac_dma_bd_release(ndev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: with both DMA interrupts masked, run the RX and TX
 * handlers by hand.
 *
 * The original passed lp->tx_irq to the RX handler and lp->rx_irq to
 * the TX handler; both handlers ignore the irq argument, but pass the
 * matching number anyway for correctness.
 */
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
/* net_device_ops; note that the multicast hook is currently commented
 * out, so temac_set_multicast_list() is only invoked from
 * temac_device_reset(). */
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
* SYSFS device attributes
*/
/* sysfs read: dump LocalLink DMA registers 0x00-0x10 as hex words,
 * eight per line. */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int reg, len = 0;

	for (reg = 0; reg < 0x11; reg++) {
		const char *sep = ((reg % 8) == 7) ? "\n" : " ";

		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, reg), sep);
	}
	len += sprintf(buf + len, "\n");

	return len;
}
/* Expose the LocalLink register dump as a read-only (0440) sysfs
 * attribute, grouped for creation/removal in probe/remove. */
static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
/*
 * temac_of_probe - platform bus probe
 *
 * Allocates the net_device, maps the TEMAC register space, locates
 * the LocalLink DMA node (DCR- or memory-mapped) and its interrupts,
 * reads the MAC address from the device tree, sets up MDIO/PHY and
 * registers the network device plus its sysfs attributes.
 */
static int __devinit
temac_of_probe(struct platform_device *op, const struct of_device_id *match)
{
	struct device_node *np;
	struct temac_local *lp;
	struct net_device *ndev;
	const void *addr;
	__be32 *p;
	int size, rc = 0;

	/* Init network device structure */
	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev) {
		dev_err(&op->dev, "could not allocate device.\n");
		return -ENOMEM;
	}
	ether_setup(ndev);
	dev_set_drvdata(&op->dev, ndev);
	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
	ndev->netdev_ops = &temac_netdev_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	spin_lock_init(&lp->rx_lock);
	mutex_init(&lp->indirect_mutex);

	/* map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map temac regs.\n");
		goto nodev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p && be32_to_cpu(*p)) {
		lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;
	}
	p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p && be32_to_cpu(*p))
		lp->temac_features |= TEMAC_FEATURE_RX_CSUM;

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		goto err_iounmap;
	}

	/* Setup the DMA register accesses, could be DCR or memory mapped */
	if (temac_dcr_setup(lp, op, np)) {

		/* no DCR in the device tree, try non-DCR */
		lp->sdma_regs = of_iomap(np, 0);
		if (lp->sdma_regs) {
			lp->dma_in = temac_dma_in32;
			lp->dma_out = temac_dma_out32;
			dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
		} else {
			dev_err(&op->dev, "unable to map DMA registers\n");
			of_node_put(np);
			goto err_iounmap;
		}
	}

	lp->rx_irq = irq_of_parse_and_map(np, 0);
	lp->tx_irq = irq_of_parse_and_map(np, 1);

	of_node_put(np); /* Finished with the DMA node; drop the reference */

	if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
		dev_err(&op->dev, "could not determine irqs\n");
		rc = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != 6)) {
		dev_err(&op->dev, "could not find MAC address\n");
		rc = -ENODEV;
		goto err_iounmap_2;
	}
	temac_set_mac_address(ndev, (void *)addr);

	rc = temac_mdio_setup(lp, op->dev.of_node);
	if (rc)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	if (lp->phy_node)
		/* The original logged np->full_name here, but np was
		 * already released by of_node_put() above; log the PHY
		 * node that was actually looked up. */
		dev_dbg(lp->dev, "using PHY node %s (%p)\n",
			lp->phy_node->full_name, lp->phy_node);

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_iounmap_2;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

 err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
 err_iounmap_2:
	if (lp->sdma_regs)
		iounmap(lp->sdma_regs);
 err_iounmap:
	iounmap(lp->regs);
 nodev:
	free_netdev(ndev);
	ndev = NULL;
	return rc;
}
/*
 * Platform-bus remove: tear down MDIO, unregister the net_device,
 * remove the sysfs attributes, drop the PHY node reference, unmap
 * both register regions and free the net_device.
 */
static int __devexit temac_of_remove(struct platform_device *op)
{
	struct net_device *ndev = dev_get_drvdata(&op->dev);
	struct temac_local *lp = netdev_priv(ndev);

	temac_mdio_teardown(lp);
	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	lp->phy_node = NULL;
	dev_set_drvdata(&op->dev, NULL);
	iounmap(lp->regs);
	if (lp->sdma_regs)
		iounmap(lp->sdma_regs);
	free_netdev(ndev);
	return 0;
}
/* Device-tree "compatible" strings this driver binds to. */
static struct of_device_id temac_of_match[] __devinitdata = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
/* OF platform driver glue: ties the match table to probe/remove. */
static struct of_platform_driver temac_of_driver = {
	.probe = temac_of_probe,
	.remove = __devexit_p(temac_of_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};
/* Module init: register the OF platform driver. */
static int __init temac_init(void)
{
	return of_register_platform_driver(&temac_of_driver);
}
module_init(temac_init);
static void __exit temac_exit(void)
{
of_unregister_platform_driver(&temac_of_driver);
}
module_exit(temac_exit);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jackalchen/linux | drivers/staging/comedi/drivers/adl_pci6208.c | 620 | 5556 | /*
* adl_pci6208.c
* Comedi driver for ADLink 6208 series cards
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
* Driver: adl_pci6208
* Description: ADLink PCI-6208/6216 Series Multi-channel Analog Output Cards
* Devices: [ADLink] PCI-6208 (adl_pci6208), PCI-6216
* Author: nsyeow <nsyeow@pd.jaring.my>
* Updated: Wed, 11 Feb 2015 11:37:18 +0000
* Status: untested
*
* Configuration Options: not applicable, uses PCI auto config
*
* All supported devices share the same PCI device ID and are treated as a
* PCI-6216 with 16 analog output channels. On a PCI-6208, the upper 8
* channels exist in registers, but don't go to DAC chips.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include "../comedi_pci.h"
/*
* PCI-6208/6216-GL register map
*/
#define PCI6208_AO_CONTROL(x) (0x00 + (2 * (x)))
#define PCI6208_AO_STATUS 0x00
#define PCI6208_AO_STATUS_DATA_SEND (1 << 0)
#define PCI6208_DIO 0x40
#define PCI6208_DIO_DO_MASK (0x0f)
#define PCI6208_DIO_DO_SHIFT (0)
#define PCI6208_DIO_DI_MASK (0xf0)
#define PCI6208_DIO_DI_SHIFT (4)
/*
 * comedi_timeout() callback: report whether the previous analog-output
 * transfer has completed.  Returns 0 once the DATA_SEND status bit has
 * cleared, -EBUSY while the board is still shifting data to the DAC.
 */
static int pci6208_ao_eoc(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn,
			  unsigned long context)
{
	unsigned int ao_status = inw(dev->iobase + PCI6208_AO_STATUS);

	return (ao_status & PCI6208_AO_STATUS_DATA_SEND) ? -EBUSY : 0;
}
/*
 * Write insn->n samples to one analog-output channel.  Each write waits
 * for the previous D/A transfer to complete (the hardware transfer rate
 * is 2.2us) and mirrors the value into s->readback[] so the core can
 * service readback requests.
 */
static int pci6208_ao_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int val = s->readback[chan];
	int ret;
	int i;
	for (i = 0; i < insn->n; i++) {
		val = data[i];
		/* D/A transfer rate is 2.2us */
		ret = comedi_timeout(dev, s, insn, pci6208_ao_eoc, 0);
		if (ret)
			return ret;
		/* the hardware expects two's complement values */
		outw(comedi_offset_munge(s, val),
		     dev->iobase + PCI6208_AO_CONTROL(chan));
		s->readback[chan] = val;
	}
	return insn->n;
}
/*
 * Sample the 4 digital input lines.  Per the comedi (*insn_bits)
 * convention the sampled bits are returned in data[1].
 */
static int pci6208_di_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	unsigned int bits = inw(dev->iobase + PCI6208_DIO);

	data[1] = (bits & PCI6208_DIO_DI_MASK) >> PCI6208_DIO_DI_SHIFT;
	return insn->n;
}
/*
 * Update the 4 digital output lines.  comedi_dio_update_state() applies
 * the mask/bits pair from data[] to s->state and returns non-zero only
 * if something actually changed; the current state is echoed in data[1].
 */
static int pci6208_do_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	if (comedi_dio_update_state(s, data))
		outw(s->state, dev->iobase + PCI6208_DIO);
	data[1] = s->state;
	return insn->n;
}
/*
 * Board attach: enable the PCI device, map the I/O region (BAR 2) and
 * set up three subdevices: 16-channel AO (with readback), 4-channel DI
 * and 4-channel DO.  All supported boards are treated as a PCI-6216.
 */
static int pci6208_auto_attach(struct comedi_device *dev,
			       unsigned long context_unused)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	struct comedi_subdevice *s;
	unsigned int val;
	int ret;
	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;
	/* all registers live in the I/O region of BAR 2 */
	dev->iobase = pci_resource_start(pcidev, 2);
	ret = comedi_alloc_subdevices(dev, 3);
	if (ret)
		return ret;
	s = &dev->subdevices[0];
	/* analog output subdevice */
	s->type		= COMEDI_SUBD_AO;
	s->subdev_flags	= SDF_WRITABLE;
	s->n_chan	= 16;	/* Only 8 usable on PCI-6208 */
	s->maxdata	= 0xffff;
	s->range_table	= &range_bipolar10;
	s->insn_write	= pci6208_ao_insn_write;
	ret = comedi_alloc_subdev_readback(s);
	if (ret)
		return ret;
	s = &dev->subdevices[1];
	/* digital input subdevice */
	s->type		= COMEDI_SUBD_DI;
	s->subdev_flags	= SDF_READABLE;
	s->n_chan	= 4;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= pci6208_di_insn_bits;
	s = &dev->subdevices[2];
	/* digital output subdevice */
	s->type		= COMEDI_SUBD_DO;
	s->subdev_flags	= SDF_WRITABLE;
	s->n_chan	= 4;
	s->maxdata	= 1;
	s->range_table	= &range_digital;
	s->insn_bits	= pci6208_do_insn_bits;
	/*
	 * Get the read back signals from the digital outputs
	 * and save it as the initial state for the subdevice.
	 */
	val = inw(dev->iobase + PCI6208_DIO);
	val = (val & PCI6208_DIO_DO_MASK) >> PCI6208_DIO_DO_SHIFT;
	s->state = val;
	return 0;
}
/* Comedi driver registration and PCI glue for the adl_pci6208 driver. */
static struct comedi_driver adl_pci6208_driver = {
	.driver_name	= "adl_pci6208",
	.module		= THIS_MODULE,
	.auto_attach	= pci6208_auto_attach,
	.detach		= comedi_pci_detach,
};
/* PCI probe: hand the device to the comedi auto-config machinery. */
static int adl_pci6208_pci_probe(struct pci_dev *dev,
				 const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &adl_pci6208_driver,
				      id->driver_data);
}
/*
 * All boards share the ADLink 0x6208 device ID; some ship behind a
 * PLX 9050 bridge, matched by subsystem ID.
 */
static const struct pci_device_id adl_pci6208_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADLINK, 0x6208) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
			 0x9999, 0x6208) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, adl_pci6208_pci_table);
static struct pci_driver adl_pci6208_pci_driver = {
	.name		= "adl_pci6208",
	.id_table	= adl_pci6208_pci_table,
	.probe		= adl_pci6208_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(adl_pci6208_driver, adl_pci6208_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for ADLink 6208 series cards");
MODULE_LICENSE("GPL");
| gpl-2.0 |
cminyard/linux-live-app-coredump | arch/nios2/boot/compressed/console.c | 620 | 2432 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2008-2010 Thomas Chou <thomas@wytron.com.tw>
*/
#include <linux/io.h>
#if (defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) && defined(JTAG_UART_BASE))\
	|| (defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE) && defined(UART0_BASE))
/*
 * Early-boot "ioremap": on Nios II an uncached mapping is obtained by
 * OR-ing the I/O region base into the physical address — no page
 * tables are needed (or available) this early.
 */
static void *my_ioremap(unsigned long physaddr)
{
	return (void *)(physaddr | CONFIG_NIOS2_IO_REGION_BASE);
}
#endif
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) && defined(JTAG_UART_BASE)
/* Altera JTAG UART register offsets and control-register bit masks. */
#define ALTERA_JTAGUART_SIZE				8
#define ALTERA_JTAGUART_DATA_REG			0
#define ALTERA_JTAGUART_CONTROL_REG			4
#define ALTERA_JTAGUART_CONTROL_AC_MSK			(0x00000400)
#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK		(0xFFFF0000)
static void *uartbase;
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
/*
 * Bypass variant: if no JTAG host is draining the FIFO (no TX space),
 * drop the character rather than stall the boot.
 */
static void jtag_putc(int ch)
{
	if (readl(uartbase + ALTERA_JTAGUART_CONTROL_REG) &
		ALTERA_JTAGUART_CONTROL_WSPACE_MSK)
		writeb(ch, uartbase + ALTERA_JTAGUART_DATA_REG);
}
#else
/* Blocking variant: busy-wait until the TX FIFO reports free space. */
static void jtag_putc(int ch)
{
	while ((readl(uartbase + ALTERA_JTAGUART_CONTROL_REG) &
		ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0)
		;
	writeb(ch, uartbase + ALTERA_JTAGUART_DATA_REG);
}
#endif
static int putchar(int ch)
{
	jtag_putc(ch);
	return ch;
}
static void console_init(void)
{
	uartbase = my_ioremap((unsigned long) JTAG_UART_BASE);
	/*
	 * Write the AC bit to the control register — presumably
	 * write-1-to-clear; confirm against the Altera JTAG UART spec.
	 */
	writel(ALTERA_JTAGUART_CONTROL_AC_MSK,
		uartbase + ALTERA_JTAGUART_CONTROL_REG);
}
#elif defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE) && defined(UART0_BASE)
/* Altera (standard) UART register offsets and TX-ready status bit. */
#define ALTERA_UART_SIZE		32
#define ALTERA_UART_TXDATA_REG		4
#define ALTERA_UART_STATUS_REG		8
#define ALTERA_UART_DIVISOR_REG		16
#define ALTERA_UART_STATUS_TRDY_MSK	(0x40)
/*
 * NOTE(review): held as a plain unsigned integer rather than
 * void __iomem * — works on 32-bit Nios II but hides the I/O nature
 * of the pointer arithmetic below.
 */
static unsigned uartbase;
static void uart_putc(int ch)
{
	int i;
	/* Bounded wait for TRDY; after 0x10000 polls write anyway. */
	for (i = 0; (i < 0x10000); i++) {
		if (readw(uartbase + ALTERA_UART_STATUS_REG) &
			ALTERA_UART_STATUS_TRDY_MSK)
			break;
	}
	writeb(ch, uartbase + ALTERA_UART_TXDATA_REG);
}
/* Emit a character, expanding '\n' to CR-LF for terminals. */
static int putchar(int ch)
{
	uart_putc(ch);
	if (ch == '\n')
		uart_putc('\r');
	return ch;
}
/* Map the UART and program the baud-rate divisor from UART0_FREQ. */
static void console_init(void)
{
	unsigned int baud, baudclk;
	uartbase = (unsigned long) my_ioremap((unsigned long) UART0_BASE);
	baud = CONFIG_SERIAL_ALTERA_UART_BAUDRATE;
	baudclk = UART0_FREQ / baud;
	writew(baudclk, uartbase + ALTERA_UART_DIVISOR_REG);
}
#else
/* No early console configured: output is silently discarded. */
static int putchar(int ch)
{
	return ch;
}
static void console_init(void)
{
}
#endif
/* Minimal puts(): emit each character through the configured putchar(). */
static int puts(const char *s)
{
	for (; *s; s++)
		putchar(*s);
	return 0;
}
| gpl-2.0 |
halfline/linux | drivers/staging/speakup/speakup_audptr.c | 620 | 5903 | /*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* specificly written as a driver for the speakup screenreview
* s not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#include "serialio.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18 /* flush synth buffer */
#define PROCSPEECH '\r' /* start synth processing speech char */
/* Forward declarations for the spk_synth operations defined below. */
static int synth_probe(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
/*
 * Tunable synthesizer variables: each entry pairs a speakup variable ID
 * with the Audapter escape-sequence format string and its numeric
 * default/min/max parameters.
 */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"\x05[f99]" } },
	{ CAPS_STOP, .u.s = {"\x05[f80]" } },
	{ RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } },
	{ PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } },
	{ VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } },
	{ TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, NULL } },
	{ PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};
/*
 * These attributes will appear in /sys/accessibility/speakup/audptr.
 * All of them are read/write for the owner, read-only for others, and
 * funnel through the generic spk_var_show/spk_var_store helpers.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
	__ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
	__ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&pitch_attribute.attr,
	&punct_attribute.attr,
	&rate_attribute.attr,
	&tone_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
/*
 * spk_synth descriptor for the Audapter serial synthesizer: wires the
 * generic speakup serial helpers to this driver's probe/flush and
 * declares timing, control bytes and the sysfs attribute group.
 */
static struct spk_synth synth_audptr = {
	.name = "audptr",
	.version = DRV_VERSION,
	.long_name = "Audapter",
	.init = "\x05[D1]\x05[Ol]",
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	.delay = 400,
	.trigger = 50,
	.jiffies = 30,
	.full = 18000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = synth_probe,
	.release = spk_serial_release,
	.synth_immediate = spk_synth_immediate,
	.catch_up = spk_do_catch_up,
	.flush = synth_flush,
	.is_alive = spk_synth_is_alive_restart,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = NULL,
	/* this synth does not support index markers */
	.indexing = {
		.command = NULL,
		.lowindex = 0,
		.highindex = 0,
		.currindex = 0,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "audptr",
	},
};
/*
 * Discard queued speech: wait (bounded busy-wait, 1us per poll) for the
 * UART transmitter to go idle, push the CLEAR byte straight to the port
 * and then send PROCSPEECH to restart speech processing.
 */
static void synth_flush(struct spk_synth *synth)
{
	int timeout = SPK_XMITR_TIMEOUT;
	while (spk_serial_tx_busy()) {
		if (!--timeout)
			break;
		udelay(1);
	}
	outb(SYNTH_CLEAR, speakup_info.port_tts);
	spk_serial_out(PROCSPEECH);
}
/*
 * Query the synthesizer's version string ("\x05[Q]") and log it.  A
 * valid reply starts with 'A' and is read until a newline or the index
 * limit is reached.
 */
static void synth_version(struct spk_synth *synth)
{
	unsigned char test = 0;
	char synth_id[40] = "";
	spk_synth_immediate(synth, "\x05[Q]");
	synth_id[test] = spk_serial_in();
	if (synth_id[test] == 'A') {
		do {
			/* read version string from synth */
			/*
			 * NOTE(review): the bound is checked after the
			 * pre-increment, so indices up to 32 are written
			 * here and 33 below — within the 40-byte buffer,
			 * but only just; confirm if the limit changes.
			 */
			synth_id[++test] = spk_serial_in();
		} while (synth_id[test] != '\n' && test < 32);
		synth_id[++test] = 0x00;
	}
	if (synth_id[0] == 'A')
		pr_info("%s version: %s", synth->long_name, synth_id);
}
/*
 * Probe the serial port and, if it responds, read the firmware version.
 * synth->alive reflects the probe outcome.
 *
 * NOTE(review): returns 0 even when spk_serial_synth_probe() failed —
 * callers appear to rely on synth->alive rather than the return value;
 * confirm before changing.
 */
static int synth_probe(struct spk_synth *synth)
{
	int failed = 0;
	failed = spk_serial_synth_probe(synth);
	if (failed == 0)
		synth_version(synth);
	synth->alive = !failed;
	return 0;
}
/* Module parameters: serial port selection and autostart flag. */
module_param_named(ser, synth_audptr.ser, int, S_IRUGO);
module_param_named(start, synth_audptr.startup, short, S_IRUGO);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
module_spk_synth(synth_audptr);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Audapter synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
cricard13/linux-raspberry-nfc | fs/signalfd.c | 876 | 9376 | /*
* fs/signalfd.c
*
* Copyright (C) 2003 Linus Torvalds
*
* Mon Mar 5, 2007: Davide Libenzi <davidel@xmailserver.org>
* Changed ->read() to return a siginfo strcture instead of signal number.
* Fixed locking in ->poll().
* Added sighand-detach notification.
* Added fd re-use in sys_signalfd() syscall.
* Now using anonymous inode source.
* Thanks to Oleg Nesterov for useful code review and suggestions.
* More comments and suggestions from Arnd Bergmann.
* Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
* Retrieve multiple signals with one read() call
* Sun Jul 15, 2007: Davide Libenzi <davidel@xmailserver.org>
* Attach to the sighand only during read() and poll().
*/
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
#include <linux/syscalls.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
/*
 * Called when a sighand_struct is being torn down: wake any signalfd
 * waiters with POLLFREE so they detach before the waitqueue head goes
 * away.
 */
void signalfd_cleanup(struct sighand_struct *sighand)
{
	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
	/*
	 * The lockless check can race with remove_wait_queue() in progress,
	 * but in this case its caller should run under rcu_read_lock() and
	 * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
	 */
	if (likely(!waitqueue_active(wqh)))
		return;
	/* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
	wake_up_poll(wqh, POLLHUP | POLLFREE);
}
/* Per-fd state: just the set of signals this signalfd listens for. */
struct signalfd_ctx {
	sigset_t sigmask;
};
/* ->release(): free the context allocated in sys_signalfd4(). */
static int signalfd_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
/*
 * ->poll(): readable (POLLIN) when a signal from ctx->sigmask is
 * pending, either privately or shared.  The pending queues are
 * inspected under the current task's siglock.
 */
static unsigned int signalfd_poll(struct file *file, poll_table *wait)
{
	struct signalfd_ctx *ctx = file->private_data;
	unsigned int events = 0;
	poll_wait(file, &current->sighand->signalfd_wqh, wait);
	spin_lock_irq(&current->sighand->siglock);
	if (next_signal(&current->pending, &ctx->sigmask) ||
	    next_signal(&current->signal->shared_pending,
			&ctx->sigmask))
		events |= POLLIN;
	spin_unlock_irq(&current->sighand->siglock);
	return events;
}
/*
* Copied from copy_siginfo_to_user() in kernel/signal.c
*/
/*
 * Copied from copy_siginfo_to_user() in kernel/signal.c
 *
 * Translate a kernel siginfo_t into the fixed-layout (128-byte)
 * struct signalfd_siginfo in userspace.  Which extra fields are copied
 * depends on the __SI_* class encoded in si_code.  Returns the size of
 * the user structure on success, -EFAULT on any failed put_user.
 */
static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
			     siginfo_t const *kinfo)
{
	long err;
	BUILD_BUG_ON(sizeof(struct signalfd_siginfo) != 128);
	/*
	 * Unused members should be zero ...
	 */
	err = __clear_user(uinfo, sizeof(*uinfo));
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 */
	err |= __put_user(kinfo->si_signo, &uinfo->ssi_signo);
	err |= __put_user(kinfo->si_errno, &uinfo->ssi_errno);
	err |= __put_user((short) kinfo->si_code, &uinfo->ssi_code);
	switch (kinfo->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
		err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	case __SI_POLL:
		err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
		err |= __put_user(kinfo->si_fd, &uinfo->ssi_fd);
		break;
	case __SI_FAULT:
		err |= __put_user((long) kinfo->si_addr, &uinfo->ssi_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (kinfo->si_signo == SIGBUS &&
		    (kinfo->si_code == BUS_MCEERR_AR ||
		     kinfo->si_code == BUS_MCEERR_AO))
			err |= __put_user((short) kinfo->si_addr_lsb,
					  &uinfo->ssi_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user(kinfo->si_status, &uinfo->ssi_status);
		err |= __put_user(kinfo->si_utime, &uinfo->ssi_utime);
		err |= __put_user(kinfo->si_stime, &uinfo->ssi_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	default:
		/*
		 * This case catches also the signals queued by sigqueue().
		 */
		err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
		err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
		err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
		err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
		break;
	}
	return err ? -EFAULT: sizeof(*uinfo);
}
/*
 * Dequeue one signal matching ctx->sigmask.  In non-blocking mode an
 * empty queue yields -EAGAIN; otherwise the task sleeps on the sighand
 * signalfd waitqueue until a matching signal arrives or it is
 * interrupted (-ERESTARTSYS).  Returns the signal number (> 0) when a
 * signal was dequeued into *info.
 */
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
				int nonblock)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	spin_lock_irq(&current->sighand->siglock);
	ret = dequeue_signal(current, &ctx->sigmask, info);
	switch (ret) {
	case 0:
		/*
		 * Nothing pending: in blocking mode break out to the wait
		 * loop below; in non-blocking mode fall through to the
		 * default case and return -EAGAIN (deliberate fallthrough).
		 */
		if (!nonblock)
			break;
		ret = -EAGAIN;
	default:
		spin_unlock_irq(&current->sighand->siglock);
		return ret;
	}
	add_wait_queue(&current->sighand->signalfd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = dequeue_signal(current, &ctx->sigmask, info);
		if (ret != 0)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* drop the lock across the actual sleep */
		spin_unlock_irq(&current->sighand->siglock);
		schedule();
		spin_lock_irq(&current->sighand->siglock);
	}
	spin_unlock_irq(&current->sighand->siglock);
	remove_wait_queue(&current->sighand->signalfd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	return ret;
}
/*
 * Returns a multiple of the size of a "struct signalfd_siginfo", or a negative
 * error code. The "count" parameter must be at least the size of a
 * "struct signalfd_siginfo".
 */
static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct signalfd_ctx *ctx = file->private_data;
	struct signalfd_siginfo __user *siginfo;
	int nonblock = file->f_flags & O_NONBLOCK;
	ssize_t ret, total = 0;
	siginfo_t info;
	/* count is interpreted as a number of siginfo records */
	count /= sizeof(struct signalfd_siginfo);
	if (!count)
		return -EINVAL;
	siginfo = (struct signalfd_siginfo __user *) buf;
	do {
		ret = signalfd_dequeue(ctx, &info, nonblock);
		if (unlikely(ret <= 0))
			break;
		ret = signalfd_copyinfo(siginfo, &info);
		if (ret < 0)
			break;
		siginfo++;
		total += ret;
		/* only the first record may block; the rest are best-effort */
		nonblock = 1;
	} while (--count);
	/* if at least one record was delivered, report it over any error */
	return total ? total: ret;
}
#ifdef CONFIG_PROC_FS
/* /proc/<pid>/fdinfo handler: show the signal mask being watched. */
static void signalfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct signalfd_ctx *ctx = f->private_data;
	sigset_t sigmask;
	/* ctx->sigmask is stored inverted; undo that for display */
	sigmask = ctx->sigmask;
	signotset(&sigmask);
	render_sigset_t(m, "sigmask:\t", &sigmask);
}
#endif
/* File operations for the anonymous [signalfd] inode. */
static const struct file_operations signalfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= signalfd_show_fdinfo,
#endif
	.release	= signalfd_release,
	.poll		= signalfd_poll,
	.read		= signalfd_read,
	.llseek		= noop_llseek,
};
/*
 * signalfd4(2): create a new signalfd (ufd == -1) or update the mask of
 * an existing one.  SIGKILL/SIGSTOP are silently removed from the mask
 * and the mask is stored inverted (signotset) for the dequeue helpers.
 */
SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask, int, flags)
{
	sigset_t sigmask;
	struct signalfd_ctx *ctx;
	/* Check the SFD_* constants for consistency.  */
	BUILD_BUG_ON(SFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(SFD_NONBLOCK != O_NONBLOCK);
	if (flags & ~(SFD_CLOEXEC | SFD_NONBLOCK))
		return -EINVAL;
	if (sizemask != sizeof(sigset_t) ||
	    copy_from_user(&sigmask, user_mask, sizeof(sigmask)))
		return -EINVAL;
	sigdelsetmask(&sigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&sigmask);
	if (ufd == -1) {
		ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		ctx->sigmask = sigmask;
		/*
		 * When we call this, the initialization must be complete, since
		 * anon_inode_getfd() will install the fd.
		 */
		ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
				       O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
		if (ufd < 0)
			kfree(ctx);
	} else {
		/* update path: swap the mask on an existing signalfd */
		struct fd f = fdget(ufd);
		if (!f.file)
			return -EBADF;
		ctx = f.file->private_data;
		if (f.file->f_op != &signalfd_fops) {
			fdput(f);
			return -EINVAL;
		}
		spin_lock_irq(&current->sighand->siglock);
		ctx->sigmask = sigmask;
		spin_unlock_irq(&current->sighand->siglock);
		/* re-evaluate waiters against the new mask */
		wake_up(&current->sighand->signalfd_wqh);
		fdput(f);
	}
	return ufd;
}
/* Legacy signalfd(2): signalfd4 with no flags. */
SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
		size_t, sizemask)
{
	return sys_signalfd4(ufd, user_mask, sizemask, 0);
}
#ifdef CONFIG_COMPAT
/*
 * Compat entry: convert the 32-bit sigset to the native layout, stage
 * it in compat user space and forward to sys_signalfd4().
 */
COMPAT_SYSCALL_DEFINE4(signalfd4, int, ufd,
		     const compat_sigset_t __user *,sigmask,
		     compat_size_t, sigsetsize,
		     int, flags)
{
	compat_sigset_t ss32;
	sigset_t tmp;
	sigset_t __user *ksigmask;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (copy_from_user(&ss32, sigmask, sizeof(ss32)))
		return -EFAULT;
	sigset_from_compat(&tmp, &ss32);
	ksigmask = compat_alloc_user_space(sizeof(sigset_t));
	if (copy_to_user(ksigmask, &tmp, sizeof(sigset_t)))
		return -EFAULT;
	return sys_signalfd4(ufd, ksigmask, sizeof(sigset_t), flags);
}
COMPAT_SYSCALL_DEFINE3(signalfd, int, ufd,
		     const compat_sigset_t __user *,sigmask,
		     compat_size_t, sigsetsize)
{
	return compat_sys_signalfd4(ufd, sigmask, sigsetsize, 0);
}
#endif
| gpl-2.0 |
Warter21/linux-4.0_imx6 | arch/arm/mach-clps711x/board-p720t.c | 1388 | 11996 | /*
* linux/arch/arm/mach-clps711x/p720t.c
*
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <linux/sizes.h>
#include <linux/backlight.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <mach/hardware.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <video/platform_lcd.h>
#include "common.h"
#include "devices.h"
#define P720T_USERLED CLPS711X_GPIO(3, 0)
#define P720T_NAND_CLE CLPS711X_GPIO(4, 0)
#define P720T_NAND_ALE CLPS711X_GPIO(4, 1)
#define P720T_NAND_NCE CLPS711X_GPIO(4, 2)
#define P720T_NAND_BASE (CLPS711X_SDRAM1_BASE)
#define P720T_MMGPIO_BASE (CLPS711X_NR_GPIO)
#define SYSPLD_PHYS_BASE IOMEM(CS1_PHYS_BASE)
#define PLD_INT (SYSPLD_PHYS_BASE + 0x000000)
#define PLD_INT_MMGPIO_BASE (P720T_MMGPIO_BASE + 0)
#define PLD_INT_PENIRQ (PLD_INT_MMGPIO_BASE + 5)
#define PLD_INT_UCB_IRQ (PLD_INT_MMGPIO_BASE + 1)
#define PLD_INT_KBD_ATN (PLD_INT_MMGPIO_BASE + 0) /* EINT1 */
#define PLD_PWR (SYSPLD_PHYS_BASE + 0x000004)
#define PLD_PWR_MMGPIO_BASE (P720T_MMGPIO_BASE + 8)
#define PLD_PWR_EXT (PLD_PWR_MMGPIO_BASE + 5)
#define PLD_PWR_MODE (PLD_PWR_MMGPIO_BASE + 4) /* 1 = PWM, 0 = PFM */
#define PLD_S4_ON (PLD_PWR_MMGPIO_BASE + 3) /* LCD bias voltage enable */
#define PLD_S3_ON (PLD_PWR_MMGPIO_BASE + 2) /* LCD backlight enable */
#define PLD_S2_ON (PLD_PWR_MMGPIO_BASE + 1) /* LCD 3V3 supply enable */
#define PLD_S1_ON (PLD_PWR_MMGPIO_BASE + 0) /* LCD 3V supply enable */
#define PLD_KBD (SYSPLD_PHYS_BASE + 0x000008)
#define PLD_KBD_MMGPIO_BASE (P720T_MMGPIO_BASE + 16)
#define PLD_KBD_WAKE (PLD_KBD_MMGPIO_BASE + 1)
#define PLD_KBD_EN (PLD_KBD_MMGPIO_BASE + 0)
#define PLD_SPI (SYSPLD_PHYS_BASE + 0x00000c)
#define PLD_SPI_MMGPIO_BASE (P720T_MMGPIO_BASE + 24)
#define PLD_SPI_EN (PLD_SPI_MMGPIO_BASE + 0)
#define PLD_IO (SYSPLD_PHYS_BASE + 0x000010)
#define PLD_IO_MMGPIO_BASE (P720T_MMGPIO_BASE + 32)
#define PLD_IO_BOOTSEL (PLD_IO_MMGPIO_BASE + 6) /* Boot sel switch */
#define PLD_IO_USER (PLD_IO_MMGPIO_BASE + 5) /* User defined switch */
#define PLD_IO_LED3 (PLD_IO_MMGPIO_BASE + 4)
#define PLD_IO_LED2 (PLD_IO_MMGPIO_BASE + 3)
#define PLD_IO_LED1 (PLD_IO_MMGPIO_BASE + 2)
#define PLD_IO_LED0 (PLD_IO_MMGPIO_BASE + 1)
#define PLD_IO_LEDEN (PLD_IO_MMGPIO_BASE + 0)
#define PLD_IRDA (SYSPLD_PHYS_BASE + 0x000014)
#define PLD_IRDA_MMGPIO_BASE (P720T_MMGPIO_BASE + 40)
#define PLD_IRDA_EN (PLD_IRDA_MMGPIO_BASE + 0)
#define PLD_COM2 (SYSPLD_PHYS_BASE + 0x000018)
#define PLD_COM2_MMGPIO_BASE (P720T_MMGPIO_BASE + 48)
#define PLD_COM2_EN (PLD_COM2_MMGPIO_BASE + 0)
#define PLD_COM1 (SYSPLD_PHYS_BASE + 0x00001c)
#define PLD_COM1_MMGPIO_BASE (P720T_MMGPIO_BASE + 56)
#define PLD_COM1_EN (PLD_COM1_MMGPIO_BASE + 0)
#define PLD_AUD (SYSPLD_PHYS_BASE + 0x000020)
#define PLD_AUD_MMGPIO_BASE (P720T_MMGPIO_BASE + 64)
#define PLD_AUD_DIV1 (PLD_AUD_MMGPIO_BASE + 6)
#define PLD_AUD_DIV0 (PLD_AUD_MMGPIO_BASE + 5)
#define PLD_AUD_CLK_SEL1 (PLD_AUD_MMGPIO_BASE + 4)
#define PLD_AUD_CLK_SEL0 (PLD_AUD_MMGPIO_BASE + 3)
#define PLD_AUD_MIC_PWR (PLD_AUD_MMGPIO_BASE + 2)
#define PLD_AUD_MIC_GAIN (PLD_AUD_MMGPIO_BASE + 1)
#define PLD_AUD_CODEC_EN (PLD_AUD_MMGPIO_BASE + 0)
#define PLD_CF (SYSPLD_PHYS_BASE + 0x000024)
#define PLD_CF_MMGPIO_BASE (P720T_MMGPIO_BASE + 72)
#define PLD_CF2_SLEEP (PLD_CF_MMGPIO_BASE + 5)
#define PLD_CF1_SLEEP (PLD_CF_MMGPIO_BASE + 4)
#define PLD_CF2_nPDREQ (PLD_CF_MMGPIO_BASE + 3)
#define PLD_CF1_nPDREQ (PLD_CF_MMGPIO_BASE + 2)
#define PLD_CF2_nIRQ (PLD_CF_MMGPIO_BASE + 1)
#define PLD_CF1_nIRQ (PLD_CF_MMGPIO_BASE + 0)
#define PLD_SDC (SYSPLD_PHYS_BASE + 0x000028)
#define PLD_SDC_MMGPIO_BASE (P720T_MMGPIO_BASE + 80)
#define PLD_SDC_INT_EN (PLD_SDC_MMGPIO_BASE + 2)
#define PLD_SDC_WP (PLD_SDC_MMGPIO_BASE + 1)
#define PLD_SDC_CD (PLD_SDC_MMGPIO_BASE + 0)
#define PLD_CODEC (SYSPLD_PHYS_BASE + 0x400000)
#define PLD_CODEC_MMGPIO_BASE (P720T_MMGPIO_BASE + 88)
#define PLD_CODEC_IRQ3 (PLD_CODEC_MMGPIO_BASE + 4)
#define PLD_CODEC_IRQ2 (PLD_CODEC_MMGPIO_BASE + 3)
#define PLD_CODEC_IRQ1 (PLD_CODEC_MMGPIO_BASE + 2)
#define PLD_CODEC_EN (PLD_CODEC_MMGPIO_BASE + 0)
#define PLD_BRITE (SYSPLD_PHYS_BASE + 0x400004)
#define PLD_BRITE_MMGPIO_BASE (P720T_MMGPIO_BASE + 96)
#define PLD_BRITE_UP (PLD_BRITE_MMGPIO_BASE + 1)
#define PLD_BRITE_DN (PLD_BRITE_MMGPIO_BASE + 0)
#define PLD_LCDEN (SYSPLD_PHYS_BASE + 0x400008)
#define PLD_LCDEN_MMGPIO_BASE (P720T_MMGPIO_BASE + 104)
#define PLD_LCDEN_EN (PLD_LCDEN_MMGPIO_BASE + 0)
#define PLD_TCH (SYSPLD_PHYS_BASE + 0x400010)
#define PLD_TCH_MMGPIO_BASE (P720T_MMGPIO_BASE + 112)
#define PLD_TCH_PENIRQ (PLD_TCH_MMGPIO_BASE + 1)
#define PLD_TCH_EN (PLD_TCH_MMGPIO_BASE + 0)
#define PLD_GPIO (SYSPLD_PHYS_BASE + 0x400014)
#define PLD_GPIO_MMGPIO_BASE (P720T_MMGPIO_BASE + 120)
#define PLD_GPIO2 (PLD_GPIO_MMGPIO_BASE + 2)
#define PLD_GPIO1 (PLD_GPIO_MMGPIO_BASE + 1)
#define PLD_GPIO0 (PLD_GPIO_MMGPIO_BASE + 0)
/*
 * Board GPIOs claimed in one shot at init_late time via
 * gpio_request_array().  The table is only ever read, so it is declared
 * const: __initconst requires a const object (a non-const object in
 * .init.rodata causes a section type conflict), and
 * gpio_request_array() takes a const struct gpio * anyway.
 */
static const struct gpio p720t_gpios[] __initconst = {
	{ PLD_S1_ON,	GPIOF_OUT_INIT_LOW,	"PLD_S1_ON" },
	{ PLD_S2_ON,	GPIOF_OUT_INIT_LOW,	"PLD_S2_ON" },
	{ PLD_S3_ON,	GPIOF_OUT_INIT_LOW,	"PLD_S3_ON" },
	{ PLD_S4_ON,	GPIOF_OUT_INIT_LOW,	"PLD_S4_ON" },
	{ PLD_KBD_EN,	GPIOF_OUT_INIT_LOW,	"PLD_KBD_EN" },
	{ PLD_SPI_EN,	GPIOF_OUT_INIT_LOW,	"PLD_SPI_EN" },
	{ PLD_IO_USER,	GPIOF_OUT_INIT_LOW,	"PLD_IO_USER" },
	{ PLD_IO_LED0,	GPIOF_OUT_INIT_LOW,	"PLD_IO_LED0" },
	{ PLD_IO_LED1,	GPIOF_OUT_INIT_LOW,	"PLD_IO_LED1" },
	{ PLD_IO_LED2,	GPIOF_OUT_INIT_LOW,	"PLD_IO_LED2" },
	{ PLD_IO_LED3,	GPIOF_OUT_INIT_LOW,	"PLD_IO_LED3" },
	{ PLD_IO_LEDEN,	GPIOF_OUT_INIT_LOW,	"PLD_IO_LEDEN" },
	{ PLD_IRDA_EN,	GPIOF_OUT_INIT_LOW,	"PLD_IRDA_EN" },
	{ PLD_COM1_EN,	GPIOF_OUT_INIT_HIGH,	"PLD_COM1_EN" },
	{ PLD_COM2_EN,	GPIOF_OUT_INIT_HIGH,	"PLD_COM2_EN" },
	{ PLD_CODEC_EN,	GPIOF_OUT_INIT_LOW,	"PLD_CODEC_EN" },
	{ PLD_LCDEN_EN,	GPIOF_OUT_INIT_LOW,	"PLD_LCDEN_EN" },
	{ PLD_TCH_EN,	GPIOF_OUT_INIT_LOW,	"PLD_TCH_EN" },
	{ P720T_USERLED,GPIOF_OUT_INIT_LOW,	"USER_LED" },
};
/*
 * Template resource/pdata/platform_device for the basic-mmio-gpio
 * driver.  p720t_mmgpio_init() patches the resource start and gpio base
 * and registers the device once per PLD register (see the mmgpios
 * table below).
 *
 * NOTE(review): the same static platform_device object is registered
 * repeatedly with mutated fields, and all of it is __initdata even
 * though platform_device_register() links the device into the device
 * model beyond init — confirm against the platform-device lifetime
 * rules before relying on this pattern elsewhere.
 */
static struct resource p720t_mmgpio_resource[] __initdata = {
	DEFINE_RES_MEM_NAMED(0, 4, "dat"),
};
static struct bgpio_pdata p720t_mmgpio_pdata = {
	.ngpio	= 8,
};
static struct platform_device p720t_mmgpio __initdata = {
	.name		= "basic-mmio-gpio",
	.id		= -1,
	.resource	= p720t_mmgpio_resource,
	.num_resources	= ARRAY_SIZE(p720t_mmgpio_resource),
	.dev		= {
		.platform_data	= &p720t_mmgpio_pdata,
	},
};
/* Register one 8-bit mmio-gpio bank at addrbase with GPIO base gpiobase. */
static void __init p720t_mmgpio_init(void __iomem *addrbase, int gpiobase)
{
	p720t_mmgpio_resource[0].start = (unsigned long)addrbase;
	p720t_mmgpio_pdata.base = gpiobase;
	platform_device_register(&p720t_mmgpio);
}
/*
 * PLD register -> GPIO base mapping, iterated by p720t_init() to create
 * one mmio-gpio bank per register.
 *
 * NOTE(review): object is __initconst but not declared const — the
 * annotation normally requires a const object; confirm whether this
 * should be "static const struct { ... }".
 */
static struct {
	void __iomem *addrbase;
	int gpiobase;
} mmgpios[] __initconst = {
	{ PLD_INT,	PLD_INT_MMGPIO_BASE },
	{ PLD_PWR,	PLD_PWR_MMGPIO_BASE },
	{ PLD_KBD,	PLD_KBD_MMGPIO_BASE },
	{ PLD_SPI,	PLD_SPI_MMGPIO_BASE },
	{ PLD_IO,	PLD_IO_MMGPIO_BASE },
	{ PLD_IRDA,	PLD_IRDA_MMGPIO_BASE },
	{ PLD_COM2,	PLD_COM2_MMGPIO_BASE },
	{ PLD_COM1,	PLD_COM1_MMGPIO_BASE },
	{ PLD_AUD,	PLD_AUD_MMGPIO_BASE },
	{ PLD_CF,	PLD_CF_MMGPIO_BASE },
	{ PLD_SDC,	PLD_SDC_MMGPIO_BASE },
	{ PLD_CODEC,	PLD_CODEC_MMGPIO_BASE },
	{ PLD_BRITE,	PLD_BRITE_MMGPIO_BASE },
	{ PLD_LCDEN,	PLD_LCDEN_MMGPIO_BASE },
	{ PLD_TCH,	PLD_TCH_MMGPIO_BASE },
	{ PLD_GPIO,	PLD_GPIO_MMGPIO_BASE },
};
/*
 * gpio-nand device: the NAND data latch sits in the SDRAM1 window and
 * CLE/ALE/nCE are driven through CLPS711X GPIOs; ready and
 * write-protect lines are not wired (-1).
 *
 * NOTE(review): the platform_device and its resources are __initdata
 * yet registered with platform_device_register() in p720t_init() —
 * the device model keeps referencing them after init memory is freed;
 * confirm intended lifetime.
 */
static struct resource p720t_nand_resource[] __initdata = {
	DEFINE_RES_MEM(P720T_NAND_BASE, SZ_4),
};
static struct mtd_partition p720t_nand_parts[] __initdata = {
	{
		.name	= "Flash partition 1",
		.offset	= 0,
		.size	= SZ_2M,
	},
	{
		.name	= "Flash partition 2",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};
static struct gpio_nand_platdata p720t_nand_pdata __initdata = {
	.gpio_rdy	= -1,
	.gpio_nce	= P720T_NAND_NCE,
	.gpio_ale	= P720T_NAND_ALE,
	.gpio_cle	= P720T_NAND_CLE,
	.gpio_nwp	= -1,
	.chip_delay	= 15,
	.parts		= p720t_nand_parts,
	.num_parts	= ARRAY_SIZE(p720t_nand_parts),
};
static struct platform_device p720t_nand_pdev __initdata = {
	.name		= "gpio-nand",
	.id		= -1,
	.resource	= p720t_nand_resource,
	.num_resources	= ARRAY_SIZE(p720t_nand_resource),
	.dev		= {
		.platform_data	= &p720t_nand_pdata,
	},
};
/*
 * Switch the LCD supplies on or off.  The GPIO sequence is deliberate:
 * the enable line (PLD_LCDEN_EN) is asserted first on power-up and
 * released last on power-down.
 */
static void p720t_lcd_power_set(struct plat_lcd_data *pd, unsigned int power)
{
	if (power) {
		gpio_set_value(PLD_LCDEN_EN, 1);
		gpio_set_value(PLD_S1_ON, 1);
		gpio_set_value(PLD_S2_ON, 1);
		gpio_set_value(PLD_S4_ON, 1);
	} else {
		gpio_set_value(PLD_S1_ON, 0);
		gpio_set_value(PLD_S2_ON, 0);
		gpio_set_value(PLD_S4_ON, 0);
		gpio_set_value(PLD_LCDEN_EN, 0);
	}
}

/* Hook for the "platform-lcd" driver registered in p720t_init_late() */
static struct plat_lcd_data p720t_lcd_power_pdata = {
	.set_power = p720t_lcd_power_set,
};
/* Backlight is a single on/off GPIO, so intensity is effectively 0 or 1 */
static void p720t_lcd_backlight_set_intensity(int intensity)
{
	gpio_set_value(PLD_S3_ON, intensity);
}

/* max_intensity of 1 matches the single-GPIO backlight above */
static struct generic_bl_info p720t_lcd_backlight_pdata = {
	.name = "lcd-backlight.0",
	.default_intensity = 0x01,
	.max_intensity = 0x01,
	.set_bl_intensity = p720t_lcd_backlight_set_intensity,
};
/*
 * Build a minimal ATAG list (core tag + one memory bank + terminator) when
 * the bootloader did not provide one.
 */
static void __init
fixup_p720t(struct tag *tag, char **cmdline)
{
	/*
	 * Our bootloader doesn't setup any tags (yet).
	 */
	if (tag->hdr.tag != ATAG_CORE) {
		tag->hdr.tag = ATAG_CORE;
		tag->hdr.size = tag_size(tag_core);
		tag->u.core.flags = 0;
		tag->u.core.pagesize = PAGE_SIZE;
		tag->u.core.rootdev = 0x0100;
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_MEM;
		tag->hdr.size = tag_size(tag_mem32);
		/* NOTE(review): 4096 reads like a byte count, i.e. a single
		 * page of RAM - confirm the intended memory bank size. */
		tag->u.mem.size = 4096;
		tag->u.mem.start = PHYS_OFFSET;
		tag = tag_next(tag);
		tag->hdr.tag = ATAG_NONE;
		tag->hdr.size = 0;
	}
}
/* Single user LED, blinking with the heartbeat trigger by default */
static struct gpio_led p720t_gpio_leds[] = {
	{
		.name = "User LED",
		.default_trigger = "heartbeat",
		.gpio = P720T_USERLED,
	},
};

static struct gpio_led_platform_data p720t_gpio_led_pdata __initdata = {
	.leds = p720t_gpio_leds,
	.num_leds = ARRAY_SIZE(p720t_gpio_leds),
};
/*
 * Board init: register the core CLPS711x devices, every PLD GPIO bank
 * from the mmgpios[] table, and the GPIO-driven NAND controller.
 */
static void __init p720t_init(void)
{
	int i;

	clps711x_devices_init();
	for (i = 0; i < ARRAY_SIZE(mmgpios); i++)
		p720t_mmgpio_init(mmgpios[i].addrbase, mmgpios[i].gpiobase);
	platform_device_register(&p720t_nand_pdev);
}
/*
 * Late init: claim the board GPIO array, then register the LCD power,
 * backlight, framebuffer and LED platform devices.
 */
static void __init p720t_init_late(void)
{
	/* Failure to grab the GPIOs is logged but not fatal */
	WARN_ON(gpio_request_array(p720t_gpios, ARRAY_SIZE(p720t_gpios)));
	platform_device_register_data(NULL, "platform-lcd", 0,
				      &p720t_lcd_power_pdata,
				      sizeof(p720t_lcd_power_pdata));
	platform_device_register_data(NULL, "generic-bl", 0,
				      &p720t_lcd_backlight_pdata,
				      sizeof(p720t_lcd_backlight_pdata));
	platform_device_register_simple("video-clps711x", 0, NULL, 0);
	platform_device_register_data(NULL, "leds-gpio", 0,
				      &p720t_gpio_led_pdata,
				      sizeof(p720t_gpio_led_pdata));
}
/* Machine descriptor for the ARM Prospector 720T reference board */
MACHINE_START(P720T, "ARM-Prospector720T")
	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
	.atag_offset = 0x100,
	.fixup = fixup_p720t,
	.map_io = clps711x_map_io,
	.init_irq = clps711x_init_irq,
	.init_time = clps711x_timer_init,
	.init_machine = p720t_init,
	.init_late = p720t_init_late,
	.restart = clps711x_restart,
MACHINE_END
| gpl-2.0 |
shinobisoft/android_kernel_lge_msm8226 | arch/arm/mach-msm/qdsp6v2/adsp-loader.c | 1644 | 5533 | /*
* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <mach/subsystem_restart.h>
#include <mach/qdsp6v2/apr.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
#define Q6_PIL_GET_DELAY_MS 100
#define BOOT_CMD 1
/* sysfs "boot" store handler, defined below */
static ssize_t adsp_boot_store(struct kobject *kobj,
	struct kobj_attribute *attr,
	const char *buf, size_t count);

/* Per-device state: PIL handle plus the sysfs objects exposing "boot" */
struct adsp_loader_private {
	void *pil_h;			/* handle from subsystem_get("adsp") */
	struct kobject *boot_adsp_obj;	/* /sys/kernel/boot_adsp */
	struct attribute_group *attr_group;
};

/* Write-only (0220) "boot" attribute */
static struct kobj_attribute adsp_boot_attribute =
	__ATTR(boot, 0220, NULL, adsp_boot_store);

static struct attribute *attrs[] = {
	&adsp_boot_attribute.attr,
	NULL,
};

/* Device saved at probe time, used from the sysfs handler */
static struct platform_device *adsp_private;
/*
 * adsp_loader_do() - bring the ADSP up if the device tree says it is down.
 *
 * Reads the "qcom,adsp-state" DT property; if the subsystem is down it is
 * booted via subsystem_get("adsp") and the APR driver is told the Q6 is
 * loaded.  Called from the sysfs "boot" handler with the device saved at
 * probe time, which may legitimately be NULL if probe has not completed.
 */
static void adsp_loader_do(struct platform_device *pdev)
{
	struct adsp_loader_private *priv = NULL;
	const char *adsp_dt = "qcom,adsp-state";
	int rc = 0;
	u32 adsp_state;

	/*
	 * Must not touch pdev->dev before this check: the previous code
	 * called dev_err(&pdev->dev, ...) on a NULL pdev, dereferencing it.
	 */
	if (!pdev) {
		pr_err("%s: Platform device null \n", __func__);
		return;
	}

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev,
			"%s: Device tree information missing \n", __func__);
		goto fail;
	}

	rc = of_property_read_u32(pdev->dev.of_node, adsp_dt, &adsp_state);
	if (rc) {
		/* adsp_state is uninitialized on failure; report rc instead */
		dev_err(&pdev->dev,
			"%s: Failed to read %s, rc = %d\n",
			__func__, adsp_dt, rc);
		goto fail;
	}

	if (adsp_state == APR_SUBSYS_DOWN) {
		priv = platform_get_drvdata(pdev);
		if (!priv) {
			dev_err(&pdev->dev,
				" %s: Private data get failed\n", __func__);
			goto fail;
		}

		/* Boot the ADSP through the PIL/subsystem-restart framework */
		priv->pil_h = subsystem_get("adsp");
		if (IS_ERR(priv->pil_h)) {
			/*
			 * Clear the handle so remove() does not pass an
			 * error pointer to subsystem_put().
			 */
			priv->pil_h = NULL;
			dev_err(&pdev->dev, "%s: pil get failed,\n",
				__func__);
			goto fail;
		}

		/* Set the state of the ADSP in APR driver */
		apr_set_q6_state(APR_SUBSYS_LOADED);
	} else if (adsp_state == APR_SUBSYS_LOADED) {
		dev_dbg(&pdev->dev,
			"%s: ADSP state = %x\n", __func__, adsp_state);
		apr_set_q6_state(APR_SUBSYS_LOADED);
	}

	dev_info(&pdev->dev, "%s: Q6/ADSP image is loaded\n", __func__);
	return;
fail:
	dev_err(&pdev->dev, "%s: Q6/ADSP image loading failed\n", __func__);
}
/*
 * sysfs store handler for /sys/kernel/boot_adsp/boot: writing BOOT_CMD
 * kicks off loading of the ADSP image via adsp_loader_do().
 */
static ssize_t adsp_boot_store(struct kobject *kobj,
	struct kobj_attribute *attr,
	const char *buf,
	size_t count)
{
	int cmd = 0;

	sscanf(buf, "%du", &cmd);
	if (cmd != BOOT_CMD)
		return count;

	pr_debug("%s:going to call adsp_loader_do", __func__);
	adsp_loader_do(adsp_private);
	return count;
}
/*
 * adsp_loader_init_sysfs() - allocate driver state and expose the
 * /sys/kernel/boot_adsp/boot node used to trigger ADSP loading.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * created sysfs state is torn down.
 */
static int adsp_loader_init_sysfs(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct adsp_loader_private *priv = NULL;

	adsp_private = NULL;

	/* devm allocations are released automatically on probe failure */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "%s: memory alloc failed\n", __func__);
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, priv);

	priv->pil_h = NULL;
	priv->boot_adsp_obj = NULL;
	priv->attr_group = devm_kzalloc(&pdev->dev,
					sizeof(*(priv->attr_group)),
					GFP_KERNEL);
	if (!priv->attr_group) {
		dev_err(&pdev->dev, "%s: malloc attr_group failed\n",
			__func__);
		ret = -ENOMEM;
		goto error_return;
	}

	priv->attr_group->attrs = attrs;

	priv->boot_adsp_obj = kobject_create_and_add("boot_adsp", kernel_kobj);
	if (!priv->boot_adsp_obj) {
		dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
			__func__);
		ret = -ENOMEM;
		goto error_return;
	}

	ret = sysfs_create_group(priv->boot_adsp_obj, priv->attr_group);
	if (ret) {
		dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
			__func__, ret);
		goto error_return;
	}

	adsp_private = pdev;
	return 0;

error_return:
	if (priv->boot_adsp_obj) {
		/*
		 * kobject_put() removes the sysfs entry and drops the
		 * reference taken by kobject_create_and_add(); the previous
		 * kobject_del() left that reference leaked.
		 */
		kobject_put(priv->boot_adsp_obj);
		priv->boot_adsp_obj = NULL;
	}
	return ret;
}
/*
 * adsp_loader_remove() - release the PIL handle and tear down the sysfs
 * "boot" node created by adsp_loader_init_sysfs().
 */
static int adsp_loader_remove(struct platform_device *pdev)
{
	struct adsp_loader_private *priv = NULL;

	priv = platform_get_drvdata(pdev);
	if (!priv)
		return 0;

	if (priv->pil_h) {
		subsystem_put(priv->pil_h);
		priv->pil_h = NULL;
	}

	if (priv->boot_adsp_obj) {
		sysfs_remove_group(priv->boot_adsp_obj, priv->attr_group);
		/*
		 * kobject_put() drops the reference taken by
		 * kobject_create_and_add(); plain kobject_del() leaked it.
		 */
		kobject_put(priv->boot_adsp_obj);
		priv->boot_adsp_obj = NULL;
	}
	return 0;
}
/*
 * adsp_loader_probe() - probe only sets up the sysfs boot interface;
 * the actual ADSP boot is deferred until userspace writes to it.
 */
static int adsp_loader_probe(struct platform_device *pdev)
{
	int ret;

	ret = adsp_loader_init_sysfs(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
		return ret;
	}

	return 0;
}
/* Bind to device-tree nodes declaring compatible = "qcom,adsp-loader" */
static const struct of_device_id adsp_loader_dt_match[] = {
	{ .compatible = "qcom,adsp-loader" },
	{ }
};
MODULE_DEVICE_TABLE(of, adsp_loader_dt_match);

static struct platform_driver adsp_loader_driver = {
	.driver = {
		.name = "adsp-loader",
		.owner = THIS_MODULE,
		.of_match_table = adsp_loader_dt_match,
	},
	.probe = adsp_loader_probe,
	.remove = __devexit_p(adsp_loader_remove),
};
/* Register/unregister the platform driver at module load/unload */
static int __init adsp_loader_init(void)
{
	return platform_driver_register(&adsp_loader_driver);
}
module_init(adsp_loader_init);

static void __exit adsp_loader_exit(void)
{
	platform_driver_unregister(&adsp_loader_driver);
}
module_exit(adsp_loader_exit);

MODULE_DESCRIPTION("ADSP Loader module");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
samno1607/FreshBake | drivers/leds/led-triggers.c | 2924 | 7441 | /*
* LED Triggers Core
*
* Copyright 2005-2007 Openedhand Ltd.
*
* Author: Richard Purdie <rpurdie@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include "leds.h"
/*
 * Nests outside led_cdev->trigger_lock
 */
static DECLARE_RWSEM(triggers_list_lock);	/* guards trigger_list */
static LIST_HEAD(trigger_list);			/* all registered triggers */
ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
size_t len;
trigger_name[sizeof(trigger_name) - 1] = '\0';
strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
len = strlen(trigger_name);
if (len && trigger_name[len - 1] == '\n')
trigger_name[len - 1] = '\0';
if (!strcmp(trigger_name, "none")) {
led_trigger_remove(led_cdev);
return count;
}
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
if (!strcmp(trigger_name, trig->name)) {
down_write(&led_cdev->trigger_lock);
led_trigger_set(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
return count;
}
}
up_read(&triggers_list_lock);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(led_trigger_store);
/*
 * sysfs "trigger" show: print every registered trigger name, wrapping the
 * currently active one (or "none") in brackets.
 */
ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct led_trigger *trig;
	int len = 0;

	down_read(&triggers_list_lock);
	down_read(&led_cdev->trigger_lock);

	if (!led_cdev->trigger)
		len += sprintf(buf+len, "[none] ");
	else
		len += sprintf(buf+len, "none ");

	list_for_each_entry(trig, &trigger_list, next_trig) {
		if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
							trig->name))
			len += sprintf(buf+len, "[%s] ", trig->name);
		else
			len += sprintf(buf+len, "%s ", trig->name);
	}
	up_read(&led_cdev->trigger_lock);
	up_read(&triggers_list_lock);

	len += sprintf(len+buf, "\n");
	return len;
}
EXPORT_SYMBOL_GPL(led_trigger_show);
/* Caller must ensure led_cdev->trigger_lock held */

/*
 * Attach @trigger to @led_cdev, first detaching any current trigger
 * (deactivating it and forcing the LED off), then activating the new one.
 * Passing NULL simply detaches.
 */
void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
{
	unsigned long flags;

	/* Remove any existing trigger */
	if (led_cdev->trigger) {
		write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
		list_del(&led_cdev->trig_list);
		write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock,
			flags);
		if (led_cdev->trigger->deactivate)
			led_cdev->trigger->deactivate(led_cdev);
		led_cdev->trigger = NULL;
		led_brightness_set(led_cdev, LED_OFF);
	}
	if (trigger) {
		write_lock_irqsave(&trigger->leddev_list_lock, flags);
		list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
		write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
		led_cdev->trigger = trigger;
		if (trigger->activate)
			trigger->activate(led_cdev);
	}
}
EXPORT_SYMBOL_GPL(led_trigger_set);
/* Detach the LED's current trigger, taking trigger_lock itself */
void led_trigger_remove(struct led_classdev *led_cdev)
{
	down_write(&led_cdev->trigger_lock);
	led_trigger_set(led_cdev, NULL);
	up_write(&led_cdev->trigger_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_remove);
/*
 * Attach the LED's default_trigger (by name) if that trigger is already
 * registered; a no-op when no default is set or the name is unknown.
 */
void led_trigger_set_default(struct led_classdev *led_cdev)
{
	struct led_trigger *trig;

	if (!led_cdev->default_trigger)
		return;

	down_read(&triggers_list_lock);
	down_write(&led_cdev->trigger_lock);
	list_for_each_entry(trig, &trigger_list, next_trig) {
		if (!strcmp(led_cdev->default_trigger, trig->name))
			led_trigger_set(led_cdev, trig);
	}
	up_write(&led_cdev->trigger_lock);
	up_read(&triggers_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_set_default);
/* LED Trigger Interface */

/*
 * Register @trigger, rejecting duplicate names with -EEXIST, then attach
 * it to every already-registered LED whose default_trigger matches.
 */
int led_trigger_register(struct led_trigger *trigger)
{
	struct led_classdev *led_cdev;
	struct led_trigger *trig;

	rwlock_init(&trigger->leddev_list_lock);
	INIT_LIST_HEAD(&trigger->led_cdevs);

	down_write(&triggers_list_lock);
	/* Make sure the trigger's name isn't already in use */
	list_for_each_entry(trig, &trigger_list, next_trig) {
		if (!strcmp(trig->name, trigger->name)) {
			up_write(&triggers_list_lock);
			return -EEXIST;
		}
	}
	/* Add to the list of led triggers */
	list_add_tail(&trigger->next_trig, &trigger_list);
	up_write(&triggers_list_lock);

	/* Register with any LEDs that have this as a default trigger */
	down_read(&leds_list_lock);
	list_for_each_entry(led_cdev, &leds_list, node) {
		down_write(&led_cdev->trigger_lock);
		if (!led_cdev->trigger && led_cdev->default_trigger &&
			    !strcmp(led_cdev->default_trigger, trigger->name))
			led_trigger_set(led_cdev, trigger);
		up_write(&led_cdev->trigger_lock);
	}
	up_read(&leds_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(led_trigger_register);
/*
 * Unregister @trigger: remove it from the global list, then detach it
 * from every LED currently using it.
 */
void led_trigger_unregister(struct led_trigger *trigger)
{
	struct led_classdev *led_cdev;

	/* Remove from the list of led triggers */
	down_write(&triggers_list_lock);
	list_del(&trigger->next_trig);
	up_write(&triggers_list_lock);

	/* Remove anyone actively using this trigger */
	down_read(&leds_list_lock);
	list_for_each_entry(led_cdev, &leds_list, node) {
		down_write(&led_cdev->trigger_lock);
		if (led_cdev->trigger == trigger)
			led_trigger_set(led_cdev, NULL);
		up_write(&led_cdev->trigger_lock);
	}
	up_read(&leds_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_unregister);
/* Simple LED Tigger Interface */
void led_trigger_event(struct led_trigger *trigger,
enum led_brightness brightness)
{
struct list_head *entry;
if (!trigger)
return;
read_lock(&trigger->leddev_list_lock);
list_for_each(entry, &trigger->led_cdevs) {
struct led_classdev *led_cdev;
led_cdev = list_entry(entry, struct led_classdev, trig_list);
led_set_brightness(led_cdev, brightness);
}
read_unlock(&trigger->leddev_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
void led_trigger_blink(struct led_trigger *trigger,
unsigned long *delay_on,
unsigned long *delay_off)
{
struct list_head *entry;
if (!trigger)
return;
read_lock(&trigger->leddev_list_lock);
list_for_each(entry, &trigger->led_cdevs) {
struct led_classdev *led_cdev;
led_cdev = list_entry(entry, struct led_classdev, trig_list);
led_blink_set(led_cdev, delay_on, delay_off);
}
read_unlock(&trigger->leddev_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_blink);
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
struct led_trigger *trigger;
int err;
trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
if (trigger) {
trigger->name = name;
err = led_trigger_register(trigger);
if (err < 0)
printk(KERN_WARNING "LED trigger %s failed to register"
" (%d)\n", name, err);
} else
printk(KERN_WARNING "LED trigger %s failed to register"
" (no memory)\n", name);
*tp = trigger;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
/* Counterpart of led_trigger_register_simple(); NULL is tolerated */
void led_trigger_unregister_simple(struct led_trigger *trigger)
{
	if (trigger)
		led_trigger_unregister(trigger);
	kfree(trigger);	/* kfree(NULL) is a no-op */
}
EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
MODULE_AUTHOR("Richard Purdie");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LED Triggers Core");
| gpl-2.0 |
guilhem/LGE975_G_Kitkat_Android_V20a_Kernel | fs/nfs/pnfs.c | 3180 | 41415 | /*
* pNFS functions to call and manage layout drivers.
*
* Copyright (c) 2002 [year of first publication]
* The Regents of the University of Michigan
* All Rights Reserved
*
* Dean Hildebrand <dhildebz@umich.edu>
*
* Permission is granted to use, copy, create derivative works, and
* redistribute this software and such derivative works for any purpose,
* so long as the name of the University of Michigan is not used in
* any advertising or publicity pertaining to the use or distribution
* of this software without specific, written prior authorization. If
* the above copyright notice or any other identification of the
* University of Michigan is included in any copy of any portion of
* this software, then the disclaimer below must also be included.
*
* This software is provided as is, without representation or warranty
* of any kind either express or implied, including without limitation
* the implied warranties of merchantability, fitness for a particular
* purpose, or noninfringement. The Regents of the University of
* Michigan shall not be liable for any damages, including special,
* indirect, incidental, or consequential damages, with respect to any
* claim arising out of or in connection with the use of the software,
* even if it has been or is hereafter advised of the possibility of
* such damages.
*/
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
/* Locking:
 *
 * pnfs_spinlock:
 * protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 * (list of struct pnfs_layoutdriver_type, linked via pnfs_tblid)
 */
static LIST_HEAD(pnfs_modules_tbl);
/*
 * Return the registered pnfs layout driver module matching the given id,
 * or NULL if none is loaded.  Caller must hold pnfs_spinlock.
 */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *ld, *found = NULL;

	list_for_each_entry(ld, &pnfs_modules_tbl, pnfs_tblid) {
		if (ld->id == id) {
			found = ld;
			break;
		}
	}

	dprintk("%s: Searching for id %u, found %p\n", __func__, id, found);
	return found;
}
/* Like find_pnfs_driver_locked(), but takes pnfs_spinlock itself */
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
if (nfss->pnfs_curr_ld) {
if (nfss->pnfs_curr_ld->clear_layoutdriver)
nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
module_put(nfss->pnfs_curr_ld->owner);
}
nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		/* Not loaded yet: request the module, then look again */
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	/* Pin the driver module while the server uses it */
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	/* Fall back to plain NFSv4 I/O through the MDS */
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
/*
 * Register a layout driver module.  Rejects id 0 (reserved), drivers
 * lacking the mandatory alloc_lseg/free_lseg hooks, and duplicate ids.
 * Returns 0 on success, -EINVAL otherwise.
 */
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
/* Remove a layout driver from the module table */
void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}
/* Allocate a layout header, using the driver's hook when it provides one */
static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
		kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}
/* Free a layout header, releasing its cred; mirrors pnfs_alloc_layout_hdr() */
static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
	put_rpccred(lo->plh_lc_cred);
	return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}
/* Final teardown once the refcount hits zero; detaches lo from its inode */
static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
/* Drop a reference; caller holds the inode's i_lock */
static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}
/* Drop a reference, taking i_lock only if this is the final put */
void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
/* Initialise a freshly allocated layout segment and mark it valid */
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	/* Ensure the refcount store is visible before the VALID bit */
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}
/* Release a segment via the driver and drop its layout-header reference */
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
/*
 * Common final-put work for a segment; caller holds i_lock.  When the last
 * segment leaves the layout, the layout is flagged destroyed and its
 * initial reference dropped.
 */
static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}
/*
 * Drop a reference on a layout segment (NULL tolerated); on the final put
 * the segment is unhooked under i_lock and freed outside the lock.
 */
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);
/* Offset one past the end of [start, start+len), clamped on u64 overflow. */
static inline u64
end_offset(u64 start, u64 len)
{
	u64 sum = start + len;

	return (sum < start) ? NFS4_MAX_UINT64 : sum;
}
/* last octet in a range; a zero-length range is a bug */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end = start + len;

	BUG_ON(!len);
	return (end <= start) ? NFS4_MAX_UINT64 : end - 1;
}
/*
 * is l2 fully contained in l1?
 *           start1                             end1
 *           [----------------------------------)
 *                start2           end2
 *                [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 end1 = end_offset(l1->offset, l1->length);
	u64 end2 = end_offset(l2->offset, l2->length);

	return (l1->offset <= l2->offset) && (end1 >= end2);
}
/*
 * is l1 and l2 intersecting?
 *            start1                             end1
 *            [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 end1 = end_offset(l1->offset, l1->length);
	u64 end2 = end_offset(l2->offset, l2->length);

	/* An end of NFS4_MAX_UINT64 marks an open-ended range */
	return (end1 == NFS4_MAX_UINT64 || end1 > l2->offset) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > l1->offset);
}
/*
 * A segment must be freed by a recall when the recall's iomode covers it
 * (IOMODE_ANY or an exact match) and the two byte ranges overlap.
 */
static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	bool iomode_covered = recall_range->iomode == IOMODE_ANY ||
			      lseg_range->iomode == recall_range->iomode;

	return iomode_covered &&
	       lo_seg_intersecting(lseg_range, recall_range);
}
/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	/* Only the first caller to clear VALID drops the list reference */
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}
/* Returns count of number of matching invalid lsegs remaining in list
 * after call.  A NULL @recall_range matches every segment.  Caller holds
 * the inode's i_lock; freed segments are collected on @tmp_list for the
 * caller to release outside the lock.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	/* A destroyed layout must also leave the per-client layouts list */
	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}
/*
 * Invalidate and free every segment of the inode's layout, and block any
 * further LAYOUTGETs on it.
 */
void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	/* Collect every server's layouts onto a private list first */
	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
/* update lo->plh_stateid with new if is more recent
 * (seqid comparison is done modulo 2**32 via signed subtraction)
 */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if ((int)(newseq - oldseq) > 0) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
/* lget is set to 1 if called from inside send_layoutget call chain */

/*
 * True when a LAYOUTGET must not be sent: the given stateid is at or
 * behind the barrier, lgets are blocked, the layout is being destroyed or
 * bulk-recalled, or a first-layout request is already outstanding.
 * NOTE(review): callers appear to invoke this with i_lock held - confirm.
 */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}
/*
 * Pick the stateid to send in a LAYOUTGET: the open stateid for the first
 * layout on the file, otherwise the current layout stateid.  Returns
 * -EAGAIN when layoutgets are presently blocked.
 */
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		/* seqlock retry loop gives a consistent open stateid copy */
		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 * for now, assume that whole file layouts are requested.
 * arg->offset: 0
 * arg->length: all ones
 *
 * Returns the new layout segment, or NULL on failure (in which case the
 * per-iomode "layoutget failed" bit is set on the layout header).
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	/* minlength: one page, but never more than the requested range */
	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}
/* Initiates a LAYOUTRETURN(FILE) */

/*
 * Invalidate the inode's layout segments and send a synchronous
 * LAYOUTRETURN for the whole file.  Returns 0 when there was nothing to
 * return, otherwise the status of nfs4_proc_layoutreturn().
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0;

	dprintk("--> %s\n", __func__);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("%s: no layout to return\n", __func__);
		return status;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	get_layout_hdr(lo);
	mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		/* Cannot return: permanently fail both iomodes */
		set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
		set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
		put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
/*
 * Return-on-close handling: if the layout was granted with
 * return_on_close, atomically invalidate all NFS_LSEG_ROC segments and
 * block further LAYOUTGETs.  Returns true if ROC segments were found
 * and invalidated (the layout-header reference taken here is released
 * in pnfs_roc_release()); false if there is nothing to return on close.
 */
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	/* nothing to do without a layout, without the ROC flag, or while
	 * a bulk recall is being processed */
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	/* fence off new LAYOUTGETs while the close is in flight */
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}
/*
 * Undo the effects of pnfs_roc(): re-enable LAYOUTGETs and drop the
 * layout-header reference taken there.
 */
void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	/* NOTE(review): the _locked variant presumably drops the ref (and
	 * possibly frees the header) while i_lock is held - confirm */
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}
/*
 * Raise the layout stateid barrier to @barrier.  The (int)(a - b) > 0
 * test is serial-number arithmetic, so the barrier only ever moves
 * forward, even across 32-bit seqid wraparound.
 */
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	/* serial-number compare: never lower an existing barrier */
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
/*
 * Check whether any return-on-close segments are still outstanding on
 * @ino.  Returns true while NFS_LSEG_ROC segments remain; once none are
 * left, *barrier is set to a worst-case stateid sequence barrier and
 * false is returned.
 */
bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into the layout cache:
 * ascending offset first, then descending length (longer ranges sort
 * before shorter ones at the same offset), and finally RW before
 * READ-only so RW layouts are preferentially returned.
 *
 * Returns <0, 0 or >0 like a qsort comparator.  Explicit comparisons
 * are used instead of subtraction because @offset and @length are u64:
 * a difference wider than S64_MAX would change sign when truncated to
 * the s64 return value and mis-sort the list.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	/* low offset sorts first */
	if (l1->offset != l2->offset)
		return l1->offset < l2->offset ? -1 : 1;

	/* longer length sorts first */
	if (l1->length != l2->length)
		return l1->length > l2->length ? -1 : 1;

	/* read sorts after read/write */
	return (int)(l1->iomode == IOMODE_READ) -
	       (int)(l2->iomode == IOMODE_READ);
}
/*
 * Insert @lseg into @lo's segment list, keeping it sorted in
 * cmp_layout() order, and take a layout-header reference for the
 * inserted segment.  Caller must hold the inode's i_lock.
 */
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		/* first entry that does not sort before @lseg: insert here */
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	/* ref for the segment now held by the list */
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
/*
 * Allocate a layout header via the layout driver and initialize its
 * refcount, list heads, inode backpointer and layoutcommit credential.
 * Returns NULL if the driver allocation fails.
 */
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *hdr = pnfs_alloc_layout_hdr(ino, gfp_flags);

	if (hdr == NULL)
		return NULL;

	INIT_LIST_HEAD(&hdr->plh_layouts);
	INIT_LIST_HEAD(&hdr->plh_segs);
	INIT_LIST_HEAD(&hdr->plh_bulk_recall);
	atomic_set(&hdr->plh_refcount, 1);
	hdr->plh_inode = ino;
	hdr->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return hdr;
}
/*
 * Return the inode's layout header, allocating one if none exists yet.
 * Called with i_lock held; the lock is dropped and re-taken around the
 * allocation, so a racing allocator may install a header first - in
 * that case the freshly allocated one is freed and the winner's header
 * is returned.  Returns NULL if the existing header is marked
 * NFS_LAYOUT_DESTROYED.
 */
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
	/* allocation may sleep: drop the lock and re-check afterwards */
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		/* NOTE(review): if the allocation failed AND we lost the
		 * race, @new is NULL here - confirm pnfs_free_layout_hdr()
		 * tolerates a NULL argument */
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}
/*
* iomode matching rules:
* iomode lseg match
* ----- ----- -----
* ANY READ true
* ANY RW true
* RW READ false
* RW RW true
* READ READ true
* READ RW true
*/
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
struct pnfs_layout_range *range)
{
struct pnfs_layout_range range1;
if ((range->iomode == IOMODE_RW &&
ls_range->iomode != IOMODE_RW) ||
!lo_seg_intersecting(ls_range, range))
return 0;
/* range1 covers only the first byte in the range */
range1 = *range;
range1.length = 1;
return lo_seg_contained(ls_range, &range1);
}
/*
 * Look up a cached, valid layout segment matching @range.  Returns a
 * referenced segment or NULL.  Caller must hold the inode's i_lock.
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
	       struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		/* first valid, matching segment wins */
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = get_lseg(lseg);
			break;
		}
		/* list is sorted by offset: later segments cannot match */
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 *
 * Returns a referenced lseg covering [@pos, @pos + @count) for @iomode,
 * or NULL when pNFS cannot (or should not) be used for this range, in
 * which case the caller falls back to I/O through the MDS.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	/* count the in-flight LAYOUTGET; pnfs_roc_drain relies on this */
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	/* round the requested range out to whole pages before sending */
	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	if (!lseg && first) {
		/* LAYOUTGET failed on a fresh header: take it back off the
		 * per-server list */
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
/*
 * Process a LAYOUTGET reply: have the layout driver turn the opaque
 * layout blob into a layout segment, insert it into the layout cache
 * and update the layout stateid.  The reply is deliberately forgotten
 * (and the segment freed) if a bulk recall arrived or layoutgets are
 * blocked for this stateid.  Returns 0 on success or a negative errno.
 */
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	/* hand a referenced segment back to the caller of send_layoutget */
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
/*
 * Generic pageio init for reads: obtain a READ layout segment covering
 * the first request; if none can be obtained, redirect the descriptor
 * to plain I/O through the MDS.
 */
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	struct pnfs_layout_segment *lseg;

	BUG_ON(pgio->pg_lseg != NULL);

	lseg = pnfs_update_layout(pgio->pg_inode, req->wb_context,
				  req_offset(req), req->wb_bytes,
				  IOMODE_READ, GFP_KERNEL);
	pgio->pg_lseg = lseg;
	/* no layout segment: fall back to reading through the MDS */
	if (lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
/*
 * Generic pageio init for writes: obtain an RW layout segment covering
 * the first request; if none can be obtained, redirect the descriptor
 * to plain I/O through the MDS.
 */
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	struct pnfs_layout_segment *lseg;

	BUG_ON(pgio->pg_lseg != NULL);

	lseg = pnfs_update_layout(pgio->pg_inode, req->wb_context,
				  req_offset(req), req->wb_bytes,
				  IOMODE_RW, GFP_NOFS);
	pgio->pg_lseg = lseg;
	/* no layout segment: fall back to writing through the MDS */
	if (lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
/*
 * Initialize a pageio descriptor with the layout driver's read ops.
 * Returns false when no pNFS layout driver is attached, leaving the
 * caller to set up the plain MDS path instead.
 */
bool
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
	bool has_ld = (ld != NULL);

	if (has_ld)
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
	return has_ld;
}
/*
 * Initialize a pageio descriptor with the layout driver's write ops.
 * Returns false when no pNFS layout driver is attached, leaving the
 * caller to set up the plain MDS path instead.
 */
bool
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
	bool has_ld = (ld != NULL);

	if (has_ld)
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize,
				ioflags);
	return has_ld;
}
bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
struct nfs_page *req)
{
if (pgio->pg_lseg == NULL)
return nfs_generic_pg_test(pgio, prev, req);
/*
* Test if a nfs_page is fully contained in the pnfs_layout_range.
* Note that this test makes several assumptions:
* - that the previous nfs_page in the struct nfs_pageio_descriptor
* is known to lie within the range.
* - that the nfs_page being tested is known to be contiguous with the
* previous nfs_page.
* - Layout ranges are page aligned, so we only have to test the
* start offset of the request.
*
* Please also note that 'end_offset' is actually the offset of the
* first byte that lies outside the pnfs_layout_range. FIXME?
*
*/
return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
/*
 * Resend a list of page requests through the MDS after a pNFS write
 * failure.  Returns 0 on success, or -EIO if some requests could not be
 * re-queued (those are moved back onto @head).
 */
static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* Our attempt to resend some pages failed.  Mark the
		 * overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
/*
 * Write completion callback for non rpc-based layout drivers.  On
 * success, record layoutcommit state and complete the request; on
 * error, optionally return the layout (PNFS_LAYOUTRET_ON_ERROR
 * drivers) and resend the pages through the MDS.
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	if (likely(!data->pnfs_error)) {
		pnfs_set_layoutcommit(data);
		data->mds_ops->rpc_call_done(&data->task, data);
	} else {
		dprintk("pnfs write error = %d\n", data->pnfs_error);
		if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
		    PNFS_LAYOUTRET_ON_ERROR) {
			/* Don't layoutcommit on error; the server will need
			 * to perform file recovery instead.
			 */
			clear_bit(NFS_INO_LAYOUTCOMMIT,
				  &NFS_I(data->inode)->flags);
			pnfs_return_layout(data->inode);
		}
		/* fall back: push the failed pages through the MDS */
		data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
	}
	put_lseg(data->lseg);
	data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
/*
 * Re-queue a write the layout driver declined so that it is
 * re-coalesced and sent through the MDS instead.  Statement order
 * matters: the pages are spliced back onto the descriptor before the
 * descriptor is reset to MDS write ops.
 */
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		       struct nfs_write_data *data)
{
	list_splice_tail_init(&data->pages, &desc->pg_list);
	/* presumably the originating request may not be on data->pages;
	 * re-add it if it sits on no list - TODO confirm */
	if (data->req && list_empty(&data->req->wb_list))
		nfs_list_add_request(data->req, &desc->pg_list);
	nfs_pageio_reset_write_mds(desc);
	desc->pg_recoalesce = 1;
	put_lseg(data->lseg);
	nfs_writedata_release(data);
}
/*
 * Hand one write off to the layout driver.  Returns the driver's
 * pnfs_try_status; on PNFS_NOT_ATTEMPTED the segment reference taken
 * here is dropped again and the caller falls back to the MDS.
 */
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;
	/* ref dropped below on PNFS_NOT_ATTEMPTED, else on completion */
	wdata->lseg = get_lseg(lseg);

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/*
 * Dispatch every queued nfs_write_data on @head through the layout
 * driver, diverting any request the driver declines to the MDS.
 * Consumes the descriptor's segment reference.
 */
static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		struct nfs_write_data *data;

		data = list_entry(head->next, struct nfs_write_data, list);
		list_del_init(&data->list);
		if (pnfs_try_to_write_data(data, call_ops, lseg, how) ==
		    PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	put_lseg(lseg);
}
/*
 * Flush the coalesced write requests in @desc and send them through the
 * layout driver.  Returns the flush status (0 on success).
 */
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret = nfs_generic_flush(desc, &head);

	if (ret != 0) {
		/* flush failed: drop the segment reference and bail out */
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
struct nfs_pageio_descriptor pgio;
put_lseg(data->lseg);
data->lseg = NULL;
dprintk("pnfs write error = %d\n", data->pnfs_error);
if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
PNFS_LAYOUTRET_ON_ERROR)
pnfs_return_layout(data->inode);
nfs_pageio_init_read_mds(&pgio, data->inode);
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
nfs_pageio_add_request(&pgio, req);
}
nfs_pageio_complete(&pgio);
}
/*
 * Read completion callback for non rpc-based layout drivers.  On
 * success, run the NFSv4 read completion and finish the request; on
 * error, divert the pages back through the MDS.
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	if (likely(!data->pnfs_error)) {
		__nfs4_read_done_cb(data);
		data->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	put_lseg(data->lseg);
	data->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
/*
 * Re-queue a read the layout driver declined so that it is
 * re-coalesced and sent through the MDS instead.  Statement order
 * matters: the pages are spliced back onto the descriptor before the
 * descriptor is reset to MDS read ops.
 */
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_read_data *data)
{
	list_splice_tail_init(&data->pages, &desc->pg_list);
	/* presumably the originating request may not be on data->pages;
	 * re-add it if it sits on no list - TODO confirm */
	if (data->req && list_empty(&data->req->wb_list))
		nfs_list_add_request(data->req, &desc->pg_list);
	nfs_pageio_reset_read_mds(desc);
	desc->pg_recoalesce = 1;
	nfs_readdata_release(data);
}
/*
 * Call the appropriate parallel I/O subsystem read function.
 *
 * Returns the driver's pnfs_try_status; on PNFS_NOT_ATTEMPTED the
 * segment reference taken here is dropped again and the caller falls
 * back to the MDS.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;
	/* ref dropped below on PNFS_NOT_ATTEMPTED, else on completion */
	rdata->lseg = get_lseg(lseg);

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
/*
 * Dispatch every queued nfs_read_data on @head through the layout
 * driver, diverting any request the driver declines to the MDS.
 * Consumes the descriptor's segment reference.
 */
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		struct nfs_read_data *data;

		data = list_entry(head->next, struct nfs_read_data, list);
		list_del_init(&data->list);
		if (pnfs_try_to_read_data(data, call_ops, lseg) ==
		    PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	put_lseg(lseg);
}
/*
 * Coalesce the read requests in @desc and send them through the layout
 * driver.  Returns the pagein status (0 on success).
 */
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	LIST_HEAD(head);
	int ret = nfs_generic_pagein(desc, &head);

	if (ret != 0) {
		/* coalescing failed: drop the segment reference and bail */
		put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	pnfs_do_multiple_reads(desc, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
/*
 * Collect every RW segment flagged NFS_LSEG_LAYOUTCOMMIT onto @listp
 * for the pending LAYOUTCOMMIT.  There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
/*
 * Record that pNFS I/O failed for @lseg's iomode.  Setting the layout
 * fail bit stops pnfs_update_layout() from retrying LAYOUTGET for that
 * iomode, so subsequent I/O goes through the MDS.
 */
void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_RW) {
		dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
	} else {
		dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
		set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
	}
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
/*
 * Record that @wdata's write needs a future LAYOUTCOMMIT: track the
 * last-written byte (plh_lwb), take an lseg reference matched in
 * nfs4_layoutcommit_release, and mark the inode dirty the first time
 * the NFS_INO_LAYOUTCOMMIT flag transitions to set.
 */
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, wdata->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
struct nfs_server *nfss = NFS_SERVER(data->args.inode);
if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 *
 * Sends a LAYOUTCOMMIT covering bytes written so far (plh_lwb) if one
 * is pending.  Returns 0 if nothing to do or on success; -EAGAIN if
 * another LAYOUTCOMMIT is in flight and @sync is false; otherwise a
 * negative errno.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	/* re-check: another task may have committed while we allocated */
	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	/* serialize LAYOUTCOMMITs: only one in flight per inode */
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* nothing left to commit: release the COMMITTING bit and
		 * wake any waiters */
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	/* consume the last-written-byte watermark under i_lock */
	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}
| gpl-2.0 |
mightysween/vs980-kernel | drivers/mtd/ubi/eba.c | 4460 | 36878 | /*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Artem Bityutskiy (Битюцкий Артём)
*/
/*
* The UBI Eraseblock Association (EBA) sub-system.
*
* This sub-system is responsible for I/O to/from logical eraseblock.
*
* Although in this implementation the EBA table is fully kept and managed in
* RAM, which assumes poor scalability, it might be (partially) maintained on
* flash in future implementations.
*
* The EBA sub-system implements per-logical eraseblock locking. Before
* accessing a logical eraseblock it is locked for reading or writing. The
* per-logical eraseblock locking is implemented by means of the lock tree. The
* lock tree is an RB-tree which refers all the currently locked logical
* eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
* They are indexed by (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
* time a logical eraseblock is mapped to a physical eraseblock and it is
* stored in the volume identifier header. This means that each VID header has
* a unique sequence number. The sequence number is only increased an we assume
* 64 bits is enough to never overflow.
*/
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
/**
 * next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns next sequence number to use, which is just the current
 * global sequence counter value. It also increases the global sequence
 * counter.
 */
static unsigned long long next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	/* @ltree_lock also serializes access to @global_sqnum */
	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}
/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * Returns the compatibility flags for the internal layout volume; user
 * volumes have no compatibility flags, so %0 is returned for them.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	return vol_id == UBI_LAYOUT_VOLUME_ID ? UBI_LAYOUT_VOLUME_COMPAT : 0;
}
/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * Walks the @ubi->ltree RB-tree, which is keyed by (@vol_id, @lnum),
 * and returns the matching &struct ubi_ltree_entry if the logical
 * eraseblock is locked, or %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *node = ubi->ltree.rb_node;

	while (node) {
		struct ubi_ltree_entry *le =
			rb_entry(node, struct ubi_ltree_entry, rb);

		/* compare by volume ID first, then by LEB number */
		if (vol_id < le->vol_id)
			node = node->rb_left;
		else if (vol_id > le->vol_id)
			node = node->rb_right;
		else if (lnum < le->lnum)
			node = node->rb_left;
		else if (lnum > le->lnum)
			node = node->rb_right;
		else
			return le;
	}
	return NULL;
}
/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
 * lock tree. If such entry is already there, its usage counter is increased.
 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
 * failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	/* allocate before taking ltree_lock - GFP_NOFS may sleep */
	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	/* free the unused pre-allocation outside the spinlock */
	kfree(le_free);
	return le;
}
/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * Takes (or creates) the lock-tree entry for (@vol_id, @lnum) and
 * acquires it for reading. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le = ltree_add_entry(ubi, vol_id, lnum);

	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}
/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * Drops the read lock taken by leb_read_lock() and frees the lock-tree
 * entry when the last user goes away.
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		/* last user: freeing after up_read() is safe here because
		 * @ubi->ltree_lock is still held, so no new user can look
		 * the entry up concurrently */
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}
/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * Takes (or creates) the lock-tree entry for (@vol_id, @lnum) and
 * acquires it for writing. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le = ltree_add_entry(ubi, vol_id, lnum);

	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}
/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel: undo the user count ltree_add_entry took */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}
/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * Drops the write lock taken by leb_write_lock() and frees the
 * lock-tree entry when the last user goes away.
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		/* last user: freeing after up_write() is safe here because
		 * @ubi->ltree_lock is still held, so no new user can look
		 * the entry up concurrently */
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}
/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules corresponding
 * physical eraseblock for erasure. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	/* write-lock the LEB so readers/writers cannot race the unmap */
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	/* hand the PEB back to the wear-leveling unit for erasure */
	err = ubi_wl_put_peb(ubi, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: buffer to store the read data
* @offset: offset from where to read
* @len: how many bytes to read
* @check: data CRC check flag
*
* If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
* bytes. The @check flag only makes sense for static volumes and forces
* eraseblock data CRC checking.
*
* In case of success this function returns zero. In case of a static volume,
* if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
* returned for any volume type if an ECC error was detected by the MTD device
* driver. Other negative error cored may be returned in case of other errors.
*/
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	/* Data CRC checking only makes sense for static volumes */
	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		/* Fetch the VID header to learn the expected data CRC/size */
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn("corrupted VID header at PEB "
						 "%d, LEB %d:%d", pnum, vol_id,
						 lnum);
					err = -EBADMSG;
				} else
					ubi_ro_mode(ubi);
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			/* Header was readable but noisy - schedule scrubbing */
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS) {
			/* Data is fine but the PEB should be scrubbed */
			scrub = 1;
			err = 0;
		} else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			/*
			 * On an ECC error in a static volume, retry once
			 * with CRC checking enabled to validate the data.
			 */
			if (!check) {
				ubi_msg("force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
		if (crc1 != crc) {
			ubi_warn("CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	/* Bit-flips were seen somewhere - ask WL to scrub this PEB */
	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
/**
* recover_peb - recover from write failure.
* @ubi: UBI device description object
* @pnum: the physical eraseblock to recover
* @vol_id: volume ID
* @lnum: logical eraseblock number
* @buf: data which was not written because of the write failure
* @offset: offset of the failed write
* @len: how many bytes should have been written
*
* This function is called in case of a write failure and moves all good data
* from the potentially bad physical eraseblock to a good physical eraseblock.
* This function also writes the data which was not written due to the failure.
* Returns new physical eraseblock number in case of success, and a negative
* error code in case of failure.
*/
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

retry:
	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	if (new_pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return new_pnum;
	}

	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);

	/* Re-use the old VID header, but with a fresh sequence number */
	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err)
		goto write_error;

	data_size = offset + len;
	mutex_lock(&ubi->buf_mutex);
	/* Pre-fill the region of the failed write with 0xFF */
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_unlock;
	}

	/* Splice in the data which failed to be written to the old PEB */
	memcpy(ubi->peb_buf + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
	if (err) {
		mutex_unlock(&ubi->buf_mutex);
		goto write_error;
	}

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	/* Re-map the LEB and dispose of the suspect old PEB */
	vol->eba_tbl[lnum] = new_pnum;
	ubi_wl_put_peb(ubi, pnum, 1);

	ubi_msg("data was successfully recovered");
	return 0;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_put:
	ubi_wl_put_peb(ubi, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn("failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg("try again");
	goto retry;
}
/**
* ubi_eba_write_leb - write data to dynamic volume.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
* @len: how many bytes to write
* @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
* @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but may be some garbage.
*/
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		/* Already mapped - just write the data to the existing PEB */
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write data to PEB %d", pnum);
			/* On -EIO try to salvage the data onto a fresh PEB */
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write %d bytes at offset %d of "
				 "LEB %d:%d, PEB %d", len, offset, vol_id,
				 lnum, pnum);
			goto write_error;
		}
	}

	/* Success - publish the new mapping */
	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/* Unexpected failure - play it safe and go read-only */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/* Each write attempt needs a fresh sequence number */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
/**
* ubi_eba_write_leb_st - write data to static volume.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
* @dtype: data type
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
* @vol. The @used_ebs argument should contain total number of logical
* eraseblock in this static volume.
*
* When writing to the last logical eraseblock, the @len argument doesn't have
* to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
* to the real data size, although the @buf buffer has to contain the
* alignment. In all other cases, @len has to be aligned.
*
* It is prohibited to write more than once to logical eraseblocks of static
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int dtype,
			 int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Static volumes carry size, used_ebs and a data CRC in the VID
	 * header; the CRC covers the real data size, not the padded length.
	 */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	/* Static LEBs may be written only once, so must be unmapped here */
	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/* Each retry writes a newer copy, so bump the sequence number */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
/**
* ubi_eba_atomic_leb_change - change logical eraseblock atomically.
* @ubi: UBI device description object
* @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
* @dtype: data type
*
* This function changes the contents of a logical eraseblock atomically. @buf
* has to contain new logical eraseblock data, and @len - the length of the
* data, which has to be aligned. This function guarantees that in case of an
* unclean reboot the old contents is preserved. Returns zero in case of
* success and a negative error code in case of failure.
*
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	/* Only one atomic LEB change may run at a time (one reserved PEB) */
	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	/*
	 * The data CRC and copy_flag let the scanning code pick the valid
	 * copy if we are interrupted between writing the new PEB and
	 * releasing the old one.
	 */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	/* New copy is safely on flash - now release the old PEB, if any */
	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
		if (err)
			goto out_leb_unlock;
	}

	vol->eba_tbl[lnum] = pnum;

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	/* Each retry writes a newer copy, so bump the sequence number */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
/**
* is_error_sane - check whether a read error is sane.
* @err: code of the error happened during reading
*
* This is a helper function for 'ubi_eba_copy_leb()' which is called when we
* cannot read data from the target PEB (an error @err happened). If the error
* code is sane, then we treat this error as non-fatal. Otherwise the error is
* fatal and UBI will be switched to R/O mode later.
*
* The idea is that we try not to switch to R/O mode if the read error is
* something which suggests there was a real read problem. E.g., %-EIO. Or a
* memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
* mode, simply because we do not know what happened at the MTD level, and we
* cannot handle this. E.g., the underlying driver may have become crazy, and
* it is safer to switch to R/O mode to preserve the data.
*
* And bear in mind, this is about reading from the target PEB, i.e. the PEB
* which we have just written.
*/
static int is_error_sane(int err)
{
	/*
	 * Error codes in the list below are returned as "not sane" (zero);
	 * any other code is reported as sane (non-zero). See the kernel-doc
	 * comment above for the rationale.
	 */
	switch (err) {
	case -EIO:
	case -ENOMEM:
	case -ETIMEDOUT:
	case UBI_IO_BAD_HDR:
	case UBI_IO_BAD_HDR_EBADMSG:
		return 0;
	default:
		return 1;
	}
}
/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
* @to: physical eraseblock number where to copy
* @vid_hdr: VID header of the @from physical eraseblock
*
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
* function. Returns:
* o %0 in case of success;
* o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
* o a negative error code in case of failure.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		/* Static volumes record their exact data size in the header */
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it will
	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
	 * LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reasons of the contention - it may be just a
	 * normal I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
		       "PEB %d, cancel", vol_id, lnum, from,
		       vol->eba_tbl[lnum]);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs to CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn("error %d while reading VID header back from "
				  "PEB %d", err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		memset(ubi->peb_buf, 0xFF, aldata_size);
		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS) {
				ubi_warn("error %d while reading data back "
					 "from PEB %d", err, to);
				if (is_error_sane(err))
					err = MOVE_TARGET_RD_ERR;
			} else
				err = MOVE_TARGET_BITFLIPS;
			goto out_unlock_buf;
		}

		cond_resched();

		/* Verify the read-back data against the source CRC */
		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
			ubi_warn("read data back from PEB %d and it is "
				 "different", to);
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	vol->eba_tbl[lnum] = to;

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
/**
* print_rsvd_warning - warn about not having enough reserved PEBs.
* @ubi: UBI device description object
* @si: scanning information
*
* This is a helper function for 'ubi_eba_init_scan()' which is called when UBI
* cannot reserve enough PEBs for bad block handling. This function makes a
* decision whether we have to print a warning or not. The algorithm is as
* follows:
* o if this is a new UBI image, then just print the warning
* o if this is an UBI image which has already been used for some time, print
* a warning only if we can reserve less than 10% of the expected amount of
* the reserved PEB.
*
* The idea is that when UBI is used, PEBs become bad, and the reserved pool
* of PEBs becomes smaller, which is normal and we do not want to scare users
* with a warning every time they attach the MTD device. This was an issue
* reported by real users.
*/
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_scan_info *si)
{
	/*
	 * A maximum sequence number above 1 << 18 (a reasonably large,
	 * arbitrarily chosen value) indicates a used image rather than a
	 * newly flashed one. For used images, stay quiet unless the bad-PEB
	 * reserve has shrunk below roughly 10% of the expected level.
	 */
	if (si->max_sqnum > (1 << 18)) {
		int threshold = ubi->beb_rsvd_level / 10;

		if (!threshold)
			threshold = 1;
		if (ubi->beb_rsvd_pebs > threshold)
			return;
	}

	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
		 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn("%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}
/**
* ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
* @ubi: UBI device description object
* @si: scanning information
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int i, j, err, num_volumes;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;
	struct ubi_scan_leb *seb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	/* Continue the global sequence numbering after the scanned maximum */
	ubi->global_sqnum = si->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		/* Build the LEB->PEB mapping table for this volume */
		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
		if (!sv)
			continue;

		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
			if (seb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_scan_move_to_list(sv, seb, &si->erase);
			else
				/*
				 * Only record in-range LEBs; writing the
				 * table entry unconditionally would be an
				 * out-of-bounds store for the case above.
				 */
				vol->eba_tbl[seb->lnum] = seb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("no enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* No enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, si);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	/* Undo the partially built tables of all volumes */
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
		ubi->volumes[i]->eba_tbl = NULL;
	}
	return err;
}
| gpl-2.0 |
C-Aniruddh/vortex_condor | drivers/scsi/scsi_devinfo.c | 7276 | 29725 |
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"
/*
* scsi_dev_info_list: structure to hold black/white listed devices.
*/
struct scsi_dev_info_list {
	struct list_head dev_info_list;	/* node in a device info list */
	char vendor[8];			/* vendor string, cf. INQUIRY data */
	char model[16];			/* model string, cf. INQUIRY data */
	unsigned flags;			/* BLIST_* flags for matching devices */
	unsigned compatible; /* for use with scsi_static_device_list entries */
};
/*
 * scsi_dev_info_list_table: a named collection of device info entries,
 * itself kept on a master list and addressed by a unique key.
 */
struct scsi_dev_info_list_table {
	struct list_head node;	/* our node for being on the master list */
	struct list_head scsi_dev_info_list;	/* head of dev info list */
	const char *name;	/* name of list for /proc (NULL for global) */
	int key;	/* unique numeric identifier */
};
/*
 * The literal below must really contain sixteen spaces, as its comment
 * says - it is used to pad vendor/model strings to their fixed widths
 * (cf. vendor[8]/model[16] above); a single space would under-pad them.
 */
static const char spaces[] = "                "; /* 16 of them */
/* default BLIST_* flags applied when no specific entry matches */
static unsigned scsi_default_dev_flags;
/* the global (unnamed) device info list */
static LIST_HEAD(scsi_dev_info_list);
/* NOTE(review): presumably filled from a boot/module parameter - confirm */
static char scsi_dev_flags[256];
/*
* scsi_static_device_list: deprecated list of devices that require
* settings that differ from the default, includes black-listed (broken)
* devices. The entries here are added to the tail of scsi_dev_info_list
* via scsi_dev_info_list_init.
*
* Do not add to this list, use the command line or proc interface to add
* to the scsi_dev_info_list. This table will eventually go away.
*/
/* Legacy static list: {vendor, model, bad revision (unused), BLIST_* flags}. */
static struct {
	char *vendor;
	char *model;
	char *revision;	/* revision known to be bad, unused */
	unsigned flags;
} scsi_static_device_list[] __initdata = {
	/*
	 * The following devices are known not to tolerate a lun != 0 scan
	 * for one reason or another. Some will respond to all luns,
	 * others will lock up.
	 */
	{"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN},	/* locks up */
	{"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN},	/* locks up */
	{"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN},	/* locks up */
	{"DENON", "DRD-25X", "V", BLIST_NOLUN},			/* locks up */
	{"HITACHI", "DK312C", "CM81", BLIST_NOLUN},	/* responds to all lun */
	{"HITACHI", "DK314C", "CR21", BLIST_NOLUN},	/* responds to all lun */
	{"IBM", "2104-DU3", NULL, BLIST_NOLUN},		/* locks up */
	{"IBM", "2104-TU3", NULL, BLIST_NOLUN},		/* locks up */
	{"IMS", "CDD521/10", "2.06", BLIST_NOLUN},	/* locks up */
	{"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN},	/* locks up */
	{"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN},	/* locks up */
	{"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN},	/* locks up */
	{"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN},	/* locks up */
	{"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN},	/* locks up */
	{"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN},	/* responds to all lun */
	{"MICROTEK", "ScanMakerIII", "2.30", BLIST_NOLUN},	/* responds to all lun */
	{"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},/* locks up */
	{"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN},	/* responds to all lun */
	{"RODIME", "RO3000S", "2.33", BLIST_NOLUN},	/* locks up */
	{"SUN", "SENA", NULL, BLIST_NOLUN},		/* responds to all luns */
	/*
	 * The following causes a failed REQUEST SENSE on lun 1 for
	 * aha152x controller, which causes SCSI code to reset bus.
	 */
	{"SANYO", "CRD-250S", "1.20", BLIST_NOLUN},
	/*
	 * The following causes a failed REQUEST SENSE on lun 1 for
	 * aha152x controller, which causes SCSI code to reset bus.
	 */
	{"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN},
	{"SEAGATE", "ST296", "921", BLIST_NOLUN},	/* responds to all lun */
	{"SEAGATE", "ST1581", "6538", BLIST_NOLUN},	/* responds to all lun */
	{"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-8012", NULL, BLIST_NOLUN},
	{"SONY", "SDT-5000", "3.17", BLIST_SELECT_NO_ATN},
	{"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN},	/* locks up */
	{"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN},	/* locks up */
	/*
	 * The following causes a failed REQUEST SENSE on lun 1 for
	 * seagate controller, which causes SCSI code to reset bus.
	 */
	{"TEAC", "CD-ROM", "1.06", BLIST_NOLUN},
	{"TEAC", "MT-2ST/45S2-27", "RV M", BLIST_NOLUN},	/* responds to all lun */
	/*
	 * The following causes a failed REQUEST SENSE on lun 1 for
	 * seagate controller, which causes SCSI code to reset bus.
	 */
	{"HP", "C1750A", "3226", BLIST_NOLUN},		/* scanjet iic */
	{"HP", "C1790A", "", BLIST_NOLUN},		/* scanjet iip */
	{"HP", "C2500A", "", BLIST_NOLUN},		/* scanjet iicx */
	{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN},	/* locks up */
	{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN},	/* responds to all lun */
	{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN},	/* locks up */
	{"NEC", "D3856", "0009", BLIST_NOLUN},
	{"QUANTUM", "LPS525S", "3110", BLIST_NOLUN},	/* locks up */
	{"QUANTUM", "PD1225S", "3110", BLIST_NOLUN},	/* locks up */
	{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN},	/* locks up */
	{"RELISYS", "Scorpio", NULL, BLIST_NOLUN},	/* responds to all lun */
	{"SANKYO", "CP525", "6.64", BLIST_NOLUN},	/* causes failed REQ SENSE, extra reset */
	{"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
	{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
	{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN},	/* locks up */
	{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN},	/* locks up */
	{"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN},	/* locks up */
	{"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN},	/* locks up */
	{"", "Scanner", "1.80", BLIST_NOLUN},	/* responds to all lun */
	/*
	 * Other types of devices that have special flags.
	 * Note that all USB devices should have the BLIST_INQUIRY_36 flag.
	 */
	{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
	{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
	{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
	{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
	{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
	{"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
	{"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
	{"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"CMD", "CRA-7280", NULL, BLIST_SPARSELUN},	/* CMD RAID Controller */
	{"CNSI", "G7324", NULL, BLIST_SPARSELUN},	/* Chaparral G7324 RAID */
	{"CNSi", "G8324", NULL, BLIST_SPARSELUN},	/* Chaparral G8324 RAID */
	{"COMPAQ", "ARRAY CONTROLLER", NULL, BLIST_SPARSELUN | BLIST_LARGELUN |
		BLIST_MAX_512 | BLIST_REPORTLUN2},	/* Compaq RA4x00 */
	{"COMPAQ", "LOGICAL VOLUME", NULL, BLIST_FORCELUN | BLIST_MAX_512}, /* Compaq RA4x00 */
	{"COMPAQ", "CR3500", NULL, BLIST_FORCELUN},
	{"COMPAQ", "MSA1000", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
	{"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
	{"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
	{"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN},
	{"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
	{"DELL", "PV660F", NULL, BLIST_SPARSELUN},
	{"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN},
	{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN},	/* Dell PV 530F */
	{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
	{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
	{"DGC", "RAID", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, storage on LUN 0 */
	{"DGC", "DISK", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, no storage on LUN 0 */
	{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
	{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
	{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
	{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
	{"easyRAID", "F8", NULL, BLIST_NOREPORTLUN},
	{"FSC", "CentricStor", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
	{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
	{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
	{"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
	{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN},	/* HP VA7400 */
	{"HP", "OPEN-", "*", BLIST_REPORTLUN2},	/* HP XP Arrays */
	{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
	{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
	{"HP", "C1557A", NULL, BLIST_FORCELUN},
	{"HP", "C3323-300", "4269", BLIST_NOTQ},
	{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
	{"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"HP", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
	{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
	{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
	{"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
	{"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
	{"INSITE", "I325VM", NULL, BLIST_KEY},
	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
	{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
	{"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
	{"MegaRAID", "LD", NULL, BLIST_FORCELUN},
	{"MICROP", "4110", NULL, BLIST_NOTQ},
	{"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
	{"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
	{"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
	{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
	{"Promise", "", NULL, BLIST_SPARSELUN},
	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
	{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
	{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
	{"SEAGATE", "ST34555N", "0930", BLIST_NOTQ},	/* Chokes on tagged INQUIRY */
	{"SEAGATE", "ST3390N", "9546", BLIST_NOTQ},
	{"SGI", "RAID3", "*", BLIST_SPARSELUN},
	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
	{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
	{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
	{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
	{"SONY", "TSL", NULL, BLIST_FORCELUN},		/* DDS3 & DDS4 autoloaders */
	{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
	{"SUN", "T300", "*", BLIST_SPARSELUN},
	{"SUN", "T4", "*", BLIST_SPARSELUN},
	{"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
	{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
	{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
	{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
	{"Traxdata", "CDR4120", NULL, BLIST_NOLUN},	/* locks up */
	{"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
	{"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
	{"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
	{"WangDAT", "Model 1300", "02.4", BLIST_SELECT_NO_ATN},
	{"WDC WD25", "00JB-00FUA0", NULL, BLIST_NOREPORTLUN},
	{"XYRATEX", "RS", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
	{"Zzyzx", "RocketStor 500S", NULL, BLIST_SPARSELUN},
	{"Zzyzx", "RocketStor 2000", NULL, BLIST_SPARSELUN},
	{ NULL, NULL, NULL, 0 },	/* terminator entry */
};
static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key)
{
struct scsi_dev_info_list_table *devinfo_table;
int found = 0;
list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
if (devinfo_table->key == key) {
found = 1;
break;
}
if (!found)
return ERR_PTR(-EINVAL);
return devinfo_table;
}
/*
 * scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
 * devinfo vendor and model strings.
 */
static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
				char *from, int compatible)
{
	size_t len = strlen(from);

	/* Warn (but still truncate-copy) when the source doesn't fit. */
	if (len > to_length)
		printk(KERN_WARNING "%s: %s string '%s' is too long\n",
		       __func__, name, from);

	strncpy(to, from, min(to_length, len));
	if (len < to_length) {
		if (compatible) {
			/* NUL terminate the string if it is short. */
			to[len] = '\0';
		} else {
			/* space pad the string if it is short. */
			strncpy(&to[len], spaces, to_length - len);
		}
	}
}
/**
 * scsi_dev_info_list_add - add one dev_info list entry.
 * @compatible: if true, null terminate short strings.  Otherwise space pad.
 * @vendor: vendor string
 * @model: model (product) string
 * @strflags: integer string
 * @flags: if strflags NULL, use this flag value
 *
 * Description:
 *	Create and add one dev_info entry for @vendor, @model, @strflags or
 *	@flag. If @compatible, add to the tail of the list, do not space
 *	pad, and set devinfo->compatible. The scsi_static_device_list entries
 *	are added with @compatible 1 and @strflags NULL.
 *
 * Returns: 0 OK, -error on failure.
 **/
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
				  char *strflags, int flags)
{
	/* Convenience wrapper: always targets the global list. */
	return scsi_dev_info_list_add_keyed(compatible, vendor, model,
					    strflags, flags,
					    SCSI_DEVINFO_GLOBAL);
}
/**
 * scsi_dev_info_list_add_keyed - add one dev_info list entry.
 * @compatible: if true, null terminate short strings.  Otherwise space pad.
 * @vendor: vendor string
 * @model: model (product) string
 * @strflags: integer string
 * @flags: if strflags NULL, use this flag value
 * @key: specify list to use
 *
 * Description:
 *	Create and add one dev_info entry for @vendor, @model,
 *	@strflags or @flag in list specified by @key. If @compatible,
 *	add to the tail of the list, do not space pad, and set
 *	devinfo->compatible. The scsi_static_device_list entries are
 *	added with @compatible 1 and @strflags NULL.
 *
 * Returns: 0 OK, -error on failure.
 **/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
				 char *strflags, int flags, int key)
{
	struct scsi_dev_info_list *devinfo;
	struct scsi_dev_info_list_table *devinfo_table =
		scsi_devinfo_lookup_by_key(key);

	if (IS_ERR(devinfo_table))
		return PTR_ERR(devinfo_table);

	devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
	if (!devinfo) {
		printk(KERN_ERR "%s: no memory\n", __func__);
		return -ENOMEM;
	}

	/* Copy with truncation; padding/termination depends on @compatible. */
	scsi_strcpy_devinfo("vendor", devinfo->vendor, sizeof(devinfo->vendor),
			    vendor, compatible);
	scsi_strcpy_devinfo("model", devinfo->model, sizeof(devinfo->model),
			    model, compatible);

	/* A flags string (e.g. from /proc or the command line) wins over @flags. */
	if (strflags)
		devinfo->flags = simple_strtoul(strflags, NULL, 0);
	else
		devinfo->flags = flags;
	devinfo->compatible = compatible;

	/* Static (compatible) entries go last so dynamic entries match first. */
	if (compatible)
		list_add_tail(&devinfo->dev_info_list,
			      &devinfo_table->scsi_dev_info_list);
	else
		list_add(&devinfo->dev_info_list,
			 &devinfo_table->scsi_dev_info_list);

	return 0;
}
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
 * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
 * @vendor: vendor string
 * @model: model (product) string
 * @key: specify list to use
 *
 * Description:
 *	Remove and destroy one dev_info entry for @vendor, @model
 *	in list specified by @key.
 *
 * Returns: 0 OK, -ENOENT if no entry matched, or -error from key lookup.
 **/
int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
{
	struct scsi_dev_info_list *devinfo, *found = NULL;
	struct scsi_dev_info_list_table *devinfo_table =
		scsi_devinfo_lookup_by_key(key);

	if (IS_ERR(devinfo_table))
		return PTR_ERR(devinfo_table);

	list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
			    dev_info_list) {
		if (devinfo->compatible) {
			/*
			 * Behave like the older version of get_device_flags.
			 */
			size_t max;
			/*
			 * XXX why skip leading spaces? If an odd INQUIRY
			 * value, that should have been part of the
			 * scsi_static_device_list[] entry, such as " FOO"
			 * rather than "FOO". Since this code is already
			 * here, and we don't know what device it is
			 * trying to work with, leave it as-is.
			 */
			max = 8;	/* max length of vendor */
			while ((max > 0) && *vendor == ' ') {
				max--;
				vendor++;
			}
			/*
			 * XXX removing the following strlen() would be
			 * good, using it means that for an entry not in
			 * the list, we scan every byte of every vendor
			 * listed in scsi_static_device_list[], and never match
			 * a single one (and still have to compare at
			 * least the first byte of each vendor).
			 */
			if (memcmp(devinfo->vendor, vendor,
				   min(max, strlen(devinfo->vendor))))
				continue;
			/*
			 * Skip spaces again.
			 */
			max = 16;	/* max length of model */
			while ((max > 0) && *model == ' ') {
				max--;
				model++;
			}
			if (memcmp(devinfo->model, model,
				   min(max, strlen(devinfo->model))))
				continue;
			found = devinfo;
		} else {
			/* Dynamic entries compare the full fixed-width fields. */
			if (!memcmp(devinfo->vendor, vendor,
				    sizeof(devinfo->vendor)) &&
			    !memcmp(devinfo->model, model,
				    sizeof(devinfo->model)))
				found = devinfo;
		}
		if (found)
			break;
	}

	if (found) {
		list_del(&found->dev_info_list);
		kfree(found);
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
/**
 * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
 * @dev_list: string of device flags to add
 *
 * Description:
 *	Parse dev_list, and add entries to the scsi_dev_info_list.
 *	dev_list is of the form "vendor:product:flag,vendor:product:flag".
 *	dev_list is modified via strsep.  Can be called for command line
 *	addition, for proc or maybe a sysfs interface.
 *
 * Returns: 0 if OK, -error on failure.
 **/
static int scsi_dev_info_list_add_str(char *dev_list)
{
	char *vendor, *model, *strflags, *next;
	char *next_check;
	int res = 0;

	next = dev_list;
	if (next && next[0] == '"') {
		/*
		 * Ignore both the leading and trailing quote.
		 */
		next++;
		next_check = ",\"";
	} else {
		next_check = ",";
	}

	/*
	 * For the leading and trailing '"' case, the for loop comes
	 * through the last time with vendor[0] == '\0'.
	 */
	for (vendor = strsep(&next, ":"); vendor && (vendor[0] != '\0')
	     && (res == 0); vendor = strsep(&next, ":")) {
		strflags = NULL;
		model = strsep(&next, ":");
		if (model)
			strflags = strsep(&next, next_check);
		/* All three fields are required; bail on a malformed triple. */
		if (!model || !strflags) {
			printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
			       " '%s'\n", __func__, vendor, model,
			       strflags);
			res = -EINVAL;
		} else
			res = scsi_dev_info_list_add(0 /* compatible */, vendor,
						     model, strflags, 0);
	}
	return res;
}
/**
 * scsi_get_device_flags - get device specific flags from the dynamic device list.
 * @sdev: &scsi_device to get flags for
 * @vendor: vendor name
 * @model: model name
 *
 * Description:
 *	Search the global scsi_dev_info_list (specified by list zero)
 *	for an entry matching @vendor and @model, if found, return the
 *	matching flags value, else return the host or global default
 *	settings.  Called during scan time.
 **/
int scsi_get_device_flags(struct scsi_device *sdev,
			  const unsigned char *vendor,
			  const unsigned char *model)
{
	/* Convenience wrapper: always searches the global list. */
	return scsi_get_device_flags_keyed(sdev, vendor, model,
					   SCSI_DEVINFO_GLOBAL);
}
/**
 * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list
 * @sdev: &scsi_device to get flags for
 * @vendor: vendor name
 * @model: model name
 * @key: list to look up
 *
 * Description:
 *	Search the scsi_dev_info_list specified by @key for an entry
 *	matching @vendor and @model, if found, return the matching
 *	flags value, else return the host or global default settings.
 *	Called during scan time.
 **/
int scsi_get_device_flags_keyed(struct scsi_device *sdev,
				const unsigned char *vendor,
				const unsigned char *model,
				int key)
{
	struct scsi_dev_info_list *devinfo;
	struct scsi_dev_info_list_table *devinfo_table;

	devinfo_table = scsi_devinfo_lookup_by_key(key);

	if (IS_ERR(devinfo_table))
		return PTR_ERR(devinfo_table);

	list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
			    dev_info_list) {
		if (devinfo->compatible) {
			/*
			 * Behave like the older version of get_device_flags.
			 */
			size_t max;
			/*
			 * XXX why skip leading spaces? If an odd INQUIRY
			 * value, that should have been part of the
			 * scsi_static_device_list[] entry, such as " FOO"
			 * rather than "FOO". Since this code is already
			 * here, and we don't know what device it is
			 * trying to work with, leave it as-is.
			 */
			max = 8;	/* max length of vendor */
			while ((max > 0) && *vendor == ' ') {
				max--;
				vendor++;
			}
			/*
			 * XXX removing the following strlen() would be
			 * good, using it means that for an entry not in
			 * the list, we scan every byte of every vendor
			 * listed in scsi_static_device_list[], and never match
			 * a single one (and still have to compare at
			 * least the first byte of each vendor).
			 */
			if (memcmp(devinfo->vendor, vendor,
				   min(max, strlen(devinfo->vendor))))
				continue;
			/*
			 * Skip spaces again.
			 */
			max = 16;	/* max length of model */
			while ((max > 0) && *model == ' ') {
				max--;
				model++;
			}
			if (memcmp(devinfo->model, model,
				   min(max, strlen(devinfo->model))))
				continue;
			return devinfo->flags;
		} else {
			/* Dynamic entries compare the full fixed-width fields. */
			if (!memcmp(devinfo->vendor, vendor,
				    sizeof(devinfo->vendor)) &&
			    !memcmp(devinfo->model, model,
				    sizeof(devinfo->model)))
				return devinfo->flags;
		}
	}
	/* nothing found, return nothing */
	if (key != SCSI_DEVINFO_GLOBAL)
		return 0;

	/* except for the global list, where we have an exception */
	if (sdev->sdev_bflags)
		return sdev->sdev_bflags;

	return scsi_default_dev_flags;
}
EXPORT_SYMBOL(scsi_get_device_flags_keyed);
#ifdef CONFIG_SCSI_PROC_FS
/*
 * Two-level cursor for the /proc seq_file iterator: @top walks the
 * master list of tables, @bottom walks the current table's entries.
 */
struct double_list {
	struct list_head *top;
	struct list_head *bottom;
};
/* seq_file ->show: print one devinfo entry, preceded by its table's name
 * when this is the first entry of a named (non-global) table. */
static int devinfo_seq_show(struct seq_file *m, void *v)
{
	struct double_list *dl = v;
	struct scsi_dev_info_list_table *devinfo_table =
		list_entry(dl->top, struct scsi_dev_info_list_table, node);
	struct scsi_dev_info_list *devinfo =
		list_entry(dl->bottom, struct scsi_dev_info_list,
			   dev_info_list);

	/* Emit the "[name]:" header only at the top of a named table. */
	if (devinfo_table->scsi_dev_info_list.next == dl->bottom &&
	    devinfo_table->name)
		seq_printf(m, "[%s]:\n", devinfo_table->name);

	seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
		   devinfo->vendor, devinfo->model, devinfo->flags);

	return 0;
}
/* seq_file ->start: allocate a cursor and advance it to position *ppos
 * (counted across all tables); returns NULL at end of data or on OOM. */
static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos)
{
	struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL);
	loff_t pos = *ppos;

	if (!dl)
		return NULL;

	list_for_each(dl->top, &scsi_dev_info_list) {
		struct scsi_dev_info_list_table *devinfo_table =
			list_entry(dl->top, struct scsi_dev_info_list_table,
				   node);
		list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list)
			if (pos-- == 0)
				return dl;
	}

	kfree(dl);
	return NULL;
}
/* seq_file ->next: step the cursor to the next entry, crossing into the
 * next table when the current one is exhausted (tables may be empty).
 * Frees the cursor and returns NULL when the master list is done. */
static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct double_list *dl = v;
	struct scsi_dev_info_list_table *devinfo_table =
		list_entry(dl->top, struct scsi_dev_info_list_table, node);

	++*ppos;
	dl->bottom = dl->bottom->next;
	/* Skip past table heads (including empty tables) to the next entry. */
	while (&devinfo_table->scsi_dev_info_list == dl->bottom) {
		dl->top = dl->top->next;
		if (dl->top == &scsi_dev_info_list) {
			kfree(dl);
			return NULL;
		}
		devinfo_table = list_entry(dl->top,
					   struct scsi_dev_info_list_table,
					   node);
		dl->bottom = devinfo_table->scsi_dev_info_list.next;
	}

	return dl;
}
/* seq_file ->stop: release the cursor allocated by devinfo_seq_start(). */
static void devinfo_seq_stop(struct seq_file *m, void *v)
{
	kfree(v);
}
/* seq_file iterator for /proc/scsi/device_info reads. */
static const struct seq_operations scsi_devinfo_seq_ops = {
	.start	= devinfo_seq_start,
	.next	= devinfo_seq_next,
	.stop	= devinfo_seq_stop,
	.show	= devinfo_seq_show,
};
/* ->open for /proc/scsi/device_info: attach the seq_file iterator. */
static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scsi_devinfo_seq_ops);
}
/*
 * proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
 *
 * Description: Adds a black/white list entry for vendor and model with an
 * integer value of flag to the scsi device info list.
 * To use, echo "vendor:model:flag" > /proc/scsi/device_info
 */
static ssize_t proc_scsi_devinfo_write(struct file *file,
				       const char __user *buf,
				       size_t length, loff_t *ppos)
{
	char *buffer;
	ssize_t err;

	if (!buf || length > PAGE_SIZE)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = length;
	if (copy_from_user(buffer, buf, length)) {
		err = -EFAULT;
		goto out;
	}

	/* Ensure the buffer is NUL terminated before handing it to the parser. */
	if (length < PAGE_SIZE)
		buffer[length] = '\0';
	else if (buffer[PAGE_SIZE - 1]) {
		err = -EINVAL;
		goto out;
	}

	scsi_dev_info_list_add_str(buffer);

out:
	free_page((unsigned long)buffer);
	return err;
}
/* File operations for /proc/scsi/device_info: seq_file reads, write adds entries. */
static const struct file_operations scsi_devinfo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= proc_scsi_devinfo_open,
	.read		= seq_read,
	.write		= proc_scsi_devinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_SCSI_PROC_FS */
/* "vendor:model:flags[,...]" triples parsed into the global list at init. */
module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0);
MODULE_PARM_DESC(dev_flags,
	 "Given scsi_dev_flags=vendor:model:flags[,v:m:f] add black/white"
	 " list entries for vendor and model with an integer value of flags"
	 " to the scsi device info list");

/* Fallback BLIST flags returned when no list entry matches a device. */
module_param_named(default_dev_flags, scsi_default_dev_flags, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(default_dev_flags,
		 "scsi default device flag integer value");
/**
 * scsi_exit_devinfo - remove /proc/scsi/device_info & the scsi_dev_info_list
 **/
void scsi_exit_devinfo(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	remove_proc_entry("scsi/device_info", NULL);
#endif

	/* Destroys the global list and all of its entries. */
	scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL);
}
/**
 * scsi_dev_info_add_list - add a new devinfo list
 * @key: key of the list to add
 * @name: Name of the list to add (for /proc/scsi/device_info)
 *
 * Adds the requested list, returns zero on success, -EEXIST if the
 * key is already registered to a list, or other error on failure.
 */
int scsi_dev_info_add_list(int key, const char *name)
{
	struct scsi_dev_info_list_table *devinfo_table =
		scsi_devinfo_lookup_by_key(key);

	if (!IS_ERR(devinfo_table))
		/* list already exists */
		return -EEXIST;

	devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL);

	if (!devinfo_table)
		return -ENOMEM;

	INIT_LIST_HEAD(&devinfo_table->node);
	INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list);
	devinfo_table->name = name;	/* NOTE: @name is kept by reference, not copied */
	devinfo_table->key = key;
	list_add_tail(&devinfo_table->node, &scsi_dev_info_list);

	return 0;
}
EXPORT_SYMBOL(scsi_dev_info_add_list);
/**
* scsi_dev_info_remove_list - destroy an added devinfo list
* @key: key of the list to destroy
*
* Iterates over the entire list first, freeing all the values, then
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
int scsi_dev_info_remove_list(int key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
if (IS_ERR(devinfo_table))
/* no such list */
return -EINVAL;
/* remove from the master list */
list_del(&devinfo_table->node);
list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) {
struct scsi_dev_info_list *devinfo;
devinfo = list_entry(lh, struct scsi_dev_info_list,
dev_info_list);
kfree(devinfo);
}
kfree(devinfo_table);
return 0;
}
EXPORT_SYMBOL(scsi_dev_info_remove_list);
/**
 * scsi_init_devinfo - set up the dynamic device list.
 *
 * Description:
 *	Add command line entries from scsi_dev_flags, then add
 *	scsi_static_device_list entries to the scsi device info list.
 */
int __init scsi_init_devinfo(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	struct proc_dir_entry *p;
#endif
	int error, i;

	/* The global list must exist before any entries can be added. */
	error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
	if (error)
		return error;

	/* Command-line/module-parameter entries first (matched before static). */
	error = scsi_dev_info_list_add_str(scsi_dev_flags);
	if (error)
		goto out;

	for (i = 0; scsi_static_device_list[i].vendor; i++) {
		error = scsi_dev_info_list_add(1 /* compatible */,
				scsi_static_device_list[i].vendor,
				scsi_static_device_list[i].model,
				NULL,
				scsi_static_device_list[i].flags);
		if (error)
			goto out;
	}

#ifdef CONFIG_SCSI_PROC_FS
	p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
	if (!p) {
		error = -ENOMEM;
		goto out;
	}
#endif /* CONFIG_SCSI_PROC_FS */

 out:
	/* On any failure, tear down everything added so far. */
	if (error)
		scsi_exit_devinfo();
	return error;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.