repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
raisul2010/samurai-kernel | drivers/input/ff-core.c | 986 | 9051 | /*
* Force feedback support for Linux input subsystem
*
* Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com>
* Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* #define DEBUG */
#define debug(format, arg...) pr_debug("ff-core: " format "\n", ## arg)
#include <linux/input.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * Verify that @effect_id names an allocated effect and, when @file is
 * non-NULL, that @file is the owner of that effect.
 *
 * Returns 0 on success, -EINVAL for an invalid/unallocated id and
 * -EACCES when the requester is not the owner.
 */
static int check_effect_access(struct ff_device *ff, int effect_id,
				struct file *file)
{
	struct file *owner;

	if (effect_id < 0 || effect_id >= ff->max_effects)
		return -EINVAL;

	owner = ff->effect_owners[effect_id];
	if (!owner)
		return -EINVAL;

	if (file && owner != file)
		return -EACCES;

	return 0;
}
/*
 * Checks whether 2 effects can be combined together.
 *
 * Effects are compatible when they have the same type and, for
 * periodic effects, the same waveform.
 */
static inline int check_effects_compatible(struct ff_effect *e1,
					   struct ff_effect *e2)
{
	if (e1->type != e2->type)
		return 0;

	if (e1->type != FF_PERIODIC)
		return 1;

	return e1->u.periodic.waveform == e2->u.periodic.waveform;
}
/*
 * Convert an effect into a compatible one that the device can play.
 *
 * Currently only FF_RUMBLE is converted, into an equivalent FF_PERIODIC
 * sine effect (provided the device advertises FF_PERIODIC). Any other
 * type is passed through unchanged for the driver to handle.
 *
 * Returns 0 on success, -EINVAL if no conversion is possible.
 */
static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
{
	int magnitude;

	switch (effect->type) {
	case FF_RUMBLE:
		if (!test_bit(FF_PERIODIC, ff->ffbit))
			return -EINVAL;

		/*
		 * calculate magnitude of sine wave as average of rumble's
		 * 2/3 of strong magnitude and 1/3 of weak magnitude
		 */
		magnitude = effect->u.rumble.strong_magnitude / 3 +
			    effect->u.rumble.weak_magnitude / 6;

		effect->type = FF_PERIODIC;
		effect->u.periodic.waveform = FF_SINE;
		effect->u.periodic.period = 50;
		/*
		 * Clamp to the s16 maximum. The original used max() here,
		 * which — since strong/3 + weak/6 can never exceed 0x7fff —
		 * always forced full-strength output and discarded the
		 * computed magnitude entirely.
		 */
		effect->u.periodic.magnitude = min(magnitude, 0x7fff);
		effect->u.periodic.offset = 0;
		effect->u.periodic.phase = 0;
		effect->u.periodic.envelope.attack_length = 0;
		effect->u.periodic.envelope.attack_level = 0;
		effect->u.periodic.envelope.fade_length = 0;
		effect->u.periodic.envelope.fade_level = 0;

		return 0;

	default:
		/* Let driver handle conversion */
		return 0;
	}
}
/**
 * input_ff_upload() - upload effect into force-feedback device
 * @dev: input device
 * @effect: effect to be uploaded
 * @file: owner of the effect
 *
 * Validates the effect, converts it if the hardware does not support
 * the requested type natively, allocates a slot when @effect->id is -1
 * (the allocated id is written back into @effect), and hands it to the
 * device's upload handler. Returns 0 or a negative error code.
 */
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect,
		    struct file *file)
{
	struct ff_device *ff = dev->ff;
	struct ff_effect *old;
	int ret = 0;
	int id;

	if (!test_bit(EV_FF, dev->evbit))
		return -ENOSYS;

	if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX ||
	    !test_bit(effect->type, dev->ffbit)) {
		debug("invalid or not supported effect type in upload");
		return -EINVAL;
	}

	if (effect->type == FF_PERIODIC &&
	    (effect->u.periodic.waveform < FF_WAVEFORM_MIN ||
	     effect->u.periodic.waveform > FF_WAVEFORM_MAX ||
	     !test_bit(effect->u.periodic.waveform, dev->ffbit))) {
		debug("invalid or not supported wave form in upload");
		return -EINVAL;
	}

	/*
	 * The hardware itself (ff->ffbit) lacks this type even though
	 * dev->ffbit advertises it, so it must be an emulated type —
	 * try converting it into something the device can play.
	 */
	if (!test_bit(effect->type, ff->ffbit)) {
		ret = compat_effect(ff, effect);
		if (ret)
			return ret;
	}

	mutex_lock(&ff->mutex);

	if (effect->id == -1) {
		/* id == -1 asks us to allocate a free effect slot */
		for (id = 0; id < ff->max_effects; id++)
			if (!ff->effect_owners[id])
				break;

		if (id >= ff->max_effects) {
			ret = -ENOSPC;
			goto out;
		}

		effect->id = id;
		old = NULL;
	} else {
		/* replacing an existing effect: caller must own it and
		 * the replacement must be compatible with the old one */
		id = effect->id;

		ret = check_effect_access(ff, id, file);
		if (ret)
			goto out;

		old = &ff->effects[id];

		if (!check_effects_compatible(effect, old)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = ff->upload(dev, effect, old);
	if (ret)
		goto out;

	/*
	 * event_lock prevents the playback path (input_ff_event) from
	 * observing a half-updated effect/owner pair.
	 */
	spin_lock_irq(&dev->event_lock);
	ff->effects[id] = *effect;
	ff->effect_owners[id] = file;
	spin_unlock_irq(&dev->event_lock);

 out:
	mutex_unlock(&ff->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(input_ff_upload);
/*
 * Erases the effect if the requester is also the effect owner. The mutex
 * should already be locked before calling this function.
 *
 * Returns 0 on success, -EINVAL/-EACCES from the access check, or the
 * error from the device's erase handler.
 */
static int erase_effect(struct input_dev *dev, int effect_id,
			struct file *file)
{
	struct ff_device *ff = dev->ff;
	int error;

	error = check_effect_access(ff, effect_id, file);
	if (error)
		return error;

	/* stop playback and release ownership before touching hardware */
	spin_lock_irq(&dev->event_lock);
	ff->playback(dev, effect_id, 0);
	ff->effect_owners[effect_id] = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (ff->erase) {
		error = ff->erase(dev, effect_id);
		if (error) {
			/* device-level erase failed: restore ownership */
			spin_lock_irq(&dev->event_lock);
			ff->effect_owners[effect_id] = file;
			spin_unlock_irq(&dev->event_lock);

			return error;
		}
	}

	return 0;
}
/**
* input_ff_erase - erase a force-feedback effect from device
* @dev: input device to erase effect from
* @effect_id: id of the ffect to be erased
* @file: purported owner of the request
*
* This function erases a force-feedback effect from specified device.
* The effect will only be erased if it was uploaded through the same
* file handle that is requesting erase.
*/
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
{
struct ff_device *ff = dev->ff;
int ret;
if (!test_bit(EV_FF, dev->evbit))
return -ENOSYS;
mutex_lock(&ff->mutex);
ret = erase_effect(dev, effect_id, file);
mutex_unlock(&ff->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
 * flush_effects - erase all effects owned by a file handle
 *
 * Used as the input device's ->flush handler; per-effect erase errors
 * are intentionally ignored (effects owned by others simply stay).
 */
static int flush_effects(struct input_dev *dev, struct file *file)
{
	struct ff_device *ff = dev->ff;
	int effect_id;

	debug("flushing now");

	mutex_lock(&ff->mutex);

	for (effect_id = 0; effect_id < ff->max_effects; effect_id++)
		erase_effect(dev, effect_id, file);

	mutex_unlock(&ff->mutex);

	return 0;
}
/**
 * input_ff_event() - generic handler for force-feedback events
 * @dev: input device to send the effect to
 * @type: event type (anything but EV_FF is ignored)
 * @code: event code
 * @value: event value
 *
 * FF_GAIN and FF_AUTOCENTER set device-wide parameters; any other
 * code is treated as an effect id whose playback is started
 * (value != 0) or stopped (value == 0).
 */
int input_ff_event(struct input_dev *dev, unsigned int type,
		   unsigned int code, int value)
{
	struct ff_device *ff = dev->ff;

	if (type != EV_FF)
		return 0;

	switch (code) {
	case FF_GAIN:
		/* ignore if unsupported or above the 16-bit range */
		if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffff)
			break;

		ff->set_gain(dev, value);
		break;

	case FF_AUTOCENTER:
		if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffff)
			break;

		ff->set_autocenter(dev, value);
		break;

	default:
		/*
		 * Ownership is not checked here (file == NULL): any
		 * allocated effect may be played through the event path.
		 */
		if (check_effect_access(ff, code, NULL) == 0)
			ff->playback(dev, code, value);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_event);
/**
 * input_ff_create() - create force-feedback device
 * @dev: input device supporting force-feedback
 * @max_effects: maximum number of effects supported by the device
 *
 * This function allocates all necessary memory for a force feedback
 * portion of an input device and installs all default handlers.
 * @dev->ffbit should be already set up before calling this function.
 * Once ff device is created you need to setup its upload, erase,
 * playback and other handlers before registering input device
 *
 * Returns 0 on success, -EINVAL for max_effects == 0, -ENOMEM on
 * allocation failure.
 */
int input_ff_create(struct input_dev *dev, int max_effects)
{
	struct ff_device *ff;
	int i;

	if (!max_effects) {
		printk(KERN_ERR
		       "ff-core: cannot allocate device without any effects\n");
		return -EINVAL;
	}

	/*
	 * Single allocation covers the ff_device plus room for
	 * max_effects owner pointers — effect_owners is presumably a
	 * trailing flexible array in struct ff_device (declared in
	 * linux/input.h; confirm there).
	 */
	ff = kzalloc(sizeof(struct ff_device) +
		     max_effects * sizeof(struct file *), GFP_KERNEL);
	if (!ff)
		return -ENOMEM;

	ff->effects = kcalloc(max_effects, sizeof(struct ff_effect),
			      GFP_KERNEL);
	if (!ff->effects) {
		kfree(ff);
		return -ENOMEM;
	}

	ff->max_effects = max_effects;
	mutex_init(&ff->mutex);

	dev->ff = ff;
	dev->flush = flush_effects;
	dev->event = input_ff_event;
	__set_bit(EV_FF, dev->evbit);

	/* Copy "true" bits into ff device bitmap */
	for (i = 0; i <= FF_MAX; i++)
		if (test_bit(i, dev->ffbit))
			__set_bit(i, ff->ffbit);

	/* we can emulate RUMBLE with periodic effects */
	if (test_bit(FF_PERIODIC, ff->ffbit))
		__set_bit(FF_RUMBLE, dev->ffbit);

	return 0;
}
EXPORT_SYMBOL_GPL(input_ff_create);
/**
 * input_ff_destroy() - frees force feedback portion of input device
 * @dev: input device supporting force feedback
 *
 * This function is only needed in error path as input core will
 * automatically free force feedback structures when device is
 * destroyed.
 */
void input_ff_destroy(struct input_dev *dev)
{
	struct ff_device *ff = dev->ff;

	__clear_bit(EV_FF, dev->evbit);

	if (!ff)
		return;

	if (ff->destroy)
		ff->destroy(ff);

	kfree(ff->private);
	kfree(ff->effects);
	kfree(ff);
	dev->ff = NULL;
}
EXPORT_SYMBOL_GPL(input_ff_destroy);
| gpl-2.0 |
arjen75/ics-lge-kernel-msm7x27-chick | fs/udf/symlink.c | 986 | 2411 | /*
* symlink.c
*
* PURPOSE
* Symlink handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2001 Ben Fennema
* (C) 1999 Stelias Computing Inc
*
* HISTORY
*
* 04/16/99 blf Created.
*
*/
#include "udfdecl.h"
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
/*
 * Decode an OSTA-UDF pathComponent sequence (@from, @fromlen bytes)
 * into an ordinary '/'-separated, NUL-terminated pathname in @to.
 *
 * NOTE(review): no bound on @to is enforced here — callers must
 * guarantee the destination (a whole page in udf_symlink_filler) is
 * large enough for the decoded path; confirm against upstream fixes.
 */
static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
			   int fromlen, unsigned char *to)
{
	struct pathComponent *pc;
	int elen = 0;
	unsigned char *p = to;

	while (elen < fromlen) {
		pc = (struct pathComponent *)(from + elen);
		switch (pc->componentType) {
		case 1:
			/*
			 * An empty type-1 component marks an absolute
			 * path: restart the output buffer at "/".
			 */
			if (pc->lengthComponentIdent == 0) {
				p = to;
				*p++ = '/';
			}
			break;
		case 3:
			/* parent directory */
			memcpy(p, "../", 3);
			p += 3;
			break;
		case 4:
			memcpy(p, "./", 2);
			p += 2;
			/* that would be . - just ignore */
			break;
		case 5:
			/* named component, translated from UDF encoding */
			p += udf_get_filename(sb, pc->componentIdent, p,
					      pc->lengthComponentIdent);
			*p++ = '/';
			break;
		}
		elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
	}
	if (p > to + 1)
		p[-1] = '\0';	/* replace the trailing '/' */
	else
		p[0] = '\0';
}
/*
 * Page-cache filler (->readpage) for UDF symlinks: read the raw
 * pathComponent data — either embedded in the ICB or from the first
 * mapped block — and decode it into the page.
 *
 * Returns 0 on success, -EIO if the block cannot be read.
 */
static int udf_symlink_filler(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh = NULL;
	unsigned char *symlink;
	int err = -EIO;
	unsigned char *p = kmap(page);
	struct udf_inode_info *iinfo;

	/* BKL serializes access to the UDF in-core inode state here */
	lock_kernel();
	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		/* symlink data stored inline, after the extended attrs */
		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
	} else {
		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));

		if (!bh)
			goto out;

		symlink = bh->b_data;
	}

	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
	brelse(bh);

	unlock_kernel();
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

out:
	unlock_kernel();
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return err;
}
/*
 * symlinks can't do much...
 * Only ->readpage is needed; the generic symlink code reads the
 * decoded target out of the page cache.
 */
const struct address_space_operations udf_symlink_aops = {
	.readpage		= udf_symlink_filler,
};
| gpl-2.0 |
mephistophilis/samsung_nowplus_kernel | arch/arm/mach-at91/at91sam9260_devices.c | 1754 | 34094 | /*
* arch/arm/mach-at91/at91sam9260_devices.c
*
* Copyright (C) 2006 Atmel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/cpu.h>
#include <mach/at91sam9260.h>
#include <mach/at91sam9260_matrix.h>
#include <mach/at91sam9_smc.h>
#include "generic.h"
/* --------------------------------------------------------------------
 *  USB Host
 * -------------------------------------------------------------------- */

#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static struct at91_usbh_data usbh_data;

/* OHCI register window plus the USB host controller interrupt */
static struct resource usbh_resources[] = {
	[0] = {
		.start	= AT91SAM9260_UHP_BASE,
		.end	= AT91SAM9260_UHP_BASE + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_UHP,
		.end	= AT91SAM9260_ID_UHP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_usbh_device = {
	.name		= "at91_ohci",
	.id		= -1,
	.dev		= {
		.dma_mask		= &ohci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &usbh_data,
	},
	.resource	= usbh_resources,
	.num_resources	= ARRAY_SIZE(usbh_resources),
};

/*
 * Register the OHCI USB host controller. Board data is copied into a
 * static, so the caller's copy may live in __initdata.
 */
void __init at91_add_device_usbh(struct at91_usbh_data *data)
{
	if (!data)
		return;

	usbh_data = *data;
	platform_device_register(&at91_usbh_device);
}
#else
void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  USB Device (Gadget)
 * -------------------------------------------------------------------- */

#ifdef CONFIG_USB_GADGET_AT91
static struct at91_udc_data udc_data;

static struct resource udc_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_UDP,
		.end	= AT91SAM9260_BASE_UDP + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_UDP,
		.end	= AT91SAM9260_ID_UDP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_udc_device = {
	.name		= "at91_udc",
	.id		= -1,
	.dev		= {
		.platform_data		= &udc_data,
	},
	.resource	= udc_resources,
	.num_resources	= ARRAY_SIZE(udc_resources),
};

/*
 * Register the USB device-port (gadget) controller; the optional VBUS
 * sense pin is configured as a deglitched GPIO input.
 */
void __init at91_add_device_udc(struct at91_udc_data *data)
{
	if (!data)
		return;

	if (data->vbus_pin) {
		at91_set_gpio_input(data->vbus_pin, 0);
		at91_set_deglitch(data->vbus_pin, 1);
	}

	/* Pullup pin is handled internally by USB device peripheral */

	udc_data = *data;
	platform_device_register(&at91_udc_device);
}
#else
void __init at91_add_device_udc(struct at91_udc_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  Ethernet
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
static struct at91_eth_data eth_data;

static struct resource eth_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_EMAC,
		.end	= AT91SAM9260_BASE_EMAC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_EMAC,
		.end	= AT91SAM9260_ID_EMAC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_eth_device = {
	.name		= "macb",
	.id		= -1,
	.dev		= {
		/*
		 * Fixed: these two initializers had been corrupted into
		 * "ð_dmamask"/"ð_data" by an HTML-entity mangling
		 * of "&eth_..." and would not compile.
		 */
		.dma_mask		= &eth_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &eth_data,
	},
	.resource	= eth_resources,
	.num_resources	= ARRAY_SIZE(eth_resources),
};

/*
 * Register the EMAC Ethernet controller and mux its pins; the extra
 * MII-only pins are configured unless the board uses RMII.
 */
void __init at91_add_device_eth(struct at91_eth_data *data)
{
	if (!data)
		return;

	if (data->phy_irq_pin) {
		at91_set_gpio_input(data->phy_irq_pin, 0);
		at91_set_deglitch(data->phy_irq_pin, 1);
	}

	/* Pins used for MII and RMII */
	at91_set_A_periph(AT91_PIN_PA19, 0);	/* ETXCK_EREFCK */
	at91_set_A_periph(AT91_PIN_PA17, 0);	/* ERXDV */
	at91_set_A_periph(AT91_PIN_PA14, 0);	/* ERX0 */
	at91_set_A_periph(AT91_PIN_PA15, 0);	/* ERX1 */
	at91_set_A_periph(AT91_PIN_PA18, 0);	/* ERXER */
	at91_set_A_periph(AT91_PIN_PA16, 0);	/* ETXEN */
	at91_set_A_periph(AT91_PIN_PA12, 0);	/* ETX0 */
	at91_set_A_periph(AT91_PIN_PA13, 0);	/* ETX1 */
	at91_set_A_periph(AT91_PIN_PA21, 0);	/* EMDIO */
	at91_set_A_periph(AT91_PIN_PA20, 0);	/* EMDC */

	if (!data->is_rmii) {
		at91_set_B_periph(AT91_PIN_PA28, 0);	/* ECRS */
		at91_set_B_periph(AT91_PIN_PA29, 0);	/* ECOL */
		at91_set_B_periph(AT91_PIN_PA25, 0);	/* ERX2 */
		at91_set_B_periph(AT91_PIN_PA26, 0);	/* ERX3 */
		at91_set_B_periph(AT91_PIN_PA27, 0);	/* ERXCK */
		at91_set_B_periph(AT91_PIN_PA23, 0);	/* ETX2 */
		at91_set_B_periph(AT91_PIN_PA24, 0);	/* ETX3 */
		at91_set_B_periph(AT91_PIN_PA22, 0);	/* ETXER */
	}

	eth_data = *data;
	platform_device_register(&at91sam9260_eth_device);
}
#else
void __init at91_add_device_eth(struct at91_eth_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  MMC / SD
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
static u64 mmc_dmamask = DMA_BIT_MASK(32);
static struct at91_mmc_data mmc_data;

static struct resource mmc_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_MCI,
		.end	= AT91SAM9260_BASE_MCI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_MCI,
		.end	= AT91SAM9260_ID_MCI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_mmc_device = {
	.name		= "at91_mci",
	.id		= -1,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc_data,
	},
	.resource	= mmc_resources,
	.num_resources	= ARRAY_SIZE(mmc_resources),
};

/*
 * Register the MCI controller for the legacy at91_mci driver and mux
 * the pins for slot A or slot B (plus optional 4-bit data bus).
 */
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
{
	if (!data)
		return;

	/* input/irq */
	if (data->det_pin) {
		at91_set_gpio_input(data->det_pin, 1);
		at91_set_deglitch(data->det_pin, 1);
	}
	if (data->wp_pin)
		at91_set_gpio_input(data->wp_pin, 1);
	if (data->vcc_pin)
		at91_set_gpio_output(data->vcc_pin, 0);

	/* CLK */
	at91_set_A_periph(AT91_PIN_PA8, 0);

	if (data->slot_b) {
		/* CMD */
		at91_set_B_periph(AT91_PIN_PA1, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_B_periph(AT91_PIN_PA0, 1);
		if (data->wire4) {
			at91_set_B_periph(AT91_PIN_PA5, 1);
			at91_set_B_periph(AT91_PIN_PA4, 1);
			at91_set_B_periph(AT91_PIN_PA3, 1);
		}
	} else {
		/* CMD */
		at91_set_A_periph(AT91_PIN_PA7, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_A_periph(AT91_PIN_PA6, 1);
		if (data->wire4) {
			at91_set_A_periph(AT91_PIN_PA9, 1);
			at91_set_A_periph(AT91_PIN_PA10, 1);
			at91_set_A_periph(AT91_PIN_PA11, 1);
		}
	}

	mmc_data = *data;
	platform_device_register(&at91sam9260_mmc_device);
}
#else
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  MMC / SD Slot for Atmel MCI Driver
 *
 * NOTE(review): this section reuses the mmc_dmamask/mmc_data/
 * at91sam9260_mmc_device identifiers from the at91_mci section above;
 * enabling both CONFIG_MMC_AT91 and CONFIG_MMC_ATMELMCI would clash —
 * confirm the Kconfig makes them mutually exclusive.
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
static u64 mmc_dmamask = DMA_BIT_MASK(32);
static struct mci_platform_data mmc_data;

static struct resource mmc_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_MCI,
		.end	= AT91SAM9260_BASE_MCI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_MCI,
		.end	= AT91SAM9260_ID_MCI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_mmc_device = {
	.name		= "atmel_mci",
	.id		= -1,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc_data,
	},
	.resource	= mmc_resources,
	.num_resources	= ARRAY_SIZE(mmc_resources),
};

/*
 * Register the MCI controller for the atmel-mci driver. Slot 0 uses
 * peripheral A pins, slot 1 uses peripheral B pins; the device is only
 * registered when at least one slot has a non-zero bus width.
 */
void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
{
	unsigned int i;
	unsigned int slot_count = 0;

	if (!data)
		return;

	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
		if (data->slot[i].bus_width) {
			/* input/irq */
			if (data->slot[i].detect_pin) {
				at91_set_gpio_input(data->slot[i].detect_pin, 1);
				at91_set_deglitch(data->slot[i].detect_pin, 1);
			}
			if (data->slot[i].wp_pin)
				at91_set_gpio_input(data->slot[i].wp_pin, 1);

			switch (i) {
			case 0:
				/* CMD */
				at91_set_A_periph(AT91_PIN_PA7, 1);

				/* DAT0, maybe DAT1..DAT3 */
				at91_set_A_periph(AT91_PIN_PA6, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_A_periph(AT91_PIN_PA9, 1);
					at91_set_A_periph(AT91_PIN_PA10, 1);
					at91_set_A_periph(AT91_PIN_PA11, 1);
				}
				slot_count++;
				break;
			case 1:
				/* CMD */
				at91_set_B_periph(AT91_PIN_PA1, 1);

				/* DAT0, maybe DAT1..DAT3 */
				at91_set_B_periph(AT91_PIN_PA0, 1);
				if (data->slot[i].bus_width == 4) {
					at91_set_B_periph(AT91_PIN_PA5, 1);
					at91_set_B_periph(AT91_PIN_PA4, 1);
					at91_set_B_periph(AT91_PIN_PA3, 1);
				}
				slot_count++;
				break;
			default:
				printk(KERN_ERR
				       "AT91: SD/MMC slot %d not available\n", i);
				break;
			}
		}
	}

	if (slot_count) {
		/* CLK */
		at91_set_A_periph(AT91_PIN_PA8, 0);

		mmc_data = *data;
		platform_device_register(&at91sam9260_mmc_device);
	}
}
#else
void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  NAND / SmartMedia
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
static struct atmel_nand_data nand_data;

#define NAND_BASE	AT91_CHIPSELECT_3

/* NAND on EBI chip-select 3, plus the hardware ECC register block */
static struct resource nand_resources[] = {
	[0] = {
		.start	= NAND_BASE,
		.end	= NAND_BASE + SZ_256M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91_BASE_SYS + AT91_ECC,
		.end	= AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91sam9260_nand_device = {
	.name		= "atmel_nand",
	.id		= -1,
	.dev		= {
		.platform_data	= &nand_data,
	},
	.resource	= nand_resources,
	.num_resources	= ARRAY_SIZE(nand_resources),
};

/*
 * Register the NAND controller: route EBI CS3 to the SmartMedia logic
 * in the bus matrix, then configure the optional control GPIOs.
 */
void __init at91_add_device_nand(struct atmel_nand_data *data)
{
	unsigned long csa;

	if (!data)
		return;

	csa = at91_sys_read(AT91_MATRIX_EBICSA);
	at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);

	/* enable pin */
	if (data->enable_pin)
		at91_set_gpio_output(data->enable_pin, 1);

	/* ready/busy pin */
	if (data->rdy_pin)
		at91_set_gpio_input(data->rdy_pin, 1);

	/* card detect pin */
	if (data->det_pin)
		at91_set_gpio_input(data->det_pin, 1);

	nand_data = *data;
	platform_device_register(&at91sam9260_nand_device);
}
#else
void __init at91_add_device_nand(struct atmel_nand_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  TWI (i2c)
 * -------------------------------------------------------------------- */

/*
 * Prefer the GPIO code since the TWI controller isn't robust
 * (gets overruns and underruns under load) and can only issue
 * repeated STARTs in one scenario (the driver doesn't yet handle them).
 */
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)

/* bit-banged I2C on the TWD/TWCK pins, ~100 kHz */
static struct i2c_gpio_platform_data pdata = {
	.sda_pin		= AT91_PIN_PA23,
	.sda_is_open_drain	= 1,
	.scl_pin		= AT91_PIN_PA24,
	.scl_is_open_drain	= 1,
	.udelay			= 2,		/* ~100 kHz */
};

static struct platform_device at91sam9260_twi_device = {
	.name			= "i2c-gpio",
	.id			= -1,
	.dev.platform_data	= &pdata,
};

void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	at91_set_GPIO_periph(AT91_PIN_PA23, 1);		/* TWD (SDA) */
	at91_set_multi_drive(AT91_PIN_PA23, 1);

	at91_set_GPIO_periph(AT91_PIN_PA24, 1);		/* TWCK (SCL) */
	at91_set_multi_drive(AT91_PIN_PA24, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91sam9260_twi_device);
}

#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)

static struct resource twi_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_TWI,
		.end	= AT91SAM9260_BASE_TWI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_TWI,
		.end	= AT91SAM9260_ID_TWI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_twi_device = {
	.name		= "at91_i2c",
	.id		= -1,
	.resource	= twi_resources,
	.num_resources	= ARRAY_SIZE(twi_resources),
};

void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	/* pins used for TWI interface */
	at91_set_A_periph(AT91_PIN_PA23, 0);		/* TWD */
	at91_set_multi_drive(AT91_PIN_PA23, 1);

	at91_set_A_periph(AT91_PIN_PA24, 0);		/* TWCK */
	at91_set_multi_drive(AT91_PIN_PA24, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91sam9260_twi_device);
}
#else
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
 *  SPI
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
static u64 spi_dmamask = DMA_BIT_MASK(32);

static struct resource spi0_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_SPI0,
		.end	= AT91SAM9260_BASE_SPI0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_SPI0,
		.end	= AT91SAM9260_ID_SPI0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_spi0_device = {
	.name		= "atmel_spi",
	.id		= 0,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi0_resources,
	.num_resources	= ARRAY_SIZE(spi0_resources),
};

/* default chip-select GPIOs for SPI0, indexed by chip_select */
static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PC11, AT91_PIN_PC16, AT91_PIN_PC17 };

static struct resource spi1_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_SPI1,
		.end	= AT91SAM9260_BASE_SPI1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_SPI1,
		.end	= AT91SAM9260_ID_SPI1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_spi1_device = {
	.name		= "atmel_spi",
	.id		= 1,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi1_resources,
	.num_resources	= ARRAY_SIZE(spi1_resources),
};

/* default chip-select GPIOs for SPI1, indexed by chip_select */
static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB3, AT91_PIN_PC5, AT91_PIN_PC4, AT91_PIN_PC3 };

/*
 * Register the SPI buses actually used by @devices. Chip-selects are
 * driven as plain GPIOs (default mapping above unless the board passed
 * one in controller_data); the chosen pin is handed back to the driver
 * through controller_data.
 */
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
{
	int i;
	unsigned long cs_pin;
	short enable_spi0 = 0;
	short enable_spi1 = 0;

	/* Choose SPI chip-selects */
	for (i = 0; i < nr_devices; i++) {
		if (devices[i].controller_data)
			cs_pin = (unsigned long) devices[i].controller_data;
		else if (devices[i].bus_num == 0)
			cs_pin = spi0_standard_cs[devices[i].chip_select];
		else
			cs_pin = spi1_standard_cs[devices[i].chip_select];

		if (devices[i].bus_num == 0)
			enable_spi0 = 1;
		else
			enable_spi1 = 1;

		/* enable chip-select pin */
		at91_set_gpio_output(cs_pin, 1);

		/* pass chip-select pin to driver */
		devices[i].controller_data = (void *) cs_pin;
	}

	spi_register_board_info(devices, nr_devices);

	/* Configure SPI bus(es) */
	if (enable_spi0) {
		at91_set_A_periph(AT91_PIN_PA0, 0);	/* SPI0_MISO */
		at91_set_A_periph(AT91_PIN_PA1, 0);	/* SPI0_MOSI */
		at91_set_A_periph(AT91_PIN_PA2, 0);	/* SPI0_SPCK (comment was mislabelled SPI1_SPCK) */

		at91_clock_associate("spi0_clk", &at91sam9260_spi0_device.dev, "spi_clk");
		platform_device_register(&at91sam9260_spi0_device);
	}
	if (enable_spi1) {
		at91_set_A_periph(AT91_PIN_PB0, 0);	/* SPI1_MISO */
		at91_set_A_periph(AT91_PIN_PB1, 0);	/* SPI1_MOSI */
		at91_set_A_periph(AT91_PIN_PB2, 0);	/* SPI1_SPCK */

		at91_clock_associate("spi1_clk", &at91sam9260_spi1_device.dev, "spi_clk");
		platform_device_register(&at91sam9260_spi1_device);
	}
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
 *  Timer/Counter blocks
 * -------------------------------------------------------------------- */

#ifdef CONFIG_ATMEL_TCLIB

/* TCB0: channels TC0..TC2, one IRQ resource per channel */
static struct resource tcb0_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_TCB0,
		.end	= AT91SAM9260_BASE_TCB0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_TC0,
		.end	= AT91SAM9260_ID_TC0,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91SAM9260_ID_TC1,
		.end	= AT91SAM9260_ID_TC1,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91SAM9260_ID_TC2,
		.end	= AT91SAM9260_ID_TC2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_tcb0_device = {
	.name		= "atmel_tcb",
	.id		= 0,
	.resource	= tcb0_resources,
	.num_resources	= ARRAY_SIZE(tcb0_resources),
};

/* TCB1: channels TC3..TC5 */
static struct resource tcb1_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_TCB1,
		.end	= AT91SAM9260_BASE_TCB1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_TC3,
		.end	= AT91SAM9260_ID_TC3,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91SAM9260_ID_TC4,
		.end	= AT91SAM9260_ID_TC4,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91SAM9260_ID_TC5,
		.end	= AT91SAM9260_ID_TC5,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_tcb1_device = {
	.name		= "atmel_tcb",
	.id		= 1,
	.resource	= tcb1_resources,
	.num_resources	= ARRAY_SIZE(tcb1_resources),
};

/* Register both timer/counter blocks for the TC library. */
static void __init at91_add_device_tc(void)
{
	/* this chip has a separate clock and irq for each TC channel */
	at91_clock_associate("tc0_clk", &at91sam9260_tcb0_device.dev, "t0_clk");
	at91_clock_associate("tc1_clk", &at91sam9260_tcb0_device.dev, "t1_clk");
	at91_clock_associate("tc2_clk", &at91sam9260_tcb0_device.dev, "t2_clk");
	platform_device_register(&at91sam9260_tcb0_device);

	at91_clock_associate("tc3_clk", &at91sam9260_tcb1_device.dev, "t0_clk");
	at91_clock_associate("tc4_clk", &at91sam9260_tcb1_device.dev, "t1_clk");
	at91_clock_associate("tc5_clk", &at91sam9260_tcb1_device.dev, "t2_clk");
	platform_device_register(&at91sam9260_tcb1_device);
}
#else
static void __init at91_add_device_tc(void) { }
#endif
/* --------------------------------------------------------------------
 *  RTT
 * -------------------------------------------------------------------- */

/* Real-Time Timer lives in the System Controller register block */
static struct resource rtt_resources[] = {
	{
		.start	= AT91_BASE_SYS + AT91_RTT,
		.end	= AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91sam9260_rtt_device = {
	.name		= "at91_rtt",
	.id		= 0,
	.resource	= rtt_resources,
	.num_resources	= ARRAY_SIZE(rtt_resources),
};

/* Always registered (no Kconfig gate, unlike the other peripherals). */
static void __init at91_add_device_rtt(void)
{
	platform_device_register(&at91sam9260_rtt_device);
}
/* --------------------------------------------------------------------
 *  Watchdog
 * -------------------------------------------------------------------- */

#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
/* the watchdog driver needs no resources; it uses fixed SFR addresses */
static struct platform_device at91sam9260_wdt_device = {
	.name		= "at91_wdt",
	.id		= -1,
	.num_resources	= 0,
};

static void __init at91_add_device_watchdog(void)
{
	platform_device_register(&at91sam9260_wdt_device);
}
#else
static void __init at91_add_device_watchdog(void) {}
#endif
/* --------------------------------------------------------------------
 *  SSC -- Synchronous Serial Controller
 * -------------------------------------------------------------------- */

#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
static u64 ssc_dmamask = DMA_BIT_MASK(32);

static struct resource ssc_resources[] = {
	[0] = {
		.start	= AT91SAM9260_BASE_SSC,
		.end	= AT91SAM9260_BASE_SSC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91SAM9260_ID_SSC,
		.end	= AT91SAM9260_ID_SSC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91sam9260_ssc_device = {
	.name	= "ssc",
	.id	= 0,
	.dev	= {
		.dma_mask		= &ssc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc_resources,
	.num_resources	= ARRAY_SIZE(ssc_resources),
};

/* Mux only the SSC signals the board actually wired up (@pins mask). */
static inline void configure_ssc_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_A_periph(AT91_PIN_PB17, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_A_periph(AT91_PIN_PB16, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_A_periph(AT91_PIN_PB18, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_A_periph(AT91_PIN_PB19, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_A_periph(AT91_PIN_PB20, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_A_periph(AT91_PIN_PB21, 1);
}

/*
 * SSC controllers are accessed through library code, instead of any
 * kind of all-singing/all-dancing driver.  For example one could be
 * used by a particular I2S audio codec's driver, while another one
 * on the same system might be used by a custom data capture driver.
 */
void __init at91_add_device_ssc(unsigned id, unsigned pins)
{
	struct platform_device *pdev;

	/*
	 * NOTE: caller is responsible for passing information matching
	 * "pins" to whatever will be using each particular controller.
	 */
	switch (id) {
	case AT91SAM9260_ID_SSC:
		pdev = &at91sam9260_ssc_device;
		configure_ssc_pins(pins);
		at91_clock_associate("ssc_clk", &pdev->dev, "pclk");
		break;
	default:
		return;
	}

	platform_device_register(pdev);
}
#else
void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#endif
/* --------------------------------------------------------------------
* UART
* -------------------------------------------------------------------- */
#if defined(CONFIG_SERIAL_ATMEL)
/* DBGU register window (already virtually mapped via AT91_VA_BASE_SYS)
 * plus the shared "system" interrupt it raises. */
static struct resource dbgu_resources[] = {
[0] = {
.start = AT91_VA_BASE_SYS + AT91_DBGU,
.end = AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91_ID_SYS,
.end = AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
static struct atmel_uart_data dbgu_data = {
.use_dma_tx = 0,
.use_dma_rx = 0, /* DBGU not capable of receive DMA */
.regs = (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
};
static u64 dbgu_dmamask = DMA_BIT_MASK(32);
/* DBGU exposed through the common atmel_usart driver as hardware id 0;
 * the .id field is remapped to the board's port number at registration. */
static struct platform_device at91sam9260_dbgu_device = {
.name = "atmel_usart",
.id = 0,
.dev = {
.dma_mask = &dbgu_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &dbgu_data,
},
.resource = dbgu_resources,
.num_resources = ARRAY_SIZE(dbgu_resources),
};
/* Mux the DBGU serial lines onto peripheral A: pull-up enabled on DTXD
 * only (second argument of at91_set_A_periph is the pull-up flag). */
static inline void configure_dbgu_pins(void)
{
at91_set_A_periph(AT91_PIN_PB14, 0); /* DRXD */
at91_set_A_periph(AT91_PIN_PB15, 1); /* DTXD */
}
/* USART0: physical register window and its dedicated peripheral IRQ. */
static struct resource uart0_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US0,
.end = AT91SAM9260_BASE_US0 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US0,
.end = AT91SAM9260_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
/* Full-duplex DMA (PDC) enabled in both directions. */
static struct atmel_uart_data uart0_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart0_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart0_device = {
.name = "atmel_usart",
.id = 1,
.dev = {
.dma_mask = &uart0_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart0_data,
},
.resource = uart0_resources,
.num_resources = ARRAY_SIZE(uart0_resources),
};
/* Mux TXD0/RXD0 unconditionally (pull-up on TXD only); the full set of
 * modem-control lines is muxed only when requested via @pins. */
static inline void configure_usart0_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PB4, 1); /* TXD0 */
at91_set_A_periph(AT91_PIN_PB5, 0); /* RXD0 */
if (pins & ATMEL_UART_RTS)
at91_set_A_periph(AT91_PIN_PB26, 0); /* RTS0 */
if (pins & ATMEL_UART_CTS)
at91_set_A_periph(AT91_PIN_PB27, 0); /* CTS0 */
if (pins & ATMEL_UART_DTR)
at91_set_A_periph(AT91_PIN_PB24, 0); /* DTR0 */
if (pins & ATMEL_UART_DSR)
at91_set_A_periph(AT91_PIN_PB22, 0); /* DSR0 */
if (pins & ATMEL_UART_DCD)
at91_set_A_periph(AT91_PIN_PB23, 0); /* DCD0 */
if (pins & ATMEL_UART_RI)
at91_set_A_periph(AT91_PIN_PB25, 0); /* RI0 */
}
/* USART1: register window and peripheral IRQ. */
static struct resource uart1_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US1,
.end = AT91SAM9260_BASE_US1 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US1,
.end = AT91SAM9260_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
/* PDC DMA in both directions. */
static struct atmel_uart_data uart1_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart1_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart1_device = {
.name = "atmel_usart",
.id = 2,
.dev = {
.dma_mask = &uart1_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart1_data,
},
.resource = uart1_resources,
.num_resources = ARRAY_SIZE(uart1_resources),
};
/* TXD1/RXD1 always; RTS/CTS handshake only when requested. */
static inline void configure_usart1_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PB6, 1); /* TXD1 */
at91_set_A_periph(AT91_PIN_PB7, 0); /* RXD1 */
if (pins & ATMEL_UART_RTS)
at91_set_A_periph(AT91_PIN_PB28, 0); /* RTS1 */
if (pins & ATMEL_UART_CTS)
at91_set_A_periph(AT91_PIN_PB29, 0); /* CTS1 */
}
/* USART2: register window and peripheral IRQ. */
static struct resource uart2_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US2,
.end = AT91SAM9260_BASE_US2 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US2,
.end = AT91SAM9260_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
/* PDC DMA in both directions. */
static struct atmel_uart_data uart2_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart2_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart2_device = {
.name = "atmel_usart",
.id = 3,
.dev = {
.dma_mask = &uart2_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart2_data,
},
.resource = uart2_resources,
.num_resources = ARRAY_SIZE(uart2_resources),
};
/* TXD2/RXD2 always; RTS/CTS (on port A) only when requested. */
static inline void configure_usart2_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PB8, 1); /* TXD2 */
at91_set_A_periph(AT91_PIN_PB9, 0); /* RXD2 */
if (pins & ATMEL_UART_RTS)
at91_set_A_periph(AT91_PIN_PA4, 0); /* RTS2 */
if (pins & ATMEL_UART_CTS)
at91_set_A_periph(AT91_PIN_PA5, 0); /* CTS2 */
}
/* USART3: register window and peripheral IRQ. */
static struct resource uart3_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US3,
.end = AT91SAM9260_BASE_US3 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US3,
.end = AT91SAM9260_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
/* PDC DMA in both directions. */
static struct atmel_uart_data uart3_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart3_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart3_device = {
.name = "atmel_usart",
.id = 4,
.dev = {
.dma_mask = &uart3_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart3_data,
},
.resource = uart3_resources,
.num_resources = ARRAY_SIZE(uart3_resources),
};
/* TXD3/RXD3 always; note the handshake lines live on peripheral B. */
static inline void configure_usart3_pins(unsigned pins)
{
at91_set_A_periph(AT91_PIN_PB10, 1); /* TXD3 */
at91_set_A_periph(AT91_PIN_PB11, 0); /* RXD3 */
if (pins & ATMEL_UART_RTS)
at91_set_B_periph(AT91_PIN_PC8, 0); /* RTS3 */
if (pins & ATMEL_UART_CTS)
at91_set_B_periph(AT91_PIN_PC10, 0); /* CTS3 */
}
/* USART4: register window and peripheral IRQ. */
static struct resource uart4_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US4,
.end = AT91SAM9260_BASE_US4 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US4,
.end = AT91SAM9260_ID_US4,
.flags = IORESOURCE_IRQ,
},
};
/* PDC DMA in both directions. */
static struct atmel_uart_data uart4_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart4_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart4_device = {
.name = "atmel_usart",
.id = 5,
.dev = {
.dma_mask = &uart4_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart4_data,
},
.resource = uart4_resources,
.num_resources = ARRAY_SIZE(uart4_resources),
};
/* USART4 has no handshake lines; TXD4/RXD4 are on peripheral B. */
static inline void configure_usart4_pins(void)
{
at91_set_B_periph(AT91_PIN_PA31, 1); /* TXD4 */
at91_set_B_periph(AT91_PIN_PA30, 0); /* RXD4 */
}
/* USART5: register window and peripheral IRQ. */
static struct resource uart5_resources[] = {
[0] = {
.start = AT91SAM9260_BASE_US5,
.end = AT91SAM9260_BASE_US5 + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9260_ID_US5,
.end = AT91SAM9260_ID_US5,
.flags = IORESOURCE_IRQ,
},
};
/* PDC DMA in both directions. */
static struct atmel_uart_data uart5_data = {
.use_dma_tx = 1,
.use_dma_rx = 1,
};
static u64 uart5_dmamask = DMA_BIT_MASK(32);
static struct platform_device at91sam9260_uart5_device = {
.name = "atmel_usart",
.id = 6,
.dev = {
.dma_mask = &uart5_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &uart5_data,
},
.resource = uart5_resources,
.num_resources = ARRAY_SIZE(uart5_resources),
};
/* USART5 has no handshake lines. */
static inline void configure_usart5_pins(void)
{
at91_set_A_periph(AT91_PIN_PB12, 1); /* TXD5 */
at91_set_A_periph(AT91_PIN_PB13, 0); /* RXD5 */
}
static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART]; /* the UARTs to use */
struct platform_device *atmel_default_console_device; /* the serial console device */
/*
 * Map the hardware USART selected by @id (or 0 for the DBGU) to the
 * logical port number @portnr: mux its pins (with optional handshake
 * lines from @pins), bind its clock, and queue it in at91_uarts[] for
 * later registration by at91_add_device_serial().  Unknown ids and
 * out-of-range port numbers are silently ignored.
 */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
struct platform_device *pdev;
switch (id) {
case 0: /* DBGU */
pdev = &at91sam9260_dbgu_device;
configure_dbgu_pins();
/* DBGU is clocked from the master clock, not a dedicated one */
at91_clock_associate("mck", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US0:
pdev = &at91sam9260_uart0_device;
configure_usart0_pins(pins);
at91_clock_associate("usart0_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US1:
pdev = &at91sam9260_uart1_device;
configure_usart1_pins(pins);
at91_clock_associate("usart1_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US2:
pdev = &at91sam9260_uart2_device;
configure_usart2_pins(pins);
at91_clock_associate("usart2_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US3:
pdev = &at91sam9260_uart3_device;
configure_usart3_pins(pins);
at91_clock_associate("usart3_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US4:
pdev = &at91sam9260_uart4_device;
configure_usart4_pins();
at91_clock_associate("usart4_clk", &pdev->dev, "usart");
break;
case AT91SAM9260_ID_US5:
pdev = &at91sam9260_uart5_device;
configure_usart5_pins();
at91_clock_associate("usart5_clk", &pdev->dev, "usart");
break;
default:
return;
}
/* NOTE(review): pdev->id is overwritten even when portnr is out of
 * range (the device is then never queued) — confirm callers always
 * pass portnr < ATMEL_MAX_UART. */
pdev->id = portnr; /* update to mapped ID */
if (portnr < ATMEL_MAX_UART)
at91_uarts[portnr] = pdev;
}
/*
 * Select which previously-registered logical port acts as the serial
 * console.  Out-of-range port numbers are ignored, leaving the
 * console selection unchanged.
 */
void __init at91_set_serial_console(unsigned portnr)
{
	if (portnr >= ATMEL_MAX_UART)
		return;
	atmel_default_console_device = at91_uarts[portnr];
}

/*
 * Register every UART queued by at91_register_uart() and report when
 * no console port was chosen.
 */
void __init at91_add_device_serial(void)
{
	int port;

	for (port = 0; port < ATMEL_MAX_UART; port++) {
		struct platform_device *uart = at91_uarts[port];

		if (uart != NULL)
			platform_device_register(uart);
	}

	if (atmel_default_console_device == NULL)
		printk(KERN_INFO "AT91: No default serial console defined.\n");
}
#else
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_set_serial_console(unsigned portnr) {}
void __init at91_add_device_serial(void) {}
#endif
/* --------------------------------------------------------------------
* CF/IDE
* -------------------------------------------------------------------- */
#if defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE) || \
defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE) || \
defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
/* CompactFlash/IDE slot on chip-select 4; platform data is copied in
 * from the board file by at91_add_device_cf(), and .name is chosen
 * there based on the requested mode (CF vs true IDE). */
static struct at91_cf_data cf0_data;
static struct resource cf0_resources[] = {
[0] = {
.start = AT91_CHIPSELECT_4,
.end = AT91_CHIPSELECT_4 + SZ_256M - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device cf0_device = {
.id = 0,
.dev = {
.platform_data = &cf0_data,
},
.resource = cf0_resources,
.num_resources = ARRAY_SIZE(cf0_resources),
};
/* Second CF/IDE slot, on chip-select 5. */
static struct at91_cf_data cf1_data;
static struct resource cf1_resources[] = {
[0] = {
.start = AT91_CHIPSELECT_5,
.end = AT91_CHIPSELECT_5 + SZ_256M - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device cf1_device = {
.id = 1,
.dev = {
.platform_data = &cf1_data,
},
.resource = cf1_resources,
.num_resources = ARRAY_SIZE(cf1_resources),
};
/*
 * Register a CompactFlash / true-IDE slot described by @data: route the
 * chip-select through the bus matrix, configure the optional reset,
 * IRQ and card-detect GPIOs, mux the shared CF control lines, pick the
 * driver name matching the requested mode, and register the device.
 * Only chip-selects 4 and 5 are valid.
 */
void __init at91_add_device_cf(struct at91_cf_data *data)
{
struct platform_device *pdev;
unsigned long csa;
if (!data)
return;
csa = at91_sys_read(AT91_MATRIX_EBICSA);
switch (data->chipselect) {
case 4:
/* CS4 address line is open-drain-capable; force push-pull */
at91_set_multi_drive(AT91_PIN_PC8, 0);
at91_set_A_periph(AT91_PIN_PC8, 0);
csa |= AT91_MATRIX_CS4A_SMC_CF1;
cf0_data = *data;
pdev = &cf0_device;
break;
case 5:
at91_set_multi_drive(AT91_PIN_PC9, 0);
at91_set_A_periph(AT91_PIN_PC9, 0);
csa |= AT91_MATRIX_CS5A_SMC_CF2;
cf1_data = *data;
pdev = &cf1_device;
break;
default:
printk(KERN_ERR "AT91 CF: bad chip-select requested (%u)\n",
data->chipselect);
return;
}
at91_sys_write(AT91_MATRIX_EBICSA, csa);
/* a pin number of 0 means "not wired" for the optional GPIOs below */
if (data->rst_pin) {
at91_set_multi_drive(data->rst_pin, 0);
at91_set_gpio_output(data->rst_pin, 1);
}
if (data->irq_pin) {
at91_set_gpio_input(data->irq_pin, 0);
at91_set_deglitch(data->irq_pin, 1);
}
if (data->det_pin) {
at91_set_gpio_input(data->det_pin, 0);
at91_set_deglitch(data->det_pin, 1);
}
at91_set_B_periph(AT91_PIN_PC6, 0); /* CFCE1 */
at91_set_B_periph(AT91_PIN_PC7, 0); /* CFCE2 */
at91_set_A_periph(AT91_PIN_PC10, 0); /* CFRNW */
at91_set_A_periph(AT91_PIN_PC15, 1); /* NWAIT */
/* NOTE(review): if neither PATA_AT91 nor BLK_DEV_IDE_AT91 is enabled,
 * this braceless 'if' is left with no statement before 'else' and the
 * file will not compile — presumably Kconfig guarantees one is set;
 * the #warning flags that configuration. */
if (data->flags & AT91_CF_TRUE_IDE)
#if defined(CONFIG_PATA_AT91) || defined(CONFIG_PATA_AT91_MODULE)
pdev->name = "pata_at91";
#elif defined(CONFIG_BLK_DEV_IDE_AT91) || defined(CONFIG_BLK_DEV_IDE_AT91_MODULE)
pdev->name = "at91_ide";
#else
#warning "board requires AT91_CF_TRUE_IDE: enable either at91_ide or pata_at91"
#endif
else
pdev->name = "at91_cf";
platform_device_register(pdev);
}
#else
void __init at91_add_device_cf(struct at91_cf_data * data) {}
#endif
/* -------------------------------------------------------------------- */
/*
* These devices are always present and don't need any board-specific
* setup.
*/
/*
 * Register the devices every AT91SAM9260 board has and that need no
 * board-specific setup: real-time timer, watchdog and timer/counter
 * blocks.  Runs at arch_initcall time.
 */
static int __init at91_add_standard_devices(void)
{
at91_add_device_rtt();
at91_add_device_watchdog();
at91_add_device_tc();
return 0;
}
arch_initcall(at91_add_standard_devices);
| gpl-2.0 |
klquicksall/Ace-GB-DHD | lib/div64.c | 1754 | 2357 | /*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. __div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
#include <linux/module.h>
#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
/*
 * Bit-serial 64/32 division helper behind do_div(): divides *n in
 * place by @base and returns the 32-bit remainder.  Declared weak so
 * architectures can override it with assembly.  Callers guarantee the
 * fast path (*n >> 32 == 0) is handled before getting here.
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
uint64_t rem = *n;
uint64_t b = base;
uint64_t res, d = 1;
uint32_t high = rem >> 32;
/* Reduce the thing a bit first */
res = 0;
if (high >= base) {
/* divide the top 32 bits directly so the shift loop below
 * never needs more than 32 doublings */
high /= base;
res = (uint64_t) high << 32;
rem -= (uint64_t) (high*base) << 32;
}
/* double b (and the matching quotient bit d) until b would exceed
 * rem; the (int64_t)b > 0 test stops before b's sign bit is lost */
while ((int64_t)b > 0 && b < rem) {
b = b+b;
d = d+d;
}
/* classic restoring division: subtract shifted divisor copies,
 * accumulating quotient bits, until all bits are consumed */
do {
if (rem >= b) {
rem -= b;
res += d;
}
b >>= 1;
d >>= 1;
} while (d);
*n = res;
return rem;
}
EXPORT_SYMBOL(__div64_32);
#ifndef div_s64_rem
/*
 * div_s64_rem - signed 64-bit divide with 32-bit divisor and remainder
 * @dividend: signed 64-bit dividend
 * @divisor: signed 32-bit divisor
 * @remainder: where to store the signed 32-bit remainder
 *
 * Returns the quotient truncated toward zero; the remainder takes the
 * sign of the dividend, matching C division semantics.
 *
 * Magnitudes are formed with unsigned negation so that
 * dividend == S64_MIN or divisor == INT_MIN cannot trigger signed
 * overflow (undefined behavior), which the former
 * "-dividend" / "abs(divisor)" formulation could.
 */
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 abs_dividend = (dividend < 0) ? -(u64)dividend : (u64)dividend;
	u32 abs_divisor = (divisor < 0) ? -(u32)divisor : (u32)divisor;
	u64 quotient = div_u64_rem(abs_dividend, abs_divisor,
				   (u32 *)remainder);

	/* remainder carries the sign of the dividend */
	if (dividend < 0)
		*remainder = -*remainder;

	/* quotient is negative exactly when the operand signs differ */
	if ((dividend < 0) != (divisor < 0))
		quotient = -quotient;

	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
/* 64bit divisor, dividend and result. dynamic precision */
#ifndef div64_u64
/*
 * div64_u64 - unsigned 64-bit divide with 64-bit divisor
 * @dividend: 64-bit dividend
 * @divisor: 64-bit divisor
 *
 * This is a modified version of the algorithm from the book
 * "Hacker's Delight" (divDouble): when the divisor does not fit in
 * 32 bits, both operands are scaled down to form a 32-bit divisor,
 * an estimated quotient is computed, and the estimate — which can be
 * at most one too large — is corrected to the exact value.
 *
 * The previous implementation simply shifted the dividend by the same
 * amount as the divisor and returned the truncated result, so the
 * quotient could be off by several counts (precision loss on 32-bit
 * platforms).
 */
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		/* scale divisor down to 31 bits and estimate */
		int n = 1 + fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		/* the estimate is never more than 1 too high: fix up */
		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
#endif /* BITS_PER_LONG == 32 */
/*
* Iterative div/mod for use when dividend is not expected to be much
* bigger than divisor.
*/
/*
 * Out-of-line wrapper around the iterative (repeated-subtraction)
 * divide helper: returns dividend / divisor and stores the remainder.
 * Efficient only when the quotient is expected to be small.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
| gpl-2.0 |
bryce09/Nexus9 | arch/tile/mm/pgtable.c | 2010 | 16786 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
* The normal show_free_areas() is too verbose on Tile, with dozens
* of processors and often four NUMA zones each with high and lowmem.
*/
/*
 * Compact free-memory report for Tile: one line of global page-state
 * counters, then one line per populated zone with the zone's total
 * free memory and largest free block.  @filter is accepted for
 * interface compatibility but unused here.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
		unsigned long flags, order, total = 0;
		/*
		 * Signed sentinel: -1 means "no free block seen".  The
		 * previous code kept this in an unsigned long, so an
		 * empty zone shifted 1UL by (unsigned long)-1 (undefined
		 * behavior) and a zone whose largest block was order 0
		 * wrongly printed 0.
		 */
		long largest_order = -1;

		if (!populated_zone(zone))
			continue;

		/* take the zone lock so the free lists are a snapshot */
		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);

		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
		       zone_to_nid(zone), zone->name, K(total),
		       largest_order >= 0 ? K(1UL) << largest_order : 0);
	}
}
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame.
*/
/*
 * Install <pfn, flags> into the kernel (swapper) page table at @vaddr
 * and flush the local TLB entry.  All intermediate levels must already
 * exist; a missing level is a kernel bug (BUG()).
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = swapper_pg_dir + pgd_index(vaddr);
if (pgd_none(*pgd)) {
BUG();
return;
}
pud = pud_offset(pgd, vaddr);
if (pud_none(*pud)) {
BUG();
return;
}
pmd = pmd_offset(pud, vaddr);
if (pmd_none(*pmd)) {
BUG();
return;
}
pte = pte_offset_kernel(pmd, vaddr);
/* <pfn,flags> stored as-is, to permit clearing entries */
set_pte(pte, pfn_pte(pfn, flags));
/*
* It's enough to flush this one mapping.
* This appears conservative since it is only called
* from __set_fixmap.
*/
local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}
/*
 * Map physical address @phys at the fixmap slot @idx with protection
 * @flags.  Slots past the end of the fixmap table are a kernel bug.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(__fix_to_virt(idx), phys >> PAGE_SHIFT, flags);
}
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*
* This function converts a huge PTE mapping kernel LOWMEM into a bunch
* of small PTEs with the same caching. No cache flush required, but we
* must do a global TLB flush.
*
* Any caller that wishes to modify a kernel mapping that might
* have been made with a huge page should call this function,
* since doing so properly avoids race conditions with installing the
* newly-shattered page and then flushing all the TLB entries.
*
* @addr: Address at which to shatter any existing huge page.
*/
void shatter_huge_page(unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
unsigned long flags = 0; /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
struct list_head *pos;
#endif
/* Get a pointer to the pmd entry that we need to change. */
addr &= HPAGE_MASK;
BUG_ON(pgd_addr_invalid(addr));
BUG_ON(addr < PAGE_OFFSET); /* only for kernel LOWMEM */
pgd = swapper_pg_dir + pgd_index(addr);
pud = pud_offset(pgd, addr);
BUG_ON(!pud_present(*pud));
pmd = pmd_offset(pud, addr);
BUG_ON(!pmd_present(*pmd));
/* fast path: already mapped with small pages, nothing to do */
if (!pmd_huge_page(*pmd))
return;
spin_lock_irqsave(&init_mm.page_table_lock, flags);
/* re-check under the lock: another cpu may have shattered it */
if (!pmd_huge_page(*pmd)) {
/* Lost the race to convert the huge page. */
spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
return;
}
/* Shatter the huge page into the preallocated L2 page table. */
pmd_populate_kernel(&init_mm, pmd,
get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
spin_lock(&pgd_lock);
list_for_each(pos, &pgd_list) {
pmd_t *copy_pmd;
pgd = list_to_pgd(pos) + pgd_index(addr);
pud = pud_offset(pgd, addr);
copy_pmd = pmd_offset(pud, addr);
__set_pmd(copy_pmd, *pmd);
}
spin_unlock(&pgd_lock);
#endif
/* Tell every cpu to notice the change. */
flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
cpu_possible_mask, NULL, 0);
/* Hold the lock until the TLB flush is finished to avoid races. */
spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
/*
* List of all pgd's needed so it can invalidate entries in both cached
* and uncached pgd's. This is essentially codepath-based locking
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
*
* The lock is always taken with interrupts disabled, unlike on x86
* and other platforms, because we need to take the lock in
* shatter_huge_page(), which may be called from an interrupt context.
* We are not at risk from the tlbflush IPI deadlock that was seen on
* x86, since we use the flush_remote() API to have the hypervisor do
* the TLB flushes regardless of irq disabling.
*/
/* Lock and list tracking every live pgd (see the comment above). */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
/* Callers must hold pgd_lock. */
static inline void pgd_list_add(pgd_t *pgd)
{
list_add(pgd_to_list(pgd), &pgd_list);
}
/* Callers must hold pgd_lock. */
static inline void pgd_list_del(pgd_t *pgd)
{
list_del(pgd_to_list(pgd));
}
/* First pgd slot covering kernel addresses, and how many such slots. */
#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
/*
 * Initialize a freshly allocated pgd: clear the user portion, copy
 * the kernel portion from the swapper pgd, and link it on pgd_list so
 * later kernel-mapping updates can find it.
 */
static void pgd_ctor(pgd_t *pgd)
{
unsigned long flags;
memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
spin_lock_irqsave(&pgd_lock, flags);
#ifndef __tilegx__
/*
* Check that the user interrupt vector has no L2.
* It never should for the swapper, and new page tables
* should always start with an empty user interrupt vector.
*/
BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif
memcpy(pgd + KERNEL_PGD_INDEX_START,
swapper_pg_dir + KERNEL_PGD_INDEX_START,
KERNEL_PGD_PTRS * sizeof(pgd_t));
pgd_list_add(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
}
/* Unlink a dying pgd from pgd_list (irq-safe: see pgd_lock comment). */
static void pgd_dtor(pgd_t *pgd)
{
unsigned long flags; /* can be called from interrupt context */
spin_lock_irqsave(&pgd_lock, flags);
pgd_list_del(pgd);
spin_unlock_irqrestore(&pgd_lock, flags);
}
/*
 * Allocate and initialize a pgd for @mm.  Returns NULL on allocation
 * failure.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
if (pgd)
pgd_ctor(pgd);
return pgd;
}
/* Tear down and free a pgd previously returned by pgd_alloc(). */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
pgd_dtor(pgd);
kmem_cache_free(pgd_cache, pgd);
}
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
/*
 * Allocate a (possibly multi-page) user L2 page table, zeroed, with
 * each constituent page given its own refcount and NR_PAGETABLE
 * accounting so tlb_remove_page() can free them individually.
 *
 * NOTE(review): the @order parameter is accepted but the allocation
 * uses the fixed L2_USER_PGTABLE_ORDER, and the accounting loop runs
 * to "i < order" rather than the page count (1 << order) — confirm
 * whether @order is always == L2_USER_PGTABLE_ORDER here and whether
 * the loop bound is intentional.
 */
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
int order)
{
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
struct page *p;
int i;
p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
if (p == NULL)
return NULL;
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
* correctly with tlb_remove_page().
*/
for (i = 1; i < order; ++i) {
init_page_count(p+i);
inc_zone_page_state(p+i, NR_PAGETABLE);
}
pgtable_page_ctor(p);
return p;
}
/*
* Free page immediately (used in __pte_alloc if we raced with another
* process). We have to correct whatever pte_alloc_one() did before
* returning the pages to the allocator.
*/
/*
 * Immediately free a page table from pgtable_alloc_one(), undoing the
 * per-page refcount/NR_PAGETABLE setup (used when __pte_alloc raced).
 */
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
int i;
pgtable_page_dtor(p);
__free_page(p);
for (i = 1; i < order; ++i) {
__free_page(p+i);
dec_zone_page_state(p+i, NR_PAGETABLE);
}
}
/*
 * Deferred (mmu_gather) variant of pgtable_free(): queue each page of
 * the table for freeing after the TLB flush.
 */
void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
unsigned long address, int order)
{
int i;
pgtable_page_dtor(pte);
tlb_remove_page(tlb, pte);
for (i = 1; i < order; ++i) {
tlb_remove_page(tlb, pte + i);
dec_zone_page_state(pte + i, NR_PAGETABLE);
}
}
#ifndef __tilegx__
/*
* FIXME: needs to be atomic vs hypervisor writes. For now we make the
* window of vulnerability a bit smaller by doing an unlocked 8-bit update.
*/
/*
 * Test and clear the "accessed" bit of @ptep; returns 1 if it was set.
 * Done as an unlocked single-byte update to narrow (not eliminate) the
 * race window against hypervisor writes — see the FIXME above.
 * NOTE(review): indexing tmp[1] assumes the accessed bit lives in the
 * second byte of the low word (byte order dependent) — the #if guard
 * only checks the bit index, not endianness; confirm for this config.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
u8 *tmp = (u8 *)ptep;
u8 second_byte = tmp[1];
if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
return 0;
tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
return 1;
}
/*
* This implementation is atomic vs hypervisor writes, since the hypervisor
* always writes the low word (where "accessed" and "dirty" are) and this
* routine only writes the high word.
*/
/*
 * Clear the writable bit of @ptep.  Only the high 32-bit word is
 * written, which is what makes this atomic vs hypervisor updates of
 * the low word (see the comment above).
 */
void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
u32 *tmp = (u32 *)ptep;
tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}
#endif
/*
 * Walk the page table of @mm (or the kernel's swapper table if @mm is
 * NULL) for @addr and return a pointer to the PTE, or NULL if no
 * mapping exists.  A huge-page PMD is returned directly, cast to a
 * pte_t pointer, so callers must be prepared for either level.
 */
pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
if (pgd_addr_invalid(addr))
return NULL;
pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (!pud_present(*pud))
return NULL;
pmd = pmd_offset(pud, addr);
/* huge mapping: the PMD itself is the leaf entry */
if (pmd_huge_page(*pmd))
return (pte_t *)pmd;
if (!pmd_present(*pmd))
return NULL;
return pte_offset_kernel(pmd, addr);
}
/*
 * Return @prot updated so the page is cached on @cpu's tile: the cpu
 * number is converted to grid (x, y) coordinates and encoded as the
 * LOTAR field.  @prot must already request tile-L3 caching.
 */
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
unsigned int width = smp_width;
int x = cpu % width;
int y = cpu / width;
BUG_ON(y >= smp_height);
BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
BUG_ON(cpu < 0 || cpu >= NR_CPUS);
BUG_ON(!cpu_is_valid_lotar(cpu));
return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}
/*
 * Inverse of set_remote_cache_cpu(): decode the LOTAR grid coordinates
 * stored in @prot back into a linear cpu number.  @prot must request
 * tile-L3 caching.
 */
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int col = HV_LOTAR_X(lotar);
	int row = HV_LOTAR_Y(lotar);

	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return row * smp_width + col;
}
/*
* Convert a kernel VA to a PA and homing information.
*/
/*
 * Translate kernel virtual address @va into its physical address
 * (*cpa) and a PTE (*pte) carrying only the page's homing attributes.
 * Always returns 0 today (see the trailing comment).
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
struct page *page = virt_to_page(va);
pte_t null_pte = { 0 };
*cpa = __pa(va);
/* Note that this is not writing a page table, just returning a pte. */
*pte = pte_set_home(null_pte, page_home(page));
return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
/*
 * Low-level PTE store.  On 32-bit tile the two words are written in
 * an order that guarantees the "present"/"migrating" bits (low word)
 * become visible last when installing a mapping and first when
 * tearing one down, so a concurrent walker never sees a half-written
 * but apparently-valid entry.
 */
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
# endif
if (pte_present(pte)) {
((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
barrier();
((u32 *)ptep)[0] = (u32)(pte_val(pte));
} else {
((u32 *)ptep)[0] = (u32)(pte_val(pte));
barrier();
((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
}
#endif /* __tilegx__ */
}
/*
 * Public PTE store: for PTEs that reference real memory, stamp in the
 * page's home-cache attributes first, then delegate to __set_pte().
 * Out-of-range PFNs (e.g. remap_pfn_range) must supply a mode.
 */
void set_pte(pte_t *ptep, pte_t pte)
{
if (pte_present(pte) &&
(!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
/* The PTE actually references physical memory. */
unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
/* Update the home of the PTE from the struct page. */
pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
} else if (hv_pte_get_mode(pte) == 0) {
/* remap_pfn_range(), etc, must supply PTE mode. */
panic("set_pte(): out-of-range PFN and mode 0\n");
}
}
__set_pte(ptep, pte);
}
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
return mm->context.priority_cached != 0;
}
/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
if (!mm_is_priority_cached(mm)) {
/* -1UL marks "priority caching in use"; also tells the
 * hypervisor to enable priority caching everywhere */
mm->context.priority_cached = -1UL;
hv_set_caching(-1UL);
}
}
/*
* Validate and return the priority_cached flag. We know if it's zero
* that we don't need to scan, since we immediately set it non-zero
* when we first consider a MAP_CACHE_PRIORITY mapping.
*
* We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
* since we're in an interrupt context (servicing switch_mm) we don't
* worry about it and don't unset the "priority_cached" field.
* Presumably we'll come back later and have more luck and clear
* the value then; for now we'll just keep the cache marked for priority.
*/
/*
 * Re-derive the priority_cached flag by scanning the mm's VMAs; see
 * the rationale comment above.  Best effort: if mmap_sem cannot be
 * taken (we may be in switch_mm), the stale non-zero value is kept.
 */
static unsigned long update_priority_cached(struct mm_struct *mm)
{
if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
struct vm_area_struct *vm;
for (vm = mm->mmap; vm; vm = vm->vm_next) {
if (hv_pte_get_cached_priority(vm->vm_page_prot))
break;
}
/* no VMA wants priority caching any more: clear the flag */
if (vm == NULL)
mm->context.priority_cached = 0;
up_write(&mm->mmap_sem);
}
return mm->context.priority_cached;
}
/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
if (!mm_is_priority_cached(next)) {
/*
* If the new mm doesn't use priority caching, just see if we
* need the hv_set_caching(), or can assume it's already zero.
*/
if (mm_is_priority_cached(prev))
hv_set_caching(0);
} else {
hv_set_caching(update_priority_cached(next));
}
}
#if CHIP_HAS_MMIO()
/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
pgprot_t home)
{
void *addr;
struct vm_struct *area;
unsigned long offset, last_addr;
pgprot_t pgprot;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/* Create a read/write, MMIO VA mapping homed at the requested shim. */
pgprot = PAGE_KERNEL;
pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
* Ok, go for it..
*/
area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
if (!area)
return NULL;
area->phys_addr = phys_addr;
addr = area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
return NULL;
}
return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
volatile void __iomem *addr = (volatile void __iomem *)
(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
vunmap((void * __force)addr);
#else
/* x86 uses this complicated flow instead of vunmap(). Is
* there any particular reason we should do the same? */
struct vm_struct *p, *o;
/* Use the vm area unlocked, assuming the caller
ensures there isn't another iounmap for the same address
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
p = find_vm_area((void *)addr);
if (!p) {
pr_err("iounmap: bad address %p\n", addr);
dump_stack();
return;
}
/* Finally remove it */
o = remove_vm_area((void *)addr);
BUG_ON(p != o || o == NULL);
kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);
#endif /* CHIP_HAS_MMIO() */
| gpl-2.0 |
Anik1199/android_kernel_mediatek_sprout | arch/mips/math-emu/ieee754sp.c | 2522 | 5334 | /* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
/*
 * Classify @x as one of the IEEE754_CLASS_* values (zero, normal,
 * denormal, infinity, QNAN, SNAN).  COMPXSP declares the xc/xs/xe/xm
 * working variables and EXPLODEXSP fills them in — both come from
 * ieee754sp.h.
 */
int ieee754sp_class(ieee754sp x)
{
COMPXSP;
EXPLODEXSP;
return xc;
}
/* True for both quiet and signalling NaNs (SNAN sorts after QNAN). */
int ieee754sp_isnan(ieee754sp x)
{
return ieee754sp_class(x) >= IEEE754_CLASS_SNAN;
}
/* True if the NaN @x is signalling; caller must pass a NaN. */
int ieee754sp_issnan(ieee754sp x)
{
assert(ieee754sp_isnan(x));
return (SPMANT(x) & SP_MBIT(SP_MBITS-1));
}
/*
 * Run the common exception hook for result @r if any exception bits
 * are pending (TSTX), giving the handler a chance to substitute the
 * returned value.  @op and the trailing varargs describe the
 * operation for diagnostics.
 */
ieee754sp ieee754sp_xcpt(ieee754sp r, const char *op, ...)
{
struct ieee754xctx ax;
if (!TSTX())
return r;
ax.op = op;
ax.rt = IEEE754_RT_SP;
ax.rv.sp = r;
va_start(ax.ap, op);
ieee754_xcpt(&ax);
va_end(ax.ap);
return ax.rv.sp;
}
/*
 * NaN-result variant: signalling NaNs raise INVALID_OPERATION.  When
 * the trap is not enabled the SNAN is quieted in place (or replaced
 * with the indefinite value if quieting destroys the NaN); when it is
 * enabled the exception hook decides the result.
 */
ieee754sp ieee754sp_nanxcpt(ieee754sp r, const char *op, ...)
{
struct ieee754xctx ax;
assert(ieee754sp_isnan(r));
if (!ieee754sp_issnan(r)) /* QNAN does not cause invalid op !! */
return r;
if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) {
/* not enabled convert to a quiet NaN */
SPMANT(r) &= (~SP_MBIT(SP_MBITS-1));
if (ieee754sp_isnan(r))
return r;
else
return ieee754sp_indef();
}
ax.op = op;
ax.rt = 0;
ax.rv.sp = r;
va_start(ax.ap, op);
ieee754_xcpt(&ax);
va_end(ax.ap);
return ax.rv.sp;
}
/*
 * Given two NaN operands, return the one with the larger mantissa
 * payload (@y wins ties).  Both arguments must be NaNs.
 */
ieee754sp ieee754sp_bestnan(ieee754sp x, ieee754sp y)
{
	assert(ieee754sp_isnan(x));
	assert(ieee754sp_isnan(y));

	return (SPMANT(x) > SPMANT(y)) ? x : y;
}
/*
 * Apply the current rounding mode to a mantissa carrying 3 extra
 * guard/round/sticky bits.  sn is the sign (used by the directed
 * rounding modes); the grs bits are left in place, the caller strips
 * them and handles any carry out of the mantissa.
 */
static unsigned get_rounding(int sn, unsigned xm)
{
	/* inexact must round of 3 bits
	 */
	if (xm & (SP_MBIT(3) - 1)) {
		switch (ieee754_csr.rm) {
		case IEEE754_RZ:	/* truncate: nothing to add */
			break;
		case IEEE754_RN:	/* nearest, ties to even */
			xm += 0x3 + ((xm >> 3) & 1);
			/* xm += (xm&0x8)?0x4:0x3 */
			break;
		case IEEE754_RU:	/* toward +Infinity */
			if (!sn)	/* ?? */
				xm += 0x8;
			break;
		case IEEE754_RD:	/* toward -Infinity */
			if (sn)	/* ?? */
				xm += 0x8;
			break;
		}
	}
	return xm;
}
/* generate a normal/denormal number with over,under handling
 * sn is sign
 * xe is an unbiased exponent
 * xm is 3bit extended precision value.
 */
ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
{
	assert(xm);		/* we don't gen exact zeros (probably should) */
	assert((xm >> (SP_MBITS + 1 + 3)) == 0);	/* no execess */
	assert(xm & (SP_HIDDEN_BIT << 3));

	if (xe < SP_EMIN) {
		/* strip lower bits */
		int es = SP_EMIN - xe;	/* shift needed to reach denormal range */

		if (ieee754_csr.nod) {
			/* no-denormals mode: instead of a denormal, deliver
			 * zero or the smallest normal per the rounding mode */
			SETCX(IEEE754_UNDERFLOW);
			SETCX(IEEE754_INEXACT);

			switch(ieee754_csr.rm) {
			case IEEE754_RN:
			case IEEE754_RZ:
				return ieee754sp_zero(sn);
			case IEEE754_RU:	/* toward +Infinity */
				if(sn == 0)
					return ieee754sp_min(0);
				else
					return ieee754sp_zero(1);
			case IEEE754_RD:	/* toward -Infinity */
				if(sn == 0)
					return ieee754sp_zero(0);
				else
					return ieee754sp_min(1);
			}
		}

		if (xe == SP_EMIN - 1
		    && get_rounding(sn, xm) >> (SP_MBITS + 1 + 3))
		{
			/* Not tiny after rounding */
			SETCX(IEEE754_INEXACT);
			xm = get_rounding(sn, xm);
			xm >>= 1;
			/* Clear grs bits */
			xm &= ~(SP_MBIT(3) - 1);
			xe++;
		}
		else {
			/* sticky right shift es bits
			 */
			SPXSRSXn(es);
			assert((xm & (SP_HIDDEN_BIT << 3)) == 0);
			assert(xe == SP_EMIN);
		}
	}
	if (xm & (SP_MBIT(3) - 1)) {
		SETCX(IEEE754_INEXACT);
		if ((xm & (SP_HIDDEN_BIT << 3)) == 0) {
			SETCX(IEEE754_UNDERFLOW);
		}

		/* inexact must round of 3 bits
		 */
		xm = get_rounding(sn, xm);
		/* adjust exponent for rounding add overflowing
		 */
		if (xm >> (SP_MBITS + 1 + 3)) {
			/* add causes mantissa overflow */
			xm >>= 1;
			xe++;
		}
	}
	/* strip grs bits */
	xm >>= 3;

	assert((xm >> (SP_MBITS + 1)) == 0);	/* no execess */
	assert(xe >= SP_EMIN);

	if (xe > SP_EMAX) {
		SETCX(IEEE754_OVERFLOW);
		SETCX(IEEE754_INEXACT);
		/* -O can be table indexed by (rm,sn) */
		switch (ieee754_csr.rm) {
		case IEEE754_RN:
			return ieee754sp_inf(sn);
		case IEEE754_RZ:
			return ieee754sp_max(sn);
		case IEEE754_RU:	/* toward +Infinity */
			if (sn == 0)
				return ieee754sp_inf(0);
			else
				return ieee754sp_max(1);
		case IEEE754_RD:	/* toward -Infinity */
			if (sn == 0)
				return ieee754sp_max(0);
			else
				return ieee754sp_inf(1);
		}
	}
	/* gen norm/denorm/zero */

	if ((xm & SP_HIDDEN_BIT) == 0) {
		/* we underflow (tiny/zero) */
		assert(xe == SP_EMIN);
		if (ieee754_csr.mx & IEEE754_UNDERFLOW)
			SETCX(IEEE754_UNDERFLOW);
		return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
	} else {
		assert((xm >> (SP_MBITS + 1)) == 0);	/* no execess */
		assert(xm & SP_HIDDEN_BIT);

		return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
	}
}
| gpl-2.0 |
Shabbypenguin/Jellybean_kernel | net/dsa/slave.c | 2778 | 10433 | /*
* net/dsa/slave.c - Slave device handling
* Copyright (c) 2008-2009 Marvell Semiconductor
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
/* mii_bus read accessor for the slave bus: forward to the switch
 * driver for ports present in phys_port_mask, otherwise report
 * 0xffff (reads as "no device"). */
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (!(ds->phys_port_mask & (1 << addr)))
		return 0xffff;

	return ds->drv->phy_read(ds, addr, reg);
}
/* mii_bus write accessor for the slave bus: forward to the switch
 * driver for ports present in phys_port_mask, otherwise silently
 * succeed. */
static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (!(ds->phys_port_mask & (1 << addr)))
		return 0;

	return ds->drv->phy_write(ds, addr, reg, val);
}
/* Initialise the mii_bus that fronts the switch's internal PHYs,
 * routing accesses through dsa_slave_phy_read/write and deriving its
 * bus id from the master bus id plus the switch address. */
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s:%.2x",
			ds->master_mii_bus->id, ds->pd->sw_addr);
	ds->slave_mii_bus->parent = &ds->master_mii_bus->dev;
}
/* slave device handling ****************************************************/
/* ndo_init: expose the master netdev as this slave's lower link. */
static int dsa_slave_init(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	dev->iflink = p->parent->dst->master_netdev->ifindex;

	return 0;
}
/*
 * ndo_open: a slave can only come up while its master interface is up.
 * Mirror the slave's unicast address, allmulti and promisc state onto
 * the master, unwinding in reverse order via the goto chain if any
 * step fails.
 */
static int dsa_slave_open(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	/* nonzero compare => slave MAC differs from the master's, so the
	 * master must be told to accept the slave's unicast address */
	if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	return 0;

clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
/* ndo_stop: undo everything dsa_slave_open pushed onto the master —
 * synced mc/uc lists, allmulti/promisc counts and the slave's unicast
 * address (when it differs from the master's). */
static int dsa_slave_close(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
/* Propagate ALLMULTI/PROMISC flag transitions on the slave to the
 * master's reference counts: +1 when the flag was just set, -1 when
 * it was just cleared. */
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;

	if (change & IFF_ALLMULTI) {
		int inc = (dev->flags & IFF_ALLMULTI) ? 1 : -1;

		dev_set_allmulti(master, inc);
	}
	if (change & IFF_PROMISC) {
		int inc = (dev->flags & IFF_PROMISC) ? 1 : -1;

		dev_set_promiscuity(master, inc);
	}
}
/* ndo_set_rx_mode: keep the master's multicast and unicast address
 * lists in sync with this slave's lists. */
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
/*
 * ndo_set_mac_address: validate the new address and, when the device
 * is up, swap the secondary unicast entry on the master (add new,
 * delete old — each only when it differs from the master's own MAC)
 * before committing the address to dev_addr.
 */
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;	/* master filters only matter while up */

	if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (compare_ether_addr(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	return 0;
}
/* ndo_do_ioctl: delegate MII ioctls to the attached PHY, if any. */
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *priv = netdev_priv(dev);

	if (priv->phy == NULL)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(priv->phy, ifr, cmd);
}
/* ethtool operations *******************************************************/
/* ethtool get_settings: refresh the PHY status, then report its
 * settings; -EOPNOTSUPP when no PHY is attached. */
static int
dsa_slave_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	int rc;

	if (p->phy == NULL)
		return -EOPNOTSUPP;

	rc = phy_read_status(p->phy);
	if (rc != 0)
		return rc;

	return phy_ethtool_gset(p->phy, cmd);
}
/* ethtool set_settings: hand the request to the attached PHY, if any. */
static int
dsa_slave_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dsa_slave_priv *priv = netdev_priv(dev);

	if (priv->phy == NULL)
		return -EOPNOTSUPP;

	return phy_ethtool_sset(priv->phy, cmd);
}
/* ethtool get_drvinfo: fill in static driver identification.
 * Bound each copy by the actual ethtool_drvinfo field size instead of
 * the magic constant 32, so the code cannot silently overrun (or
 * under-use) the buffers if the structure layout changes. */
static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strncpy(drvinfo->version, dsa_driver_version, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
/* ethtool nway_reset: restart autonegotiation on the attached PHY. */
static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_slave_priv *priv = netdev_priv(dev);

	if (priv->phy == NULL)
		return -EOPNOTSUPP;

	return genphy_restart_aneg(priv->phy);
}
/* ethtool get_link: refresh and report the PHY's link state.
 * NOTE(review): when no PHY is attached, -EOPNOTSUPP is returned
 * through a u32 return type, which callers see as a large positive
 * (i.e. "link up") value — confirm this is intended. */
static u32 dsa_slave_get_link(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->phy != NULL) {
		genphy_update_link(p->phy);
		return p->phy->link;
	}

	return -EOPNOTSUPP;
}
/* ethtool get_strings: emit the four generic counter names, then let
 * the switch driver append its own stat names after them.  Layout
 * must match dsa_slave_get_ethtool_stats. */
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->parent;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->drv->get_strings != NULL)
			ds->drv->get_strings(ds, p->port, data + 4 * len);
	}
}
/* ethtool get_ethtool_stats: the first four slots are the generic
 * netdev counters, the rest are filled by the switch driver.  Slot
 * order must match dsa_slave_get_strings. */
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->parent;

	data[0] = p->dev->stats.tx_packets;
	data[1] = p->dev->stats.tx_bytes;
	data[2] = p->dev->stats.rx_packets;
	data[3] = p->dev->stats.rx_bytes;
	if (ds->drv->get_ethtool_stats != NULL)
		ds->drv->get_ethtool_stats(ds, p->port, data + 4);
}
/* ethtool get_sset_count: 4 generic counters plus however many the
 * switch driver exports. */
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_slave_priv *priv = netdev_priv(dev);
	struct dsa_switch *ds = priv->parent;
	int count;

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	count = 4;	/* tx/rx packets and bytes */
	if (ds->drv->get_sset_count != NULL)
		count += ds->drv->get_sset_count(ds);

	return count;
}
/* ethtool operations shared by all slave ports. */
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_settings		= dsa_slave_get_settings,
	.set_settings		= dsa_slave_set_settings,
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= dsa_slave_get_link,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
};
/* Per-tagging-protocol netdev_ops tables.  They are identical except
 * for ndo_start_xmit, which selects the frame-tagging transmit path
 * (DSA, EDSA, or trailer). */
#ifdef CONFIG_NET_DSA_TAG_DSA
static const struct net_device_ops dsa_netdev_ops = {
	.ndo_init		= dsa_slave_init,
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_do_ioctl		= dsa_slave_ioctl,
};
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
static const struct net_device_ops edsa_netdev_ops = {
	.ndo_init		= dsa_slave_init,
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= edsa_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_do_ioctl		= dsa_slave_ioctl,
};
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
static const struct net_device_ops trailer_netdev_ops = {
	.ndo_init		= dsa_slave_init,
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= trailer_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_multicast_list = dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_do_ioctl		= dsa_slave_ioctl,
};
#endif
/* slave device setup *******************************************************/
/*
 * Allocate and register a slave net_device for one switch port.
 * The slave inherits the master's MAC address and vlan_features,
 * picks its netdev_ops by tagging protocol, registers with the
 * network stack, and finally attaches and starts the port's PHY
 * (when the slave mii bus has one for this port).
 * Returns the new device, or NULL on any failure.
 */
struct net_device *
dsa_slave_create(struct dsa_switch *ds, struct device *parent,
		 int port, char *name)
{
	struct net_device *master = ds->dst->master_netdev;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv),
				 name, ether_setup);
	if (slave_dev == NULL)
		return slave_dev;

	slave_dev->features = master->vlan_features;
	SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
	memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
	slave_dev->tx_queue_len = 0;	/* no qdisc queueing on the slave */

	/* the tagging protocol decides which xmit path to use */
	switch (ds->dst->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
	case htons(ETH_P_DSA):
		slave_dev->netdev_ops = &dsa_netdev_ops;
		break;
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
	case htons(ETH_P_EDSA):
		slave_dev->netdev_ops = &edsa_netdev_ops;
		break;
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	case htons(ETH_P_TRAILER):
		slave_dev->netdev_ops = &trailer_netdev_ops;
		break;
#endif
	default:
		BUG();	/* tag_protocol was validated at probe time */
	}

	SET_NETDEV_DEV(slave_dev, parent);
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->dev = slave_dev;
	p->parent = ds;
	p->port = port;
	p->phy = ds->slave_mii_bus->phy_map[port];

	ret = register_netdev(slave_dev);
	if (ret) {
		printk(KERN_ERR "%s: error %d registering interface %s\n",
				master->name, ret, slave_dev->name);
		free_netdev(slave_dev);
		return NULL;
	}

	netif_carrier_off(slave_dev);

	if (p->phy != NULL) {
		phy_attach(slave_dev, dev_name(&p->phy->dev),
			   0, PHY_INTERFACE_MODE_GMII);

		p->phy->autoneg = AUTONEG_ENABLE;
		p->phy->speed = 0;
		p->phy->duplex = 0;
		p->phy->advertising = p->phy->supported | ADVERTISED_Autoneg;
		phy_start_aneg(p->phy);
	}

	return slave_dev;
}
| gpl-2.0 |
pacerom/kernel_bn_encore | drivers/isdn/hisax/isdnl3.c | 3034 | 13400 | /* $Id: isdnl3.c,v 2.22.2.3 2004/01/13 14:31:25 keil Exp $
*
* Author Karsten Keil
* based on the teles driver from Jan den Ouden
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
* Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
*
*/
#include <linux/init.h>
#include <linux/slab.h>
#include "hisax.h"
#include "isdnl3.h"
const char *l3_revision = "$Revision: 2.22.2.3 $";
static struct Fsm l3fsm;
enum {
ST_L3_LC_REL,
ST_L3_LC_ESTAB_WAIT,
ST_L3_LC_REL_DELAY,
ST_L3_LC_REL_WAIT,
ST_L3_LC_ESTAB,
};
#define L3_STATE_COUNT (ST_L3_LC_ESTAB+1)
static char *strL3State[] =
{
"ST_L3_LC_REL",
"ST_L3_LC_ESTAB_WAIT",
"ST_L3_LC_REL_DELAY",
"ST_L3_LC_REL_WAIT",
"ST_L3_LC_ESTAB",
};
enum {
EV_ESTABLISH_REQ,
EV_ESTABLISH_IND,
EV_ESTABLISH_CNF,
EV_RELEASE_REQ,
EV_RELEASE_CNF,
EV_RELEASE_IND,
EV_TIMEOUT,
};
#define L3_EVENT_COUNT (EV_TIMEOUT+1)
static char *strL3Event[] =
{
"EV_ESTABLISH_REQ",
"EV_ESTABLISH_IND",
"EV_ESTABLISH_CNF",
"EV_RELEASE_REQ",
"EV_RELEASE_CNF",
"EV_RELEASE_IND",
"EV_TIMEOUT",
};
/* FSM debug callback: route printf-style messages to the card's
 * status log, tagged with the layer-3 debug id. */
static __attribute__((format(printf, 2, 3))) void
l3m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;

	va_start(args, fmt);
	VHiSax_putstatus(st->l1.hardware, st->l3.debug_id, fmt, args);
	va_end(args);
}
/*
 * Scan a Q.931 message body of 'size' bytes for information element
 * 'ie' in codeset 'wanted_set'.  Returns a pointer to the IE, or NULL
 * when it is absent or its encoded length would run past the buffer.
 * IEs are assumed to appear in ascending order within a codeset, so
 * the scan stops early once a larger IE id is seen.
 */
u_char *
findie(u_char * p, int size, u_char ie, int wanted_set)
{
	int l, codeset, maincodeset;
	u_char *pend = p + size;

	/* skip protocol discriminator, callref and message type */
	p++;
	l = (*p++) & 0xf;	/* callref length */
	p += l;
	p++;
	codeset = 0;
	maincodeset = 0;
	/* while there are bytes left... */
	while (p < pend) {
		if ((*p & 0xf0) == 0x90) {
			/* shift IE: switch codeset; bit 3 clear means a
			 * locking shift that becomes the new default */
			codeset = *p & 0x07;
			if (!(*p & 0x08))
				maincodeset = codeset;
		}
		if (*p & 0x80)
			p++;	/* single-octet IE */
		else {
			if (codeset == wanted_set) {
				if (*p == ie)
				  { /* improved length check (Werner Cornelius) */
				    if ((pend - p) < 2)
				      return(NULL);
				    if (*(p+1) > (pend - (p+2)))
				      return(NULL);
				    return (p);
				  }
				if (*p > ie)
					return (NULL);	/* IEs are ordered: not present */
			}
			p++;
			l = *p++;
			p += l;
			codeset = maincodeset;	/* non-locking shift expires */
		}
	}
	return (NULL);
}
/*
 * Extract the call reference from a Q.931 message.  Returns the
 * one-octet callref value, -1 for the dummy (zero-length) callref,
 * or -2 when the callref length is not the single octet BRI allows.
 */
int
getcallref(u_char * p)
{
	int len;

	p++;			/* skip protocol discriminator */
	if (*p & 0xfe)		/* wrong callref BRI only 1 octet*/
		return (-2);
	len = *p++ & 0xf;	/* callref length */
	if (len == 0)		/* dummy CallRef */
		return (-1);
	return (*p);
}
/* Last call reference handed out for outgoing calls. */
static int OrigCallRef = 0;

/* Return the next outgoing call reference, cycling through 1..127. */
int
newcallref(void)
{
	OrigCallRef = (OrigCallRef >= 127) ? 1 : OrigCallRef + 1;
	return (OrigCallRef);
}
/* Move a call process to a new state, logging the transition when
 * state debugging is enabled. */
void
newl3state(struct l3_process *pc, int state)
{
	if (pc->debug & L3_DEB_STATE)
		l3_debug(pc->st, "newstate cr %d %d --> %d",
			 pc->callref & 0x7F,
			 pc->state, state);
	pc->state = state;
}
/* Timer expiry: deliver the configured event to the process via the
 * layer-4-to-layer-3 callback. */
static void
L3ExpireTimer(struct L3Timer *t)
{
	t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
}

/* Bind a timer to its process and prepare the kernel timer. */
void
L3InitTimer(struct l3_process *pc, struct L3Timer *t)
{
	t->pc = pc;
	t->tl.function = (void *) L3ExpireTimer;
	t->tl.data = (long) t;
	init_timer(&t->tl);
}

/* Cancel a pending timer (safe if not running). */
void
L3DelTimer(struct L3Timer *t)
{
	del_timer(&t->tl);
}

/* Arm a timer to fire 'event' after 'millisec' ms.  Refuses (and
 * returns -1) if the timer is already pending. */
int
L3AddTimer(struct L3Timer *t,
	   int millisec, int event)
{
	if (timer_pending(&t->tl)) {
		printk(KERN_WARNING "L3AddTimer: timer already active!\n");
		return -1;
	}
	init_timer(&t->tl);
	t->event = event;
	t->tl.expires = jiffies + (millisec * HZ) / 1000;
	add_timer(&t->tl);
	return 0;
}

/* Stop every timer owned by the process (currently just one). */
void
StopAllL3Timer(struct l3_process *pc)
{
	L3DelTimer(&pc->timer);
}
/* Allocate a D-channel skb with MAX_HEADER_LEN headroom reserved;
 * returns NULL (with a warning) when atomic allocation fails. */
struct sk_buff *
l3_alloc_skb(int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + MAX_HEADER_LEN, GFP_ATOMIC);
	if (skb == NULL) {
		printk(KERN_WARNING "HiSax: No skb for D-channel\n");
		return (NULL);
	}
	skb_reserve(skb, MAX_HEADER_LEN);
	return (skb);
}
/* Fallback layer-3 handler used when no D-channel protocol is
 * configured: log once and drop any skb passed in. */
static void
no_l3_proto(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;

	HiSax_putstatus(st->l1.hardware, "L3", "no D protocol");
	if (skb) {
		dev_kfree_skb(skb);
	}
}

/* Fallback for protocol-specific requests: always rejected. */
static int
no_l3_proto_spec(struct PStack *st, isdn_ctrl *ic)
{
	printk(KERN_WARNING "HiSax: no specific protocol handler for proto %lu\n",ic->arg & 0xFF);
	return(-1);
}
/* Find the call process with call reference 'cr', or NULL. */
struct l3_process
*getl3proc(struct PStack *st, int cr)
{
	struct l3_process *proc;

	/* linear scan of the singly linked process list */
	for (proc = st->l3.proc; proc != NULL; proc = proc->next)
		if (proc->callref == cr)
			return (proc);
	return (NULL);
}
/* Allocate a new call process for call reference 'cr', append it to
 * the stack's process list and initialise its timer.  Returns NULL on
 * allocation failure. */
struct l3_process
*new_l3_process(struct PStack *st, int cr)
{
	struct l3_process *p, *np;

	if (!(p = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
		printk(KERN_ERR "HiSax can't get memory for cr %d\n", cr);
		return (NULL);
	}
	/* append at the tail of the singly linked list */
	if (!st->l3.proc)
		st->l3.proc = p;
	else {
		np = st->l3.proc;
		while (np->next)
			np = np->next;
		np->next = p;
	}
	p->next = NULL;
	p->debug = st->l3.debug;
	p->callref = cr;
	p->state = 0;
	p->chan = NULL;
	p->st = st;
	p->N303 = st->l3.N303;
	L3InitTimer(p, &p->timer);
	return (p);
};
/*
 * Unlink and free one call process.  When the last process goes away
 * on a point-to-multipoint link and no messages are queued, the data
 * link itself is released (NI-1 keeps layer 2 up, so it only gets a
 * RELEASE indication event instead of a request).
 */
void
release_l3_process(struct l3_process *p)
{
	struct l3_process *np, *pp = NULL;

	if (!p)
		return;
	np = p->st->l3.proc;
	while (np) {
		if (np == p) {
			StopAllL3Timer(p);
			if (pp)
				pp->next = np->next;	/* unlink mid-list */
			else if (!(p->st->l3.proc = np->next) &&
				!test_bit(FLG_PTP, &p->st->l2.flag)) {
				if (p->debug)
					l3_debug(p->st, "release_l3_process: last process");
				if (skb_queue_empty(&p->st->l3.squeue)) {
					if (p->debug)
						l3_debug(p->st, "release_l3_process: release link");
					if (p->st->protocol != ISDN_PTYPE_NI1)
						FsmEvent(&p->st->l3.l3m, EV_RELEASE_REQ, NULL);
					else
						FsmEvent(&p->st->l3.l3m, EV_RELEASE_IND, NULL);
				} else {
					if (p->debug)
						l3_debug(p->st, "release_l3_process: not release link");
				}
			}
			kfree(p);
			return;
		}
		pp = np;
		np = np->next;
	}
	printk(KERN_ERR "HiSax internal L3 error CR(%d) not in list\n", p->callref);
	l3_debug(p->st, "HiSax internal L3 error CR(%d) not in list", p->callref);
};
static void
l3ml3p(struct PStack *st, int pr)
{
struct l3_process *p = st->l3.proc;
struct l3_process *np;
while (p) {
/* p might be kfreed under us, so we need to save where we want to go on */
np = p->next;
st->l3.l3ml3(st, pr, p);
p = np;
}
}
/*
 * Initialise the layer-3 part of a D-channel stack: set up the link
 * control FSM, then bind the protocol-specific handlers (DSS1/NI1/
 * 1TR6) selected at configure time.  Unsupported protocols fall back
 * to the no-op handlers and mark the stack's protocol invalid.
 */
void
setstack_l3dc(struct PStack *st, struct Channel *chanp)
{
	char tmp[64];

	st->l3.proc   = NULL;
	st->l3.global = NULL;
	skb_queue_head_init(&st->l3.squeue);
	st->l3.l3m.fsm = &l3fsm;
	st->l3.l3m.state = ST_L3_LC_REL;	/* link starts released */
	st->l3.l3m.debug = 1;
	st->l3.l3m.userdata = st;
	st->l3.l3m.userint = 0;
	st->l3.l3m.printdebug = l3m_debug;
	FsmInitTimer(&st->l3.l3m, &st->l3.l3m_timer);
	strcpy(st->l3.debug_id, "L3DC ");
	st->lli.l4l3_proto = no_l3_proto_spec;

#ifdef CONFIG_HISAX_EURO
	if (st->protocol == ISDN_PTYPE_EURO) {
		setstack_dss1(st);
	} else
#endif
#ifdef CONFIG_HISAX_NI1
	if (st->protocol == ISDN_PTYPE_NI1) {
		setstack_ni1(st);
	} else
#endif
#ifdef CONFIG_HISAX_1TR6
	if (st->protocol == ISDN_PTYPE_1TR6) {
		setstack_1tr6(st);
	} else
#endif
	if (st->protocol == ISDN_PTYPE_LEASED) {
		st->lli.l4l3 = no_l3_proto;
		st->l2.l2l3 = no_l3_proto;
		st->l3.l3ml3 = no_l3_proto;
		printk(KERN_INFO "HiSax: Leased line mode\n");
	} else {
		st->lli.l4l3 = no_l3_proto;
		st->l2.l2l3 = no_l3_proto;
		st->l3.l3ml3 = no_l3_proto;
		sprintf(tmp, "protocol %s not supported",
			(st->protocol == ISDN_PTYPE_1TR6) ? "1tr6" :
			(st->protocol == ISDN_PTYPE_EURO) ? "euro" :
			(st->protocol == ISDN_PTYPE_NI1) ? "ni1" :
			"unknown");
		printk(KERN_WARNING "HiSax: %s\n", tmp);
		st->protocol = -1;
	}
}
/* Transparent layer 3 (B-channel): pass primitives straight down. */
static void
isdnl3_trans(struct PStack *st, int pr, void *arg) {
	st->l3.l3l2(st, pr, arg);
}
/* Tear down the layer-3 state: free all call processes, the global
 * process, the FSM timer and any queued messages. */
void
releasestack_isdnl3(struct PStack *st)
{
	while (st->l3.proc)
		release_l3_process(st->l3.proc);
	if (st->l3.global) {
		StopAllL3Timer(st->l3.global);
		kfree(st->l3.global);
		st->l3.global = NULL;
	}
	FsmDelTimer(&st->l3.l3m_timer, 54);
	skb_queue_purge(&st->l3.squeue);
}
/* Initialise layer 3 for a B-channel stack: same FSM setup as the
 * D-channel variant, but layer 4 talks through the transparent
 * pass-through handler. */
void
setstack_l3bc(struct PStack *st, struct Channel *chanp)
{
	st->l3.proc   = NULL;
	st->l3.global = NULL;
	skb_queue_head_init(&st->l3.squeue);
	st->l3.l3m.fsm = &l3fsm;
	st->l3.l3m.state = ST_L3_LC_REL;
	st->l3.l3m.debug = 1;
	st->l3.l3m.userdata = st;
	st->l3.l3m.userint = 0;
	st->l3.l3m.printdebug = l3m_debug;
	strcpy(st->l3.debug_id, "L3BC ");
	st->lli.l4l3 = isdnl3_trans;
}
/* Delay (ms) before actually releasing the data link after the last
 * call goes away. */
#define DREL_TIMER_VALUE 40000

/* REL + ESTABLISH_REQ: ask layer 2 to bring the link up. */
static void
lc_activate(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L3_LC_ESTAB_WAIT);
	st->l3.l3l2(st, DL_ESTABLISH | REQUEST, NULL);
}

/* Link came up (indication/confirm from REL): flush the send queue.
 * If nothing needed the link after all, release it again; otherwise
 * tell all call processes the link is established. */
static void
lc_connect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int dequeued = 0;

	FsmChangeState(fi, ST_L3_LC_ESTAB);
	while ((skb = skb_dequeue(&st->l3.squeue))) {
		st->l3.l3l2(st, DL_DATA | REQUEST, skb);
		dequeued++;
	}
	if ((!st->l3.proc) && dequeued) {
		if (st->l3.debug)
			l3_debug(st, "lc_connect: release link");
		FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
	} else
		l3ml3p(st, DL_ESTABLISH | INDICATION);
}

/* Same as lc_connect but for the confirm path out of ESTAB_WAIT /
 * REL_DELAY; also cancels the pending release timer. */
static void
lc_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int dequeued = 0;

	FsmDelTimer(&st->l3.l3m_timer, 51);
	FsmChangeState(fi, ST_L3_LC_ESTAB);
	while ((skb = skb_dequeue(&st->l3.squeue))) {
		st->l3.l3l2(st, DL_DATA | REQUEST, skb);
		dequeued++;
	}
	if ((!st->l3.proc) && dequeued) {
		if (st->l3.debug)
			l3_debug(st, "lc_connected: release link");
		FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
	} else
		l3ml3p(st, DL_ESTABLISH | CONFIRM);
}

/* Defer the link release: start the delayed-release timer. */
static void
lc_start_delay(struct FsmInst *fi, int event, void *arg)
{
       struct PStack *st = fi->userdata;

       FsmChangeState(fi, ST_L3_LC_REL_DELAY);
       FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}
static void
lc_start_delay_check(struct FsmInst *fi, int event, void *arg)
/* 20/09/00 - GE timer not user for NI-1 as layer 2 should stay up */
{
       struct PStack *st = fi->userdata;

       FsmChangeState(fi, ST_L3_LC_REL_DELAY);
       /* 19/09/00 - GE timer not user for NI-1 */
       if (st->protocol != ISDN_PTYPE_NI1)
	       FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}

/* Release timer expired: actually ask layer 2 to drop the link —
 * unless layer 2 is blocked, in which case just re-arm the timer. */
static void
lc_release_req(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_L2BLOCK, &st->l2.flag)) {
		if (st->l3.debug)
			l3_debug(st, "lc_release_req: l2 blocked");
		/* restart release timer */
		FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 51);
	} else {
		FsmChangeState(fi, ST_L3_LC_REL_WAIT);
		st->l3.l3l2(st, DL_RELEASE | REQUEST, NULL);
	}
}

/* Link dropped by the peer: discard queued messages and notify all
 * call processes. */
static void
lc_release_ind(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmDelTimer(&st->l3.l3m_timer, 52);
	FsmChangeState(fi, ST_L3_LC_REL);
	skb_queue_purge(&st->l3.squeue);
	l3ml3p(st, DL_RELEASE | INDICATION);
}

/* Our release request was confirmed: same cleanup, confirm upward. */
static void
lc_release_cnf(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L3_LC_REL);
	skb_queue_purge(&st->l3.squeue);
	l3ml3p(st, DL_RELEASE | CONFIRM);
}
/* *INDENT-OFF* */
/* Link-control FSM transition table: (state, event) -> handler. */
static struct FsmNode L3FnList[] __initdata =
{
	{ST_L3_LC_REL,		EV_ESTABLISH_REQ,	lc_activate},
	{ST_L3_LC_REL,		EV_ESTABLISH_IND,	lc_connect},
	{ST_L3_LC_REL,		EV_ESTABLISH_CNF,	lc_connect},
	{ST_L3_LC_ESTAB_WAIT,	EV_ESTABLISH_CNF,	lc_connected},
	{ST_L3_LC_ESTAB_WAIT,	EV_RELEASE_REQ,		lc_start_delay},
	{ST_L3_LC_ESTAB_WAIT,	EV_RELEASE_IND,		lc_release_ind},
	{ST_L3_LC_ESTAB,	EV_RELEASE_IND,		lc_release_ind},
	{ST_L3_LC_ESTAB,	EV_RELEASE_REQ,		lc_start_delay_check},
	{ST_L3_LC_REL_DELAY,    EV_RELEASE_IND,         lc_release_ind},
	{ST_L3_LC_REL_DELAY,    EV_ESTABLISH_REQ,       lc_connected},
	{ST_L3_LC_REL_DELAY,    EV_TIMEOUT,             lc_release_req},
	{ST_L3_LC_REL_WAIT,	EV_RELEASE_CNF,		lc_release_cnf},
	{ST_L3_LC_REL_WAIT,	EV_ESTABLISH_REQ,	lc_activate},
};
/* *INDENT-ON* */
/*
 * Entry point for data-link primitives: DATA requests are sent
 * directly when the link is up, otherwise queued while the FSM brings
 * the link up; all other primitives are translated into FSM events.
 */
void
l3_msg(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
		case (DL_DATA | REQUEST):
			if (st->l3.l3m.state == ST_L3_LC_ESTAB) {
				st->l3.l3l2(st, pr, arg);
			} else {
				struct sk_buff *skb = arg;

				/* queue until lc_connect(ed) flushes it */
				skb_queue_tail(&st->l3.squeue, skb);
				FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
			}
			break;
		case (DL_ESTABLISH | REQUEST):
			FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
			break;
		case (DL_ESTABLISH | CONFIRM):
			FsmEvent(&st->l3.l3m, EV_ESTABLISH_CNF, NULL);
			break;
		case (DL_ESTABLISH | INDICATION):
			FsmEvent(&st->l3.l3m, EV_ESTABLISH_IND, NULL);
			break;
		case (DL_RELEASE | INDICATION):
			FsmEvent(&st->l3.l3m, EV_RELEASE_IND, NULL);
			break;
		case (DL_RELEASE | CONFIRM):
			FsmEvent(&st->l3.l3m, EV_RELEASE_CNF, NULL);
			break;
		case (DL_RELEASE | REQUEST):
			FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
			break;
	}
}
/* Module init: build the shared link-control FSM from L3FnList. */
int __init
Isdnl3New(void)
{
	l3fsm.state_count = L3_STATE_COUNT;
	l3fsm.event_count = L3_EVENT_COUNT;
	l3fsm.strEvent = strL3Event;
	l3fsm.strState = strL3State;
	return FsmNew(&l3fsm, L3FnList, ARRAY_SIZE(L3FnList));
}
/* Module exit: free the shared link-control FSM. */
void
Isdnl3Free(void)
{
	FsmFree(&l3fsm);
}
| gpl-2.0 |
GalaxyTab4/android_kernel_samsung_degaswifi | drivers/video/omap/lcd_palmte.c | 3290 | 2585 | /*
* LCD panel support for the Palm Tungsten E
*
* Original version : Romain Goyet <r.goyet@gmail.com>
* Current version : Laurent Gonzalez <palmte.linux@free.fr>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include "omapfb.h"
/* The Palm Tungsten E panel needs no runtime control: all lcd_panel
 * callbacks are no-ops and only the timing data below matters. */
static int palmte_panel_init(struct lcd_panel *panel,
				struct omapfb_device *fbdev)
{
	return 0;
}

static void palmte_panel_cleanup(struct lcd_panel *panel)
{
}

static int palmte_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

static void palmte_panel_disable(struct lcd_panel *panel)
{
}

static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
{
	return 0;	/* no optional capabilities */
}
/* Panel description: 320x320 TFT, 16 data lines, 12 MHz pixel clock,
 * plus the horizontal/vertical sync and porch timings. */
struct lcd_panel palmte_panel = {
	.name		= "palmte",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
			  OMAP_LCDC_HSVS_OPPOSITE,

	.data_lines	= 16,
	.bpp		= 8,
	.pixel_clock	= 12000,
	.x_res		= 320,
	.y_res		= 320,
	.hsw		= 4,
	.hfp		= 8,
	.hbp		= 28,
	.vsw		= 1,
	.vfp		= 8,
	.vbp		= 7,
	.pcd		= 0,

	.init		= palmte_panel_init,
	.cleanup	= palmte_panel_cleanup,
	.enable		= palmte_panel_enable,
	.disable	= palmte_panel_disable,
	.get_caps	= palmte_panel_get_caps,
};
/* Platform driver glue: probe registers the panel with omapfb; the
 * remaining callbacks have nothing to do for this fixed panel. */
static int palmte_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&palmte_panel);
	return 0;
}

static int palmte_panel_remove(struct platform_device *pdev)
{
	return 0;
}

static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

static int palmte_panel_resume(struct platform_device *pdev)
{
	return 0;
}
/* Platform driver bound by name to the "lcd_palmte" device. */
static struct platform_driver palmte_panel_driver = {
	.probe		= palmte_panel_probe,
	.remove		= palmte_panel_remove,
	.suspend	= palmte_panel_suspend,
	.resume		= palmte_panel_resume,
	.driver		= {
		.name	= "lcd_palmte",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(palmte_panel_driver);
| gpl-2.0 |
TeamHydra/android_kernel_samsung_n7100 | drivers/hwmon/vt1211.c | 3290 | 39756 | /*
* vt1211.c - driver for the VIA VT1211 Super-I/O chip integrated hardware
* monitoring features
* Copyright (C) 2006 Juerg Haefliger <juergh@gmail.com>
*
* This driver is based on the driver for kernel 2.4 by Mark D. Studebaker
* and its port to kernel 2.6 by Lars Ekman.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/io.h>
static int uch_config = -1;
module_param(uch_config, int, 0);
MODULE_PARM_DESC(uch_config, "Initialize the universal channel configuration");
static int int_mode = -1;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, "Force the temperature interrupt mode");
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
static struct platform_device *pdev;
#define DRVNAME "vt1211"
/* ---------------------------------------------------------------------
* Registers
*
* The sensors are defined as follows.
*
* Sensor Voltage Mode Temp Mode Notes (from the datasheet)
* -------- ------------ --------- --------------------------
* Reading 1 temp1 Intel thermal diode
* Reading 3 temp2 Internal thermal diode
* UCH1/Reading2 in0 temp3 NTC type thermistor
* UCH2 in1 temp4 +2.5V
* UCH3 in2 temp5 VccP
* UCH4 in3 temp6 +5V
* UCH5 in4 temp7 +12V
* 3.3V in5 Internal VDD (+3.3V)
*
* --------------------------------------------------------------------- */
/* Voltages (in) numbered 0-5 (ix) */
#define VT1211_REG_IN(ix) (0x21 + (ix))
#define VT1211_REG_IN_MIN(ix) ((ix) == 0 ? 0x3e : 0x2a + 2 * (ix))
#define VT1211_REG_IN_MAX(ix) ((ix) == 0 ? 0x3d : 0x29 + 2 * (ix))
/* Temperatures (temp) numbered 0-6 (ix) */
static u8 regtemp[] = {0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25};
static u8 regtempmax[] = {0x39, 0x1d, 0x3d, 0x2b, 0x2d, 0x2f, 0x31};
static u8 regtemphyst[] = {0x3a, 0x1e, 0x3e, 0x2c, 0x2e, 0x30, 0x32};
/* Fans numbered 0-1 (ix) */
#define VT1211_REG_FAN(ix) (0x29 + (ix))
#define VT1211_REG_FAN_MIN(ix) (0x3b + (ix))
#define VT1211_REG_FAN_DIV 0x47
/* PWMs numbered 0-1 (ix) */
/* Auto points numbered 0-3 (ap) */
#define VT1211_REG_PWM(ix) (0x60 + (ix))
#define VT1211_REG_PWM_CLK 0x50
#define VT1211_REG_PWM_CTL 0x51
#define VT1211_REG_PWM_AUTO_TEMP(ap) (0x55 - (ap))
#define VT1211_REG_PWM_AUTO_PWM(ix, ap) (0x58 + 2 * (ix) - (ap))
/* Miscellaneous registers */
#define VT1211_REG_CONFIG 0x40
#define VT1211_REG_ALARM1 0x41
#define VT1211_REG_ALARM2 0x42
#define VT1211_REG_VID 0x45
#define VT1211_REG_UCH_CONFIG 0x4a
#define VT1211_REG_TEMP1_CONFIG 0x4b
#define VT1211_REG_TEMP2_CONFIG 0x4c
/* In, temp & fan alarm bits */
static const u8 bitalarmin[] = {11, 0, 1, 3, 8, 2, 9};
static const u8 bitalarmtemp[] = {4, 15, 11, 0, 1, 3, 8};
static const u8 bitalarmfan[] = {6, 7};
/* ---------------------------------------------------------------------
* Data structures and manipulation thereof
* --------------------------------------------------------------------- */
/* Per-device state: cached register values plus bookkeeping for the
 * periodic update (update_lock serialises readers against refresh). */
struct vt1211_data {
	unsigned short addr;		/* ISA I/O base of the HW monitor */
	const char *name;
	struct device *hwmon_dev;

	struct mutex update_lock;
	char valid;			/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	/* Register values */
	u8 in[6];
	u8 in_max[6];
	u8 in_min[6];
	u8 temp[7];
	u8 temp_max[7];
	u8 temp_hyst[7];
	u8 fan[2];
	u8 fan_min[2];
	u8 fan_div[2];
	u8 fan_ctl;
	u8 pwm[2];
	u8 pwm_ctl[2];
	u8 pwm_clk;
	u8 pwm_auto_temp[4];
	u8 pwm_auto_pwm[2][4];
	u8 vid;				/* Read once at init time */
	u8 vrm;
	u8 uch_config;			/* Read once at init time */
	u16 alarms;
};
/* ix = [0-5] */
#define ISVOLT(ix, uch_config) ((ix) > 4 ? 1 : \
!(((uch_config) >> ((ix) + 2)) & 1))
/* ix = [0-6] */
#define ISTEMP(ix, uch_config) ((ix) < 2 ? 1 : \
((uch_config) >> (ix)) & 1)
/* in5 (ix = 5) is special. It's the internal 3.3V so it's scaled in the
driver according to the VT1211 BIOS porting guide */
#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \
(((reg) - 3) * 15882 + 479) / 958 : \
(((reg) - 3) * 10000 + 479) / 958)
#define IN_TO_REG(ix, val) (SENSORS_LIMIT((ix) == 5 ? \
((val) * 958 + 7941) / 15882 + 3 : \
((val) * 958 + 5000) / 10000 + 3, 0, 255))
/* temp1 (ix = 0) is an intel thermal diode which is scaled in user space.
temp2 (ix = 1) is the internal temp diode so it's scaled in the driver
according to some measurements that I took on an EPIA M10000.
temp3-7 are thermistor based so the driver returns the voltage measured at
the pin (range 0V - 2.2V). */
#define TEMP_FROM_REG(ix, reg) ((ix) == 0 ? (reg) * 1000 : \
(ix) == 1 ? (reg) < 51 ? 0 : \
((reg) - 51) * 1000 : \
((253 - (reg)) * 2200 + 105) / 210)
#define TEMP_TO_REG(ix, val) SENSORS_LIMIT( \
((ix) == 0 ? ((val) + 500) / 1000 : \
(ix) == 1 ? ((val) + 500) / 1000 + 51 : \
253 - ((val) * 210 + 1100) / 2200), 0, 255)
#define DIV_FROM_REG(reg) (1 << (reg))
#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
1310720 / (reg) / DIV_FROM_REG(div))
#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \
SENSORS_LIMIT((1310720 / (val) / \
DIV_FROM_REG(div)), 1, 254))
/* ---------------------------------------------------------------------
* Super-I/O constants and functions
* --------------------------------------------------------------------- */
/* Configuration index port registers
* The vt1211 can live at 2 different addresses so we need to probe both */
#define SIO_REG_CIP1 0x2e
#define SIO_REG_CIP2 0x4e
/* Configuration registers */
#define SIO_VT1211_LDN 0x07 /* logical device number */
#define SIO_VT1211_DEVID 0x20 /* device ID */
#define SIO_VT1211_DEVREV 0x21 /* device revision */
#define SIO_VT1211_ACTIVE 0x30 /* HW monitor active */
#define SIO_VT1211_BADDR 0x60 /* base I/O address */
#define SIO_VT1211_ID 0x3c /* VT1211 device ID */
/* VT1211 logical device numbers */
#define SIO_VT1211_LDN_HWMON 0x0b /* HW monitor */
/* Write val to Super-I/O configuration register reg.  The register
 * index is written to the config index port first, then the value to
 * the data port one address above it -- order matters. */
static inline void superio_outb(int sio_cip, int reg, int val)
{
	outb(reg, sio_cip);
	outb(val, sio_cip + 1);
}
/* Read Super-I/O configuration register reg: select the register via
 * the index port, then read the value back from the data port */
static inline int superio_inb(int sio_cip, int reg)
{
	outb(reg, sio_cip);
	return inb(sio_cip + 1);
}
/* Select the logical device (ldn) to be accessed through the
 * configuration ports.  Delegates to superio_outb() instead of
 * duplicating the index/data write pair. */
static inline void superio_select(int sio_cip, int ldn)
{
	superio_outb(sio_cip, SIO_VT1211_LDN, ldn);
}
/* Enter Super-I/O configuration mode.  The double 0x87 write is the
 * chip's unlock sequence (NOTE(review): presumably per the VT1211
 * datasheet -- confirm against the hardware documentation). */
static inline void superio_enter(int sio_cip)
{
	outb(0x87, sio_cip);
	outb(0x87, sio_cip);
}
/* Leave Super-I/O configuration mode (0xaa is the lock/exit command;
 * paired with superio_enter() around every config access) */
static inline void superio_exit(int sio_cip)
{
	outb(0xaa, sio_cip);
}
/* ---------------------------------------------------------------------
* Device I/O access
* --------------------------------------------------------------------- */
/* Read one 8-bit hardware-monitor register, offset from the base I/O
 * address requested in vt1211_probe() */
static inline u8 vt1211_read8(struct vt1211_data *data, u8 reg)
{
	return inb(data->addr + reg);
}
/* Write one 8-bit hardware-monitor register, offset from the base I/O
 * address requested in vt1211_probe() */
static inline void vt1211_write8(struct vt1211_data *data, u8 reg, u8 val)
{
	outb(val, data->addr + reg);
}
/*
 * Refresh the cached register image from the chip and return the
 * per-device data.  Readings are cached for one second (HZ jiffies)
 * to limit I/O traffic; accesses are serialized by update_lock.
 * Voltage/temp channels that are not enabled in uch_config are
 * skipped, so their cache entries keep their previous values.
 */
static struct vt1211_data *vt1211_update_device(struct device *dev)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	int ix, val;
	mutex_lock(&data->update_lock);
	/* registers cache is refreshed after 1 second */
	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		/* read VID */
		data->vid = vt1211_read8(data, VT1211_REG_VID) & 0x1f;
		/* voltage (in) registers */
		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
			if (ISVOLT(ix, data->uch_config)) {
				data->in[ix] = vt1211_read8(data,
						VT1211_REG_IN(ix));
				data->in_min[ix] = vt1211_read8(data,
						VT1211_REG_IN_MIN(ix));
				data->in_max[ix] = vt1211_read8(data,
						VT1211_REG_IN_MAX(ix));
			}
		}
		/* temp registers */
		for (ix = 0; ix < ARRAY_SIZE(data->temp); ix++) {
			if (ISTEMP(ix, data->uch_config)) {
				data->temp[ix] = vt1211_read8(data,
						regtemp[ix]);
				data->temp_max[ix] = vt1211_read8(data,
						regtempmax[ix]);
				data->temp_hyst[ix] = vt1211_read8(data,
						regtemphyst[ix]);
			}
		}
		/* fan & pwm registers */
		for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
			data->fan[ix] = vt1211_read8(data,
						VT1211_REG_FAN(ix));
			data->fan_min[ix] = vt1211_read8(data,
						VT1211_REG_FAN_MIN(ix));
			data->pwm[ix] = vt1211_read8(data,
						VT1211_REG_PWM(ix));
		}
		/* fan divisors and SmartGuardian control share one register */
		val = vt1211_read8(data, VT1211_REG_FAN_DIV);
		data->fan_div[0] = (val >> 4) & 3;
		data->fan_div[1] = (val >> 6) & 3;
		data->fan_ctl = val & 0xf;
		/* both PWM control nibbles live in one register */
		val = vt1211_read8(data, VT1211_REG_PWM_CTL);
		data->pwm_ctl[0] = val & 0xf;
		data->pwm_ctl[1] = (val >> 4) & 0xf;
		data->pwm_clk = vt1211_read8(data, VT1211_REG_PWM_CLK);
		/* pwm & temp auto point registers */
		data->pwm_auto_pwm[0][1] = vt1211_read8(data,
						VT1211_REG_PWM_AUTO_PWM(0, 1));
		data->pwm_auto_pwm[0][2] = vt1211_read8(data,
						VT1211_REG_PWM_AUTO_PWM(0, 2));
		data->pwm_auto_pwm[1][1] = vt1211_read8(data,
						VT1211_REG_PWM_AUTO_PWM(1, 1));
		data->pwm_auto_pwm[1][2] = vt1211_read8(data,
						VT1211_REG_PWM_AUTO_PWM(1, 2));
		for (ix = 0; ix < ARRAY_SIZE(data->pwm_auto_temp); ix++) {
			data->pwm_auto_temp[ix] = vt1211_read8(data,
						VT1211_REG_PWM_AUTO_TEMP(ix));
		}
		/* alarm registers */
		data->alarms = (vt1211_read8(data, VT1211_REG_ALARM2) << 8) |
				vt1211_read8(data, VT1211_REG_ALARM1);
		data->last_updated = jiffies;
		data->valid = 1;
	}
	mutex_unlock(&data->update_lock);
	return data;
}
/* ---------------------------------------------------------------------
* Voltage sysfs interfaces
* ix = [0-5]
* --------------------------------------------------------------------- */
#define SHOW_IN_INPUT 0
#define SHOW_SET_IN_MIN 1
#define SHOW_SET_IN_MAX 2
#define SHOW_IN_ALARM 3
/* Show handler for in[0-5]_{input,min,max,alarm}; the attribute's nr
 * selects which value is formatted, its index selects the channel. */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	int res;

	if (fn == SHOW_IN_INPUT) {
		res = IN_FROM_REG(ix, data->in[ix]);
	} else if (fn == SHOW_SET_IN_MIN) {
		res = IN_FROM_REG(ix, data->in_min[ix]);
	} else if (fn == SHOW_SET_IN_MAX) {
		res = IN_FROM_REG(ix, data->in_max[ix]);
	} else if (fn == SHOW_IN_ALARM) {
		res = (data->alarms >> bitalarmin[ix]) & 1;
	} else {
		res = 0;
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}

	return sprintf(buf, "%d\n", res);
}
/* Store handler for in[0-5]_{min,max}; converts the user value to
 * register format and writes it through to the chip under the lock. */
static ssize_t set_in(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	long val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	if (fn == SHOW_SET_IN_MIN) {
		data->in_min[ix] = IN_TO_REG(ix, val);
		vt1211_write8(data, VT1211_REG_IN_MIN(ix), data->in_min[ix]);
	} else if (fn == SHOW_SET_IN_MAX) {
		data->in_max[ix] = IN_TO_REG(ix, val);
		vt1211_write8(data, VT1211_REG_IN_MAX(ix), data->in_max[ix]);
	} else {
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}
	mutex_unlock(&data->update_lock);

	return count;
}
/* ---------------------------------------------------------------------
* Temperature sysfs interfaces
* ix = [0-6]
* --------------------------------------------------------------------- */
#define SHOW_TEMP_INPUT 0
#define SHOW_SET_TEMP_MAX 1
#define SHOW_SET_TEMP_MAX_HYST 2
#define SHOW_TEMP_ALARM 3
/* Show handler for temp[1-7]_{input,max,max_hyst,alarm}; the
 * attribute's nr selects which value, its index selects the channel. */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	int res;

	if (fn == SHOW_TEMP_INPUT) {
		res = TEMP_FROM_REG(ix, data->temp[ix]);
	} else if (fn == SHOW_SET_TEMP_MAX) {
		res = TEMP_FROM_REG(ix, data->temp_max[ix]);
	} else if (fn == SHOW_SET_TEMP_MAX_HYST) {
		res = TEMP_FROM_REG(ix, data->temp_hyst[ix]);
	} else if (fn == SHOW_TEMP_ALARM) {
		res = (data->alarms >> bitalarmtemp[ix]) & 1;
	} else {
		res = 0;
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}

	return sprintf(buf, "%d\n", res);
}
/* Store handler for temp[1-7]_{max,max_hyst}; converts the user value
 * to register format and writes it through to the chip. */
static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	long val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	if (fn == SHOW_SET_TEMP_MAX) {
		data->temp_max[ix] = TEMP_TO_REG(ix, val);
		vt1211_write8(data, regtempmax[ix], data->temp_max[ix]);
	} else if (fn == SHOW_SET_TEMP_MAX_HYST) {
		data->temp_hyst[ix] = TEMP_TO_REG(ix, val);
		vt1211_write8(data, regtemphyst[ix], data->temp_hyst[ix]);
	} else {
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}
	mutex_unlock(&data->update_lock);

	return count;
}
/* ---------------------------------------------------------------------
* Fan sysfs interfaces
* ix = [0-1]
* --------------------------------------------------------------------- */
#define SHOW_FAN_INPUT 0
#define SHOW_SET_FAN_MIN 1
#define SHOW_SET_FAN_DIV 2
#define SHOW_FAN_ALARM 3
/* Show handler for fan[1-2]_{input,min,div,alarm}; the attribute's nr
 * selects which value is formatted, its index selects the fan. */
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	int res;

	if (fn == SHOW_FAN_INPUT) {
		res = RPM_FROM_REG(data->fan[ix], data->fan_div[ix]);
	} else if (fn == SHOW_SET_FAN_MIN) {
		res = RPM_FROM_REG(data->fan_min[ix], data->fan_div[ix]);
	} else if (fn == SHOW_SET_FAN_DIV) {
		res = DIV_FROM_REG(data->fan_div[ix]);
	} else if (fn == SHOW_FAN_ALARM) {
		res = (data->alarms >> bitalarmfan[ix]) & 1;
	} else {
		res = 0;
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}

	return sprintf(buf, "%d\n", res);
}
/*
 * Store handler for fan[1-2]_{min,div}.  The shared FAN_DIV register
 * is re-read first so the read-modify-write below does not clobber
 * the other fan's divisor bits or the SmartGuardian control nibble.
 * Returns count on success or -EINVAL for an unsupported divisor.
 */
static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *sensor_attr_2 =
						to_sensor_dev_attr_2(attr);
	int ix = sensor_attr_2->index;
	int fn = sensor_attr_2->nr;
	long val = simple_strtol(buf, NULL, 10);
	int reg;
	mutex_lock(&data->update_lock);
	/* sync the data cache */
	reg = vt1211_read8(data, VT1211_REG_FAN_DIV);
	data->fan_div[0] = (reg >> 4) & 3;
	data->fan_div[1] = (reg >> 6) & 3;
	data->fan_ctl = reg & 0xf;
	switch (fn) {
	case SHOW_SET_FAN_MIN:
		/* min is stored in register format for the current divisor */
		data->fan_min[ix] = RPM_TO_REG(val, data->fan_div[ix]);
		vt1211_write8(data, VT1211_REG_FAN_MIN(ix),
			      data->fan_min[ix]);
		break;
	case SHOW_SET_FAN_DIV:
		/* only power-of-two divisors 1/2/4/8 are encodable */
		switch (val) {
		case 1: data->fan_div[ix] = 0; break;
		case 2: data->fan_div[ix] = 1; break;
		case 4: data->fan_div[ix] = 2; break;
		case 8: data->fan_div[ix] = 3; break;
		default:
			count = -EINVAL;
			dev_warn(dev, "fan div value %ld not "
				 "supported. Choose one of 1, 2, "
				 "4, or 8.\n", val);
			goto EXIT;
		}
		/* write back both divisors plus the preserved control bits */
		vt1211_write8(data, VT1211_REG_FAN_DIV,
			      ((data->fan_div[1] << 6) |
			       (data->fan_div[0] << 4) |
				data->fan_ctl));
		break;
	default:
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}
EXIT:
	mutex_unlock(&data->update_lock);
	return count;
}
/* ---------------------------------------------------------------------
* PWM sysfs interfaces
* ix = [0-1]
* --------------------------------------------------------------------- */
#define SHOW_PWM 0
#define SHOW_SET_PWM_ENABLE 1
#define SHOW_SET_PWM_FREQ 2
#define SHOW_SET_PWM_AUTO_CHANNELS_TEMP 3
/* Show handler for pwm[1-2], pwm[1-2]_enable, pwm[1-2]_freq and
 * pwm[1-2]_auto_channels_temp, multiplexed on the attribute's nr. */
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int fn = attr2->nr;
	int res;

	if (fn == SHOW_PWM) {
		res = data->pwm[ix];
	} else if (fn == SHOW_SET_PWM_ENABLE) {
		/* enable bit set -> automatic mode (2), otherwise off (0) */
		res = ((data->pwm_ctl[ix] >> 3) & 1) ? 2 : 0;
	} else if (fn == SHOW_SET_PWM_FREQ) {
		res = 90000 >> (data->pwm_clk & 7);
	} else if (fn == SHOW_SET_PWM_AUTO_CHANNELS_TEMP) {
		res = (data->pwm_ctl[ix] & 7) + 1;
	} else {
		res = 0;
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}

	return sprintf(buf, "%d\n", res);
}
/*
 * Store handler for pwm[1-2]_enable, pwm1_freq and
 * pwm[1-2]_auto_channels_temp.  The shared FAN_DIV and PWM_CTL
 * registers are re-read before each read-modify-write so that bits
 * belonging to the other channel are preserved.  Returns count on
 * success or -EINVAL for out-of-range input.
 */
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *sensor_attr_2 =
						to_sensor_dev_attr_2(attr);
	int ix = sensor_attr_2->index;
	int fn = sensor_attr_2->nr;
	long val = simple_strtol(buf, NULL, 10);
	int tmp, reg;
	mutex_lock(&data->update_lock);
	switch (fn) {
	case SHOW_SET_PWM_ENABLE:
		/* sync the data cache */
		reg = vt1211_read8(data, VT1211_REG_FAN_DIV);
		data->fan_div[0] = (reg >> 4) & 3;
		data->fan_div[1] = (reg >> 6) & 3;
		data->fan_ctl = reg & 0xf;
		reg = vt1211_read8(data, VT1211_REG_PWM_CTL);
		data->pwm_ctl[0] = reg & 0xf;
		data->pwm_ctl[1] = (reg >> 4) & 0xf;
		switch (val) {
		case 0:
			/* clear this channel's enable bit (bit 3) */
			data->pwm_ctl[ix] &= 7;
			/* disable SmartGuardian if both PWM outputs are
			 * disabled */
			if ((data->pwm_ctl[ix ^ 1] & 1) == 0) {
				data->fan_ctl &= 0xe;
			}
			break;
		case 2:
			/* enable this channel and SmartGuardian */
			data->pwm_ctl[ix] |= 8;
			data->fan_ctl |= 1;
			break;
		default:
			count = -EINVAL;
			dev_warn(dev, "pwm mode %ld not supported. "
				 "Choose one of 0 or 2.\n", val);
			goto EXIT;
		}
		vt1211_write8(data, VT1211_REG_PWM_CTL,
			      ((data->pwm_ctl[1] << 4) |
				data->pwm_ctl[0]));
		vt1211_write8(data, VT1211_REG_FAN_DIV,
			      ((data->fan_div[1] << 6) |
			       (data->fan_div[0] << 4) |
				data->fan_ctl));
		break;
	case SHOW_SET_PWM_FREQ:
		/* clamp to the representable range, then encode the
		 * divider exponent into the low 3 bits of PWM_CLK */
		val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000);
		/* calculate tmp = log2(val) */
		tmp = 0;
		for (val >>= 1; val > 0; val >>= 1) {
			tmp++;
		}
		/* sync the data cache */
		reg = vt1211_read8(data, VT1211_REG_PWM_CLK);
		data->pwm_clk = (reg & 0xf8) | tmp;
		vt1211_write8(data, VT1211_REG_PWM_CLK, data->pwm_clk);
		break;
	case SHOW_SET_PWM_AUTO_CHANNELS_TEMP:
		if ((val < 1) || (val > 7)) {
			count = -EINVAL;
			dev_warn(dev, "temp channel %ld not supported. "
				 "Choose a value between 1 and 7.\n", val);
			goto EXIT;
		}
		if (!ISTEMP(val - 1, data->uch_config)) {
			count = -EINVAL;
			dev_warn(dev, "temp channel %ld is not available.\n",
				 val);
			goto EXIT;
		}
		/* sync the data cache */
		reg = vt1211_read8(data, VT1211_REG_PWM_CTL);
		data->pwm_ctl[0] = reg & 0xf;
		data->pwm_ctl[1] = (reg >> 4) & 0xf;
		/* keep the enable bit, replace the channel bits */
		data->pwm_ctl[ix] = (data->pwm_ctl[ix] & 8) | (val - 1);
		vt1211_write8(data, VT1211_REG_PWM_CTL,
			      ((data->pwm_ctl[1] << 4) | data->pwm_ctl[0]));
		break;
	default:
		dev_dbg(dev, "Unknown attr fetch (%d)\n", fn);
	}
EXIT:
	mutex_unlock(&data->update_lock);
	return count;
}
/* ---------------------------------------------------------------------
* PWM auto point definitions
* ix = [0-1]
* ap = [0-3]
* --------------------------------------------------------------------- */
/*
* pwm[ix+1]_auto_point[ap+1]_temp mapping table:
* Note that there is only a single set of temp auto points that controls both
* PWM controllers. We still create 2 sets of sysfs files to make it look
* more consistent even though they map to the same registers.
*
* ix ap : description
* -------------------
* 0 0 : pwm1/2 off temperature (pwm_auto_temp[0])
* 0 1 : pwm1/2 low speed temperature (pwm_auto_temp[1])
* 0 2 : pwm1/2 high speed temperature (pwm_auto_temp[2])
* 0 3 : pwm1/2 full speed temperature (pwm_auto_temp[3])
* 1 0 : pwm1/2 off temperature (pwm_auto_temp[0])
* 1 1 : pwm1/2 low speed temperature (pwm_auto_temp[1])
* 1 2 : pwm1/2 high speed temperature (pwm_auto_temp[2])
* 1 3 : pwm1/2 full speed temperature (pwm_auto_temp[3])
*/
/* Show handler for pwm[1-2]_auto_point[1-4]_temp.  The raw auto-point
 * value is scaled using the temp channel currently driving this PWM. */
static ssize_t show_pwm_auto_point_temp(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int channel = data->pwm_ctl[attr2->index] & 7;
	int temp = TEMP_FROM_REG(channel, data->pwm_auto_temp[attr2->nr]);

	return sprintf(buf, "%d\n", temp);
}
/* Store handler for pwm[1-2]_auto_point[1-4]_temp.  The PWM control
 * register is re-read first so the conversion uses the temp channel
 * currently assigned to this PWM. */
static ssize_t set_pwm_auto_point_temp(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int ap = attr2->nr;
	long val = simple_strtol(buf, NULL, 10);
	int ctl;

	mutex_lock(&data->update_lock);
	/* sync the data cache */
	ctl = vt1211_read8(data, VT1211_REG_PWM_CTL);
	data->pwm_ctl[0] = ctl & 0xf;
	data->pwm_ctl[1] = (ctl >> 4) & 0xf;
	data->pwm_auto_temp[ap] = TEMP_TO_REG(data->pwm_ctl[ix] & 7, val);
	vt1211_write8(data, VT1211_REG_PWM_AUTO_TEMP(ap),
		      data->pwm_auto_temp[ap]);
	mutex_unlock(&data->update_lock);

	return count;
}
/*
* pwm[ix+1]_auto_point[ap+1]_pwm mapping table:
* Note that the PWM auto points 0 & 3 are hard-wired in the VT1211 and can't
* be changed.
*
* ix ap : description
* -------------------
* 0 0 : pwm1 off (pwm_auto_pwm[0][0], hard-wired to 0)
* 0 1 : pwm1 low speed duty cycle (pwm_auto_pwm[0][1])
* 0 2 : pwm1 high speed duty cycle (pwm_auto_pwm[0][2])
* 0 3 : pwm1 full speed (pwm_auto_pwm[0][3], hard-wired to 255)
* 1 0 : pwm2 off (pwm_auto_pwm[1][0], hard-wired to 0)
* 1 1 : pwm2 low speed duty cycle (pwm_auto_pwm[1][1])
* 1 2 : pwm2 high speed duty cycle (pwm_auto_pwm[1][2])
* 1 3 : pwm2 full speed (pwm_auto_pwm[1][3], hard-wired to 255)
*/
/* Show handler for pwm[1-2]_auto_point[1-4]_pwm: report the cached
 * duty-cycle value for this PWM/auto-point pair. */
static ssize_t show_pwm_auto_point_pwm(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vt1211_data *data = vt1211_update_device(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);

	return sprintf(buf, "%d\n",
		       data->pwm_auto_pwm[attr2->index][attr2->nr]);
}
/* Store handler for pwm[1-2]_auto_point[2-3]_pwm.  Rejects values
 * outside 0-255 with -EINVAL before touching any state. */
static ssize_t set_pwm_auto_point_pwm(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
	int ix = attr2->index;
	int ap = attr2->nr;
	long val = simple_strtol(buf, NULL, 10);

	if (val < 0 || val > 255) {
		dev_err(dev, "pwm value %ld is out of range. "
			"Choose a value between 0 and 255.\n", val);
		return -EINVAL;
	}

	mutex_lock(&data->update_lock);
	data->pwm_auto_pwm[ix][ap] = val;
	vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap),
		      data->pwm_auto_pwm[ix][ap]);
	mutex_unlock(&data->update_lock);

	return count;
}
/* ---------------------------------------------------------------------
* Miscellaneous sysfs interfaces (VRM, VID, name, and (legacy) alarms)
* --------------------------------------------------------------------- */
/* Report the configured voltage regulator module (VRM) version */
static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct vt1211_data *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", priv->vrm);
}
/* Set the VRM version used for VID decoding.  vrm is stored in a u8,
 * so reject values that would be silently truncated. */
static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct vt1211_data *data = dev_get_drvdata(dev);
	long val = simple_strtol(buf, NULL, 10);

	if (val < 0 || val > 255)
		return -EINVAL;

	data->vrm = val;

	return count;
}
/* Decode the VID pins (latched once at init) into a CPU core voltage
 * using the configured VRM version */
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct vt1211_data *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", vid_from_reg(priv->vid, priv->vrm));
}
/* Report the driver name (standard hwmon "name" attribute) */
static ssize_t show_name(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct vt1211_data *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", priv->name);
}
/* Report the combined 16-bit alarm word (legacy "alarms" attribute);
 * the cache is refreshed first so the bits are current */
static ssize_t show_alarms(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct vt1211_data *priv = vt1211_update_device(dev);

	return sprintf(buf, "%d\n", priv->alarms);
}
/* ---------------------------------------------------------------------
* Device attribute structs
* --------------------------------------------------------------------- */
#define SENSOR_ATTR_IN_INPUT(ix) \
SENSOR_ATTR_2(in##ix##_input, S_IRUGO, \
show_in, NULL, SHOW_IN_INPUT, ix)
static struct sensor_device_attribute_2 vt1211_sysfs_in_input[] = {
SENSOR_ATTR_IN_INPUT(0),
SENSOR_ATTR_IN_INPUT(1),
SENSOR_ATTR_IN_INPUT(2),
SENSOR_ATTR_IN_INPUT(3),
SENSOR_ATTR_IN_INPUT(4),
SENSOR_ATTR_IN_INPUT(5),
};
#define SENSOR_ATTR_IN_MIN(ix) \
SENSOR_ATTR_2(in##ix##_min, S_IRUGO | S_IWUSR, \
show_in, set_in, SHOW_SET_IN_MIN, ix)
static struct sensor_device_attribute_2 vt1211_sysfs_in_min[] = {
SENSOR_ATTR_IN_MIN(0),
SENSOR_ATTR_IN_MIN(1),
SENSOR_ATTR_IN_MIN(2),
SENSOR_ATTR_IN_MIN(3),
SENSOR_ATTR_IN_MIN(4),
SENSOR_ATTR_IN_MIN(5),
};
#define SENSOR_ATTR_IN_MAX(ix) \
SENSOR_ATTR_2(in##ix##_max, S_IRUGO | S_IWUSR, \
show_in, set_in, SHOW_SET_IN_MAX, ix)
static struct sensor_device_attribute_2 vt1211_sysfs_in_max[] = {
SENSOR_ATTR_IN_MAX(0),
SENSOR_ATTR_IN_MAX(1),
SENSOR_ATTR_IN_MAX(2),
SENSOR_ATTR_IN_MAX(3),
SENSOR_ATTR_IN_MAX(4),
SENSOR_ATTR_IN_MAX(5),
};
#define SENSOR_ATTR_IN_ALARM(ix) \
SENSOR_ATTR_2(in##ix##_alarm, S_IRUGO, \
show_in, NULL, SHOW_IN_ALARM, ix)
static struct sensor_device_attribute_2 vt1211_sysfs_in_alarm[] = {
SENSOR_ATTR_IN_ALARM(0),
SENSOR_ATTR_IN_ALARM(1),
SENSOR_ATTR_IN_ALARM(2),
SENSOR_ATTR_IN_ALARM(3),
SENSOR_ATTR_IN_ALARM(4),
SENSOR_ATTR_IN_ALARM(5),
};
#define SENSOR_ATTR_TEMP_INPUT(ix) \
SENSOR_ATTR_2(temp##ix##_input, S_IRUGO, \
show_temp, NULL, SHOW_TEMP_INPUT, ix-1)
static struct sensor_device_attribute_2 vt1211_sysfs_temp_input[] = {
SENSOR_ATTR_TEMP_INPUT(1),
SENSOR_ATTR_TEMP_INPUT(2),
SENSOR_ATTR_TEMP_INPUT(3),
SENSOR_ATTR_TEMP_INPUT(4),
SENSOR_ATTR_TEMP_INPUT(5),
SENSOR_ATTR_TEMP_INPUT(6),
SENSOR_ATTR_TEMP_INPUT(7),
};
#define SENSOR_ATTR_TEMP_MAX(ix) \
SENSOR_ATTR_2(temp##ix##_max, S_IRUGO | S_IWUSR, \
show_temp, set_temp, SHOW_SET_TEMP_MAX, ix-1)
static struct sensor_device_attribute_2 vt1211_sysfs_temp_max[] = {
SENSOR_ATTR_TEMP_MAX(1),
SENSOR_ATTR_TEMP_MAX(2),
SENSOR_ATTR_TEMP_MAX(3),
SENSOR_ATTR_TEMP_MAX(4),
SENSOR_ATTR_TEMP_MAX(5),
SENSOR_ATTR_TEMP_MAX(6),
SENSOR_ATTR_TEMP_MAX(7),
};
#define SENSOR_ATTR_TEMP_MAX_HYST(ix) \
SENSOR_ATTR_2(temp##ix##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp, set_temp, SHOW_SET_TEMP_MAX_HYST, ix-1)
static struct sensor_device_attribute_2 vt1211_sysfs_temp_max_hyst[] = {
SENSOR_ATTR_TEMP_MAX_HYST(1),
SENSOR_ATTR_TEMP_MAX_HYST(2),
SENSOR_ATTR_TEMP_MAX_HYST(3),
SENSOR_ATTR_TEMP_MAX_HYST(4),
SENSOR_ATTR_TEMP_MAX_HYST(5),
SENSOR_ATTR_TEMP_MAX_HYST(6),
SENSOR_ATTR_TEMP_MAX_HYST(7),
};
#define SENSOR_ATTR_TEMP_ALARM(ix) \
SENSOR_ATTR_2(temp##ix##_alarm, S_IRUGO, \
show_temp, NULL, SHOW_TEMP_ALARM, ix-1)
static struct sensor_device_attribute_2 vt1211_sysfs_temp_alarm[] = {
SENSOR_ATTR_TEMP_ALARM(1),
SENSOR_ATTR_TEMP_ALARM(2),
SENSOR_ATTR_TEMP_ALARM(3),
SENSOR_ATTR_TEMP_ALARM(4),
SENSOR_ATTR_TEMP_ALARM(5),
SENSOR_ATTR_TEMP_ALARM(6),
SENSOR_ATTR_TEMP_ALARM(7),
};
#define SENSOR_ATTR_FAN(ix) \
SENSOR_ATTR_2(fan##ix##_input, S_IRUGO, \
show_fan, NULL, SHOW_FAN_INPUT, ix-1), \
SENSOR_ATTR_2(fan##ix##_min, S_IRUGO | S_IWUSR, \
show_fan, set_fan, SHOW_SET_FAN_MIN, ix-1), \
SENSOR_ATTR_2(fan##ix##_div, S_IRUGO | S_IWUSR, \
show_fan, set_fan, SHOW_SET_FAN_DIV, ix-1), \
SENSOR_ATTR_2(fan##ix##_alarm, S_IRUGO, \
show_fan, NULL, SHOW_FAN_ALARM, ix-1)
#define SENSOR_ATTR_PWM(ix) \
SENSOR_ATTR_2(pwm##ix, S_IRUGO, \
show_pwm, NULL, SHOW_PWM, ix-1), \
SENSOR_ATTR_2(pwm##ix##_enable, S_IRUGO | S_IWUSR, \
show_pwm, set_pwm, SHOW_SET_PWM_ENABLE, ix-1), \
SENSOR_ATTR_2(pwm##ix##_auto_channels_temp, S_IRUGO | S_IWUSR, \
show_pwm, set_pwm, SHOW_SET_PWM_AUTO_CHANNELS_TEMP, ix-1)
#define SENSOR_ATTR_PWM_FREQ(ix) \
SENSOR_ATTR_2(pwm##ix##_freq, S_IRUGO | S_IWUSR, \
show_pwm, set_pwm, SHOW_SET_PWM_FREQ, ix-1)
#define SENSOR_ATTR_PWM_FREQ_RO(ix) \
SENSOR_ATTR_2(pwm##ix##_freq, S_IRUGO, \
show_pwm, NULL, SHOW_SET_PWM_FREQ, ix-1)
#define SENSOR_ATTR_PWM_AUTO_POINT_TEMP(ix, ap) \
SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO | S_IWUSR, \
show_pwm_auto_point_temp, set_pwm_auto_point_temp, \
ap-1, ix-1)
#define SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(ix, ap) \
SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_temp, S_IRUGO, \
show_pwm_auto_point_temp, NULL, \
ap-1, ix-1)
#define SENSOR_ATTR_PWM_AUTO_POINT_PWM(ix, ap) \
SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO | S_IWUSR, \
show_pwm_auto_point_pwm, set_pwm_auto_point_pwm, \
ap-1, ix-1)
#define SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(ix, ap) \
SENSOR_ATTR_2(pwm##ix##_auto_point##ap##_pwm, S_IRUGO, \
show_pwm_auto_point_pwm, NULL, \
ap-1, ix-1)
static struct sensor_device_attribute_2 vt1211_sysfs_fan_pwm[] = {
SENSOR_ATTR_FAN(1),
SENSOR_ATTR_FAN(2),
SENSOR_ATTR_PWM(1),
SENSOR_ATTR_PWM(2),
SENSOR_ATTR_PWM_FREQ(1),
SENSOR_ATTR_PWM_FREQ_RO(2),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 1),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 2),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 3),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP(1, 4),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 1),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 2),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 3),
SENSOR_ATTR_PWM_AUTO_POINT_TEMP_RO(2, 4),
SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(1, 1),
SENSOR_ATTR_PWM_AUTO_POINT_PWM(1, 2),
SENSOR_ATTR_PWM_AUTO_POINT_PWM(1, 3),
SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(1, 4),
SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(2, 1),
SENSOR_ATTR_PWM_AUTO_POINT_PWM(2, 2),
SENSOR_ATTR_PWM_AUTO_POINT_PWM(2, 3),
SENSOR_ATTR_PWM_AUTO_POINT_PWM_RO(2, 4),
};
static struct device_attribute vt1211_sysfs_misc[] = {
__ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm),
__ATTR(cpu0_vid, S_IRUGO, show_vid, NULL),
__ATTR(name, S_IRUGO, show_name, NULL),
__ATTR(alarms, S_IRUGO, show_alarms, NULL),
};
/* ---------------------------------------------------------------------
* Device registration and initialization
* --------------------------------------------------------------------- */
/*
 * One-time chip setup at probe: latch the VRM version, apply the
 * uch_config and int_mode module parameters (if set by the user), and
 * seed the hard-wired PWM auto-point values in the cache.
 */
static void __devinit vt1211_init_device(struct vt1211_data *data)
{
	/* set VRM */
	data->vrm = vid_which_vrm();
	/* Read (and initialize) UCH config */
	data->uch_config = vt1211_read8(data, VT1211_REG_UCH_CONFIG);
	if (uch_config > -1) {
		/* only bits 2-6 are replaced; the rest of the register
		 * keeps its power-on value */
		data->uch_config = (data->uch_config & 0x83) |
				   (uch_config << 2);
		vt1211_write8(data, VT1211_REG_UCH_CONFIG, data->uch_config);
	}
	/* Initialize the interrupt mode (if request at module load time).
	 * The VT1211 implements 3 different modes for clearing interrupts:
	 * 0: Clear INT when status register is read. Regenerate INT as long
	 *    as temp stays above hysteresis limit.
	 * 1: Clear INT when status register is read. DON'T regenerate INT
	 *    until temp falls below hysteresis limit and exceeds hot limit
	 *    again.
	 * 2: Clear INT when temp falls below max limit.
	 *
	 * The driver only allows to force mode 0 since that's the only one
	 * that makes sense for 'sensors' */
	if (int_mode == 0) {
		vt1211_write8(data, VT1211_REG_TEMP1_CONFIG, 0);
		vt1211_write8(data, VT1211_REG_TEMP2_CONFIG, 0);
	}
	/* Fill in some hard wired values into our data struct */
	data->pwm_auto_pwm[0][3] = 255;
	data->pwm_auto_pwm[1][3] = 255;
}
/* Remove every sysfs attribute this driver may have created.  Also
 * used on the probe error path, where not all files exist yet. */
static void vt1211_remove_sysfs(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ix;

	/* voltage attributes */
	for (ix = 0; ix < ARRAY_SIZE(vt1211_sysfs_in_input); ix++) {
		device_remove_file(dev, &vt1211_sysfs_in_input[ix].dev_attr);
		device_remove_file(dev, &vt1211_sysfs_in_min[ix].dev_attr);
		device_remove_file(dev, &vt1211_sysfs_in_max[ix].dev_attr);
		device_remove_file(dev, &vt1211_sysfs_in_alarm[ix].dev_attr);
	}
	/* temperature attributes */
	for (ix = 0; ix < ARRAY_SIZE(vt1211_sysfs_temp_input); ix++) {
		device_remove_file(dev, &vt1211_sysfs_temp_input[ix].dev_attr);
		device_remove_file(dev, &vt1211_sysfs_temp_max[ix].dev_attr);
		device_remove_file(dev,
				   &vt1211_sysfs_temp_max_hyst[ix].dev_attr);
		device_remove_file(dev, &vt1211_sysfs_temp_alarm[ix].dev_attr);
	}
	/* fan and pwm attributes */
	for (ix = 0; ix < ARRAY_SIZE(vt1211_sysfs_fan_pwm); ix++)
		device_remove_file(dev, &vt1211_sysfs_fan_pwm[ix].dev_attr);
	/* misc attributes */
	for (ix = 0; ix < ARRAY_SIZE(vt1211_sysfs_misc); ix++)
		device_remove_file(dev, &vt1211_sysfs_misc[ix]);
}
/*
 * Probe: allocate per-device state, claim the I/O region, initialize
 * the chip, create the sysfs attributes (voltage/temp files only for
 * channels enabled in uch_config) and register with the hwmon class.
 * Fix: the platform_get_resource() result was previously dereferenced
 * without a NULL check, which would oops on a missing resource.
 */
static int __devinit vt1211_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vt1211_data *data;
	struct resource *res;
	int i, err;

	data = kzalloc(sizeof(struct vt1211_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		dev_err(dev, "Out of memory\n");
		goto EXIT;
	}

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		err = -ENODEV;
		dev_err(dev, "Missing I/O resource\n");
		goto EXIT_KFREE;
	}
	if (!request_region(res->start, resource_size(res), DRVNAME)) {
		err = -EBUSY;
		dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
			(unsigned long)res->start, (unsigned long)res->end);
		goto EXIT_KFREE;
	}
	data->addr = res->start;
	data->name = DRVNAME;
	mutex_init(&data->update_lock);

	platform_set_drvdata(pdev, data);

	/* Initialize the VT1211 chip */
	vt1211_init_device(data);

	/* Create sysfs interface files */
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_in_input); i++) {
		if (ISVOLT(i, data->uch_config)) {
			if ((err = device_create_file(dev,
				&vt1211_sysfs_in_input[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_in_min[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_in_max[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_in_alarm[i].dev_attr))) {
				goto EXIT_DEV_REMOVE;
			}
		}
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_temp_input); i++) {
		if (ISTEMP(i, data->uch_config)) {
			if ((err = device_create_file(dev,
				&vt1211_sysfs_temp_input[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_temp_max[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_temp_max_hyst[i].dev_attr)) ||
			    (err = device_create_file(dev,
				&vt1211_sysfs_temp_alarm[i].dev_attr))) {
				goto EXIT_DEV_REMOVE;
			}
		}
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_fan_pwm); i++) {
		err = device_create_file(dev,
					 &vt1211_sysfs_fan_pwm[i].dev_attr);
		if (err) {
			goto EXIT_DEV_REMOVE;
		}
	}
	for (i = 0; i < ARRAY_SIZE(vt1211_sysfs_misc); i++) {
		err = device_create_file(dev,
					 &vt1211_sysfs_misc[i]);
		if (err) {
			goto EXIT_DEV_REMOVE;
		}
	}

	/* Register device */
	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(dev, "Class registration failed (%d)\n", err);
		goto EXIT_DEV_REMOVE_SILENT;
	}

	return 0;

EXIT_DEV_REMOVE:
	dev_err(dev, "Sysfs interface creation failed (%d)\n", err);
EXIT_DEV_REMOVE_SILENT:
	vt1211_remove_sysfs(pdev);
	release_region(res->start, resource_size(res));
EXIT_KFREE:
	platform_set_drvdata(pdev, NULL);
	kfree(data);
EXIT:
	return err;
}
/* Undo vt1211_probe(): unregister from hwmon first, then tear down
 * the sysfs files, free the state and release the I/O region. */
static int __devexit vt1211_remove(struct platform_device *pdev)
{
	struct vt1211_data *priv = platform_get_drvdata(pdev);
	struct resource *res;

	hwmon_device_unregister(priv->hwmon_dev);
	vt1211_remove_sysfs(pdev);
	platform_set_drvdata(pdev, NULL);
	kfree(priv);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start, resource_size(res));

	return 0;
}
/* Platform driver glue; probe/remove run against the single platform
 * device created in vt1211_device_add() at module-init time */
static struct platform_driver vt1211_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = DRVNAME,
	},
	.probe = vt1211_probe,
	.remove = __devexit_p(vt1211_remove),
};
/*
 * Instantiate the platform device for the chip found at 'address'
 * (128-byte I/O window).  Sets the file-global 'pdev' as a side
 * effect; vt1211_exit() relies on it.  Returns 0 on success or a
 * negative errno.
 */
static int __init vt1211_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + 0x7f,
		.flags	= IORESOURCE_IO,
	};
	int err;
	pdev = platform_device_alloc(DRVNAME, address);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed (%d)\n", err);
		goto EXIT;
	}
	res.name = pdev->name;
	/* bail out if ACPI claims the same I/O range */
	err = acpi_check_resource_conflict(&res);
	if (err)
		goto EXIT_DEV_PUT;
	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		pr_err("Device resource addition failed (%d)\n", err);
		goto EXIT_DEV_PUT;
	}
	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto EXIT_DEV_PUT;
	}
	return 0;
EXIT_DEV_PUT:
	platform_device_put(pdev);
EXIT:
	return err;
}
/*
 * Probe one Super-I/O config port (0x2e or 0x4e) for a VT1211 and, if
 * found with the HW monitor enabled, store its base I/O address in
 * *address.  force_id (module parameter) overrides the device-ID
 * check.  Returns 0 on success, -ENODEV otherwise.
 */
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
	int err = -ENODEV;
	int devid;
	superio_enter(sio_cip);
	devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
	if (devid != SIO_VT1211_ID) {
		goto EXIT;
	}
	superio_select(sio_cip, SIO_VT1211_LDN_HWMON);
	if ((superio_inb(sio_cip, SIO_VT1211_ACTIVE) & 1) == 0) {
		pr_warn("HW monitor is disabled, skipping\n");
		goto EXIT;
	}
	/* base address high/low bytes; low byte is masked off since the
	 * monitor block is aligned to a 256-byte boundary */
	*address = ((superio_inb(sio_cip, SIO_VT1211_BADDR) << 8) |
		    (superio_inb(sio_cip, SIO_VT1211_BADDR + 1))) & 0xff00;
	if (*address == 0) {
		pr_warn("Base address is not set, skipping\n");
		goto EXIT;
	}
	err = 0;
	pr_info("Found VT1211 chip at 0x%04x, revision %u\n",
		*address, superio_inb(sio_cip, SIO_VT1211_DEVREV));
EXIT:
	/* always leave config mode, found or not */
	superio_exit(sio_cip);
	return err;
}
/*
 * Module entry point: locate the chip at either Super-I/O configuration
 * port, validate module parameters, then register the driver and add the
 * platform device.  Returns 0 on success or a negative errno.
 *
 * Fix: the original chained 'if ((err = ...) && (err = ...))' hid the
 * assignments inside the condition (checkpatch-discouraged idiom); the
 * probing is now written as two explicit steps with identical semantics.
 */
static int __init vt1211_init(void)
{
	int err;
	unsigned short address = 0;

	/* try the primary config port first, fall back to the secondary */
	err = vt1211_find(SIO_REG_CIP1, &address);
	if (err)
		err = vt1211_find(SIO_REG_CIP2, &address);
	if (err)
		goto EXIT;

	/* -1 means "use the chip default", hence the lower bound */
	if ((uch_config < -1) || (uch_config > 31)) {
		err = -EINVAL;
		pr_warn("Invalid UCH configuration %d. "
			"Choose a value between 0 and 31.\n", uch_config);
		goto EXIT;
	}

	if ((int_mode < -1) || (int_mode > 0)) {
		err = -EINVAL;
		pr_warn("Invalid interrupt mode %d. "
			"Only mode 0 is supported.\n", int_mode);
		goto EXIT;
	}

	err = platform_driver_register(&vt1211_driver);
	if (err)
		goto EXIT;

	/* Sets global pdev as a side effect */
	err = vt1211_device_add(address);
	if (err)
		goto EXIT_DRV_UNREGISTER;

	return 0;

EXIT_DRV_UNREGISTER:
	platform_driver_unregister(&vt1211_driver);
EXIT:
	return err;
}
/*
 * Module exit: remove the platform device created by vt1211_device_add()
 * (which triggers vt1211_remove()), then unregister the driver.
 */
static void __exit vt1211_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&vt1211_driver);
}
MODULE_AUTHOR("Juerg Haefliger <juergh@gmail.com>");
MODULE_DESCRIPTION("VT1211 sensors");
MODULE_LICENSE("GPL");
module_init(vt1211_init);
module_exit(vt1211_exit);
| gpl-2.0 |
Altaf-Mahdi/flo | drivers/input/touchscreen/jornada720_ts.c | 4826 | 4650 | /*
* drivers/input/touchscreen/jornada720_ts.c
*
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
*
* Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl>
* based on HP Jornada 56x touchscreen driver by Alex Lange <chicken@handhelds.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* HP Jornada 710/720/729 Touchscreen Driver
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <mach/hardware.h>
#include <mach/jornada720.h>
#include <mach/irqs.h>
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 touchscreen driver");
MODULE_LICENSE("GPL v2");
/* Per-device state: input device plus the raw sample buffers filled by SSP */
struct jornada_ts {
	struct input_dev *dev;
	int x_data[4];		/* X sample values (3 low bytes + combined high bits) */
	int y_data[4];		/* Y sample values (3 low bytes + combined high bits) */
};
/*
 * Read one touch sample burst from the SSP: three low-byte X samples,
 * three low-byte Y samples, then one byte of combined X high bits and
 * one byte of combined Y high bits.  The read order matters and matches
 * the controller's reply sequence exactly.
 */
static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts)
{
	int i;

	/* 3 low word X samples */
	for (i = 0; i < 3; i++)
		jornada_ts->x_data[i] = jornada_ssp_byte(TXDUMMY);

	/* 3 low word Y samples */
	for (i = 0; i < 3; i++)
		jornada_ts->y_data[i] = jornada_ssp_byte(TXDUMMY);

	/* combined x samples bits, then combined y samples bits */
	jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY);
	jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY);
}
/*
 * Combine three 8-bit samples with their packed high bits (coords[3],
 * two bits per sample) into full 10-bit values and return their average.
 */
static int jornada720_ts_average(int coords[4])
{
	int high = coords[3];
	int sum = 0;

	sum += coords[0] | ((high & 0x03) << 8);
	sum += coords[1] | ((high & 0x0c) << 6);
	sum += coords[2] | ((high & 0x30) << 4);

	return sum / 3;
}
/*
 * Touchscreen IRQ handler.
 *
 * GPIO9 high means the pen has been lifted, so report a pen-up event.
 * Otherwise start an SSP transaction, request touch samples, and if the
 * controller acknowledges with TXDUMMY, collect and average the samples
 * and report the touch position.
 */
static irqreturn_t jornada720_ts_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
	struct input_dev *input = jornada_ts->dev;
	int x, y;

	/* If GPIO_GPIO9 is set to high then report pen up */
	if (GPLR & GPIO_GPIO(9)) {
		input_report_key(input, BTN_TOUCH, 0);
		input_sync(input);
	} else {
		jornada_ssp_start();

		/* proper reply to request is always TXDUMMY */
		if (jornada_ssp_inout(GETTOUCHSAMPLES) == TXDUMMY) {
			jornada720_ts_collect_data(jornada_ts);

			x = jornada720_ts_average(jornada_ts->x_data);
			y = jornada720_ts_average(jornada_ts->y_data);

			input_report_key(input, BTN_TOUCH, 1);
			input_report_abs(input, ABS_X, x);
			input_report_abs(input, ABS_Y, y);
			input_sync(input);
		}

		/* release the SSP bus even if the reply was bad */
		jornada_ssp_end();
	}

	return IRQ_HANDLED;
}
/*
 * Probe: allocate driver state and an input device, describe the event
 * capabilities (BTN_TOUCH plus absolute X/Y within the panel's usable
 * range), hook up the pen-down interrupt and register the input device.
 *
 * On failure everything is unwound; note that input_free_device() and
 * kfree() both accept NULL, so the combined allocation check is safe.
 */
static int __devinit jornada720_ts_probe(struct platform_device *pdev)
{
	struct jornada_ts *jornada_ts;
	struct input_dev *input_dev;
	int error;

	jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL);
	input_dev = input_allocate_device();

	if (!jornada_ts || !input_dev) {
		error = -ENOMEM;
		goto fail1;
	}

	platform_set_drvdata(pdev, jornada_ts);

	jornada_ts->dev = input_dev;

	input_dev->name = "HP Jornada 7xx Touchscreen";
	input_dev->phys = "jornadats/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;

	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	/* raw ADC range actually produced by the panel */
	input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);

	error = request_irq(IRQ_GPIO9,
			jornada720_ts_interrupt,
			IRQF_TRIGGER_RISING,
			"HP7XX Touchscreen driver", pdev);
	if (error) {
		printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n");
		goto fail1;
	}

	error = input_register_device(jornada_ts->dev);
	if (error)
		goto fail2;

	return 0;

 fail2:
	free_irq(IRQ_GPIO9, pdev);
 fail1:
	platform_set_drvdata(pdev, NULL);
	input_free_device(input_dev);
	kfree(jornada_ts);
	return error;
}
/*
 * Remove: free the IRQ first so no handler can touch the input device
 * while it is being unregistered, then release the remaining resources.
 */
static int __devexit jornada720_ts_remove(struct platform_device *pdev)
{
	struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);

	free_irq(IRQ_GPIO9, pdev);
	platform_set_drvdata(pdev, NULL);
	input_unregister_device(jornada_ts->dev);
	kfree(jornada_ts);

	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:jornada_ts");
/* Platform driver glue; matched by the "jornada_ts" device name */
static struct platform_driver jornada720_ts_driver = {
	.probe		= jornada720_ts_probe,
	.remove		= __devexit_p(jornada720_ts_remove),
	.driver		= {
		.name	= "jornada_ts",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(jornada720_ts_driver);
| gpl-2.0 |
u-ra/android_kernel_htc_msm8660 | mm/mm_init.c | 4826 | 3837 | /*
* mm_init.c - Memory initialisation verification and debugging
*
* Copyright 2008 IBM Corporation, 2008
* Author Mel Gorman <mel@csn.ul.ie>
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include "internal.h"
#ifdef CONFIG_DEBUG_MEMORY_INIT
int mminit_loglevel;
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT 0
#endif
/* The zonelists are simply reported, validation is manual. */
/* The zonelists are simply reported, validation is manual. */
void mminit_verify_zonelist(void)
{
	int nid;

	/* only emit the report at MMINIT_VERIFY log level or above */
	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		/* walk every (zonelist, zone) pair of this node */
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				printk(KERN_CONT "%d:%s ",
					zone->node, zone->name);
#else
				printk(KERN_CONT "0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			printk(KERN_CONT "\n");
		}
	}
}
/*
 * Report how the section/node/zone information is packed into page->flags
 * and BUG if the computed shifts or masks are inconsistent (overlapping
 * bitfields would silently corrupt page metadata).
 */
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_offsets",
		"Section %lu Node %lu Zone %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_zoneid",
		"Zone ID: %lu -> %lu\n",
		(unsigned long)ZONEID_PGOFF,
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT));
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d unused %d -> %d flags %d -> %d\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif

	/* consume each field's width from the top of the word and verify
	 * it lands exactly at that field's declared shift */
	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps: OR equals ADD only if disjoint */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
/*
 * Sanity-check that the node/zone/pfn encoded in a struct page round-trip
 * to the values the caller says the page was initialised with.
 */
void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
			unsigned long nid, unsigned long pfn)
{
	BUG_ON(page_to_nid(page) != nid);
	BUG_ON(page_zonenum(page) != zone);
	BUG_ON(page_to_pfn(page) != pfn);
}
/*
 * Parse the "mminit_loglevel=" early command-line parameter into
 * mminit_loglevel.  Always returns 0 (parse errors leave the default).
 */
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);
/*
 * Create the /sys/kernel/mm kobject that mm-related sysfs files hang off.
 * Returns 0 on success, -ENOMEM if the kobject could not be created.
 */
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);

	return mm_kobj ? 0 : -ENOMEM;
}
__initcall(mm_sysfs_init);
| gpl-2.0 |
brindev/bugfree-wookie | drivers/media/video/cx25840/cx25840-core.c | 4826 | 214339 | /* cx25840 - Conexant CX25840 audio/video decoder driver
*
* Copyright (C) 2004 Ulf Eklund
*
* Based on the saa7115 driver and on the first version of Chris Kennedy's
* cx25840 driver.
*
* Changes by Tyler Trafford <tatrafford@comcast.net>
* - cleanup/rewrite for V4L2 API (2005)
*
* VBI support by Hans Verkuil <hverkuil@xs4all.nl>.
*
* NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca>
* with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>.
*
* CX23885 support by Steven Toth <stoth@linuxtv.org>.
*
* CX2388[578] IRQ handling, IO Pin mux configuration and other small fixes are
* Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
*
* CX23888 DIF support for the HVR1850
* Copyright (C) 2011 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
#include <media/cx25840.h>
#include "cx25840-core.h"
MODULE_DESCRIPTION("Conexant CX25840 audio/video decoder driver");
MODULE_AUTHOR("Ulf Eklund, Chris Kennedy, Hans Verkuil, Tyler Trafford");
MODULE_LICENSE("GPL");
#define CX25840_VID_INT_STAT_REG 0x410
#define CX25840_VID_INT_STAT_BITS 0x0000ffff
#define CX25840_VID_INT_MASK_BITS 0xffff0000
#define CX25840_VID_INT_MASK_SHFT 16
#define CX25840_VID_INT_MASK_REG 0x412
#define CX23885_AUD_MC_INT_MASK_REG 0x80c
#define CX23885_AUD_MC_INT_STAT_BITS 0xffff0000
#define CX23885_AUD_MC_INT_CTRL_BITS 0x0000ffff
#define CX23885_AUD_MC_INT_STAT_SHFT 16
#define CX25840_AUD_INT_CTRL_REG 0x812
#define CX25840_AUD_INT_STAT_REG 0x813
#define CX23885_PIN_CTRL_IRQ_REG 0x123
#define CX23885_PIN_CTRL_IRQ_IR_STAT 0x40
#define CX23885_PIN_CTRL_IRQ_AUD_STAT 0x20
#define CX23885_PIN_CTRL_IRQ_VID_STAT 0x10
#define CX25840_IR_STATS_REG 0x210
#define CX25840_IR_IRQEN_REG 0x214
static int cx25840_debug;
module_param_named(debug,cx25840_debug, int, 0644);
MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
/* ----------------------------------------------------------------------- */
static void cx23885_std_setup(struct i2c_client *client);
/*
 * Write one byte to a chip register: the 16-bit register address is sent
 * big-endian, followed by the data byte.  Returns the i2c_master_send()
 * result (number of bytes sent, or a negative errno).
 */
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
	u8 msg[3] = { addr >> 8, addr & 0xff, value };

	return i2c_master_send(client, msg, 3);
}
/*
 * Write a 32-bit value to a chip register: big-endian 16-bit register
 * address followed by the value in little-endian byte order.  Returns the
 * i2c_master_send() result.
 */
int cx25840_write4(struct i2c_client *client, u16 addr, u32 value)
{
	u8 msg[6];
	int i;

	msg[0] = addr >> 8;
	msg[1] = addr & 0xff;
	/* payload goes out least-significant byte first */
	for (i = 0; i < 4; i++)
		msg[2 + i] = value >> (8 * i);

	return i2c_master_send(client, msg, 6);
}
/*
 * Read one byte from a chip register using a combined write/read transfer.
 * Returns the register value, or 0 if the I2C transfer failed.
 */
u8 cx25840_read(struct i2c_client *client, u16 addr)
{
	u8 reg[2] = { addr >> 8, addr & 0xff };
	u8 data;
	struct i2c_msg msgs[2] = {
		{
			/* write the big-endian register address */
			.addr	= client->addr,
			.flags	= 0,
			.len	= 2,
			.buf	= (char *)reg,
		}, {
			/* then read one data byte back */
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.len	= 1,
			.buf	= (char *)&data,
		}
	};

	if (i2c_transfer(client->adapter, msgs, 2) < 2)
		return 0;

	return data;
}
/*
 * Read a 32-bit register value using a combined write/read transfer.
 * Bytes arrive least-significant first; returns 0 on transfer failure.
 */
u32 cx25840_read4(struct i2c_client *client, u16 addr)
{
	u8 reg[2] = { addr >> 8, addr & 0xff };
	u8 data[4];
	struct i2c_msg msgs[2] = {
		{
			/* write the big-endian register address */
			.addr	= client->addr,
			.flags	= 0,
			.len	= 2,
			.buf	= (char *)reg,
		}, {
			/* then read the four data bytes back */
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.len	= 4,
			.buf	= (char *)data,
		}
	};

	if (i2c_transfer(client->adapter, msgs, 2) < 2)
		return 0;

	/* assemble little-endian payload into host order */
	return (data[3] << 24) | (data[2] << 16) | (data[1] << 8) | data[0];
}
/*
 * Read-modify-write an 8-bit register: keep only the bits in @and_mask,
 * then set the bits in @or_value.  Returns the write result.
 */
int cx25840_and_or(struct i2c_client *client, u16 addr, unsigned and_mask,
		   u8 or_value)
{
	u8 cur = cx25840_read(client, addr);

	return cx25840_write(client, addr, (cur & and_mask) | or_value);
}
/*
 * Read-modify-write a 32-bit register: keep only the bits in @and_mask,
 * then set the bits in @or_value.  Returns the write result.
 */
int cx25840_and_or4(struct i2c_client *client, u16 addr, u32 and_mask,
		    u32 or_value)
{
	u32 cur = cx25840_read4(client, addr);

	return cx25840_write4(client, addr, (cur & and_mask) | or_value);
}
/* ----------------------------------------------------------------------- */
static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
enum cx25840_audio_input aud_input);
/* ----------------------------------------------------------------------- */
/*
 * Configure the multiplexed IO pins of the CX2388x: each pin can act either
 * as its dedicated function (IRQ_N, IR RX/TX, I2S) or as a GPIO.  The three
 * registers involved are read once, edited for every entry in @p, and
 * written back once at the end:
 *   0x120 - pin control (function select, polarity, drive strength)
 *   0x160 - GPIO output enable
 *   0x164 - GPIO data
 */
static int cx23885_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
				   struct v4l2_subdev_io_pin_config *p)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int i;
	u32 pin_ctrl;
	u8 gpio_oe, gpio_data, strength;

	pin_ctrl = cx25840_read4(client, 0x120);
	gpio_oe = cx25840_read(client, 0x160);
	gpio_data = cx25840_read(client, 0x164);

	for (i = 0; i < n; i++) {
		/* clamp requested drive strength to the fastest setting */
		strength = p[i].strength;
		if (strength > CX25840_PIN_DRIVE_FAST)
			strength = CX25840_PIN_DRIVE_FAST;

		switch (p[i].pin) {
		case CX23885_PIN_IRQ_N_GPIO16:
			if (p[i].function != CX23885_PAD_IRQ_N) {
				/* GPIO16 */
				pin_ctrl &= ~(0x1 << 25);
			} else {
				/* IRQ_N */
				if (p[i].flags &
					(V4L2_SUBDEV_IO_PIN_DISABLE |
					 V4L2_SUBDEV_IO_PIN_INPUT)) {
					pin_ctrl &= ~(0x1 << 25);
				} else {
					pin_ctrl |= (0x1 << 25);
				}
				if (p[i].flags &
					V4L2_SUBDEV_IO_PIN_ACTIVE_LOW) {
					pin_ctrl &= ~(0x1 << 24);
				} else {
					pin_ctrl |= (0x1 << 24);
				}
			}
			break;
		case CX23885_PIN_IR_RX_GPIO19:
			if (p[i].function != CX23885_PAD_GPIO19) {
				/* IR_RX */
				gpio_oe |= (0x1 << 0);
				pin_ctrl &= ~(0x3 << 18);
				pin_ctrl |= (strength << 18);
			} else {
				/* GPIO19 */
				gpio_oe &= ~(0x1 << 0);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
					gpio_data &= ~(0x1 << 0);
					gpio_data |= ((p[i].value & 0x1) << 0);
				}
				pin_ctrl &= ~(0x3 << 12);
				pin_ctrl |= (strength << 12);
			}
			break;
		case CX23885_PIN_IR_TX_GPIO20:
			if (p[i].function != CX23885_PAD_GPIO20) {
				/* IR_TX */
				gpio_oe |= (0x1 << 1);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_DISABLE)
					pin_ctrl &= ~(0x1 << 10);
				else
					pin_ctrl |= (0x1 << 10);
				pin_ctrl &= ~(0x3 << 18);
				pin_ctrl |= (strength << 18);
			} else {
				/* GPIO20 */
				gpio_oe &= ~(0x1 << 1);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
					gpio_data &= ~(0x1 << 1);
					gpio_data |= ((p[i].value & 0x1) << 1);
				}
				pin_ctrl &= ~(0x3 << 12);
				pin_ctrl |= (strength << 12);
			}
			break;
		case CX23885_PIN_I2S_SDAT_GPIO21:
			if (p[i].function != CX23885_PAD_GPIO21) {
				/* I2S_SDAT */
				/* TODO: Input or Output config */
				gpio_oe |= (0x1 << 2);
				pin_ctrl &= ~(0x3 << 22);
				pin_ctrl |= (strength << 22);
			} else {
				/* GPIO21 */
				gpio_oe &= ~(0x1 << 2);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
					gpio_data &= ~(0x1 << 2);
					gpio_data |= ((p[i].value & 0x1) << 2);
				}
				pin_ctrl &= ~(0x3 << 12);
				pin_ctrl |= (strength << 12);
			}
			break;
		case CX23885_PIN_I2S_WCLK_GPIO22:
			if (p[i].function != CX23885_PAD_GPIO22) {
				/* I2S_WCLK */
				/* TODO: Input or Output config */
				gpio_oe |= (0x1 << 3);
				pin_ctrl &= ~(0x3 << 22);
				pin_ctrl |= (strength << 22);
			} else {
				/* GPIO22 */
				gpio_oe &= ~(0x1 << 3);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
					gpio_data &= ~(0x1 << 3);
					gpio_data |= ((p[i].value & 0x1) << 3);
				}
				pin_ctrl &= ~(0x3 << 12);
				pin_ctrl |= (strength << 12);
			}
			break;
		case CX23885_PIN_I2S_BCLK_GPIO23:
			if (p[i].function != CX23885_PAD_GPIO23) {
				/* I2S_BCLK */
				/* TODO: Input or Output config */
				gpio_oe |= (0x1 << 4);
				pin_ctrl &= ~(0x3 << 22);
				pin_ctrl |= (strength << 22);
			} else {
				/* GPIO23 */
				gpio_oe &= ~(0x1 << 4);
				if (p[i].flags & V4L2_SUBDEV_IO_PIN_SET_VALUE) {
					gpio_data &= ~(0x1 << 4);
					gpio_data |= ((p[i].value & 0x1) << 4);
				}
				pin_ctrl &= ~(0x3 << 12);
				pin_ctrl |= (strength << 12);
			}
			break;
		}
	}

	/* commit the edited register images back to the chip */
	cx25840_write(client, 0x164, gpio_data);
	cx25840_write(client, 0x160, gpio_oe);
	cx25840_write4(client, 0x120, pin_ctrl);
	return 0;
}
/*
 * Dispatch .s_io_pin_config: only the CX2388x family has configurable IO
 * pins; other chips silently accept the call and return success.
 */
static int common_s_io_pin_config(struct v4l2_subdev *sd, size_t n,
				  struct v4l2_subdev_io_pin_config *pincfg)
{
	struct cx25840_state *state = to_state(sd);

	if (!is_cx2388x(state))
		return 0;

	return cx23885_s_io_pin_config(sd, n, pincfg);
}
/* ----------------------------------------------------------------------- */
/*
 * This is the Hauppauge sequence used to initialize the Delay Lock
 * Loop 1 (ADC DLL).  The exact write order and the udelay()s between
 * writes are part of the sequence - do not reorder.
 */
static void init_dll1(struct i2c_client *client)
{
	cx25840_write(client, 0x159, 0x23);
	cx25840_write(client, 0x15a, 0x87);
	cx25840_write(client, 0x15b, 0x06);
	udelay(10);
	cx25840_write(client, 0x159, 0xe1);
	udelay(10);
	cx25840_write(client, 0x15a, 0x86);
	cx25840_write(client, 0x159, 0xe0);
	cx25840_write(client, 0x159, 0xe1);
	cx25840_write(client, 0x15b, 0x10);
}
/*
 * This is the Hauppauge sequence used to initialize the Delay Lock
 * Loop 2 (ADC DLL).  Write order and the udelay() are significant.
 */
static void init_dll2(struct i2c_client *client)
{
	cx25840_write(client, 0x15d, 0xe3);
	cx25840_write(client, 0x15e, 0x86);
	cx25840_write(client, 0x15f, 0x06);
	udelay(10);
	cx25840_write(client, 0x15d, 0xe1);
	cx25840_write(client, 0x15d, 0xe0);
	cx25840_write(client, 0x15d, 0xe1);
}
/*
 * Bring the CX25836 out of reset.  The numbered steps follow the reset
 * configuration described on page 3-77 of the CX25836 datasheet; keep
 * the register writes in this exact order.
 */
static void cx25836_initialize(struct i2c_client *client)
{
	/* 2. */
	cx25840_and_or(client, 0x000, ~0x01, 0x01);
	cx25840_and_or(client, 0x000, ~0x01, 0x00);
	/* 3a. */
	cx25840_and_or(client, 0x15a, ~0x70, 0x00);
	/* 3b. */
	cx25840_and_or(client, 0x15b, ~0x1e, 0x06);
	/* 3c. */
	cx25840_and_or(client, 0x159, ~0x02, 0x02);
	/* 3d. */
	udelay(10);
	/* 3e. */
	cx25840_and_or(client, 0x159, ~0x02, 0x00);
	/* 3f. */
	cx25840_and_or(client, 0x159, ~0xc0, 0xc0);
	/* 3g. */
	cx25840_and_or(client, 0x159, ~0x01, 0x00);
	cx25840_and_or(client, 0x159, ~0x01, 0x01);
	/* 3h. */
	cx25840_and_or(client, 0x15b, ~0x1e, 0x10);
}
/*
 * Workqueue handler that uploads the firmware, then wakes up the thread
 * waiting on fw_wait in the initialize routines.
 */
static void cx25840_work_handler(struct work_struct *work)
{
	struct cx25840_state *state;

	state = container_of(work, struct cx25840_state, fw_work);
	cx25840_loadfw(state->c);
	wake_up(&state->fw_wait);
}
/*
 * CX25840 startup sequence.  The numbered comments track the datasheet's
 * startup procedure (page 3-77).  The firmware upload (step 5) runs in a
 * single-threaded workqueue while this thread sleeps, so the bit-banging
 * i2c transfer does not block the caller's context directly.
 */
static void cx25840_initialize(struct i2c_client *client)
{
	DEFINE_WAIT(wait);
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	struct workqueue_struct *q;

	/* datasheet startup in numbered steps, refer to page 3-77 */
	/* 2. */
	cx25840_and_or(client, 0x803, ~0x10, 0x00);
	/* The default of this register should be 4, but I get 0 instead.
	 * Set this register to 4 manually. */
	cx25840_write(client, 0x000, 0x04);
	/* 3. */
	init_dll1(client);
	init_dll2(client);
	cx25840_write(client, 0x136, 0x0a);
	/* 4. */
	cx25840_write(client, 0x13c, 0x01);
	cx25840_write(client, 0x13c, 0x00);
	/* 5. */
	/* Do the firmware load in a work handler to prevent.
	   Otherwise the kernel is blocked waiting for the
	   bit-banging i2c interface to finish uploading the
	   firmware. */
	INIT_WORK(&state->fw_work, cx25840_work_handler);
	init_waitqueue_head(&state->fw_wait);
	q = create_singlethread_workqueue("cx25840_fw");
	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
	queue_work(q, &state->fw_work);
	schedule();
	finish_wait(&state->fw_wait, &wait);
	destroy_workqueue(q);

	/* 6. */
	cx25840_write(client, 0x115, 0x8c);
	cx25840_write(client, 0x116, 0x07);
	cx25840_write(client, 0x118, 0x02);
	/* 7. */
	cx25840_write(client, 0x4a5, 0x80);
	cx25840_write(client, 0x4a5, 0x00);
	cx25840_write(client, 0x402, 0x00);
	/* 8. */
	cx25840_and_or(client, 0x401, ~0x18, 0);
	cx25840_and_or(client, 0x4a2, ~0x10, 0x10);
	/* steps 8c and 8d are done in change_input() */
	/* 10. */
	cx25840_write(client, 0x8d3, 0x1f);
	cx25840_write(client, 0x8e3, 0x03);

	cx25840_std_setup(client);

	/* trial and error says these are needed to get audio */
	cx25840_write(client, 0x914, 0xa0);
	cx25840_write(client, 0x918, 0xa0);
	cx25840_write(client, 0x919, 0x01);

	/* stereo preferred */
	cx25840_write(client, 0x809, 0x04);
	/* AC97 shift */
	cx25840_write(client, 0x8cf, 0x0f);

	/* (re)set input */
	set_input(client, state->vid_input, state->aud_input);

	/* start microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
/*
 * CX23885/887/888 startup: power up, configure PLLs per chip variant
 * (they differ in crystal frequency), upload firmware in a workqueue,
 * set up the video standard and input, then start the microcontroller.
 *
 * Fix: removed the stray semicolon after the closing brace of the Aux
 * PLL switch statement (empty statement, flagged by checkpatch).  All
 * register writes are unchanged.
 */
static void cx23885_initialize(struct i2c_client *client)
{
	DEFINE_WAIT(wait);
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	struct workqueue_struct *q;

	/*
	 * Come out of digital power down
	 * The CX23888, at least, needs this, otherwise registers aside from
	 * 0x0-0x2 can't be read or written.
	 */
	cx25840_write(client, 0x000, 0);

	/* Internal Reset */
	cx25840_and_or(client, 0x102, ~0x01, 0x01);
	cx25840_and_or(client, 0x102, ~0x01, 0x00);

	/* Stop microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x00);

	/* DIF in reset? */
	cx25840_write(client, 0x398, 0);

	/*
	 * Trust the default xtal, no division
	 * '885: 28.636363... MHz
	 * '887: 25.000000 MHz
	 * '888: 50.000000 MHz
	 */
	cx25840_write(client, 0x2, 0x76);

	/* Power up all the PLL's and DLL */
	cx25840_write(client, 0x1, 0x40);

	/* Sys PLL */
	switch (state->id) {
	case V4L2_IDENT_CX23888_AV:
		/*
		 * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
		 * 572.73 MHz before post divide
		 */
		/* HVR1850 or 50MHz xtal */
		cx25840_write(client, 0x2, 0x71);
		cx25840_write4(client, 0x11c, 0x01d1744c);
		cx25840_write4(client, 0x118, 0x00000416);
		cx25840_write4(client, 0x404, 0x0010253e);
		cx25840_write4(client, 0x42c, 0x42600000);
		cx25840_write4(client, 0x44c, 0x161f1000);
		break;
	case V4L2_IDENT_CX23887_AV:
		/*
		 * 25.0 MHz * (0x16 + 0x1d1744c/0x2000000)/4 = 5 * 28.636363 MHz
		 * 572.73 MHz before post divide
		 */
		cx25840_write4(client, 0x11c, 0x01d1744c);
		cx25840_write4(client, 0x118, 0x00000416);
		break;
	case V4L2_IDENT_CX23885_AV:
	default:
		/*
		 * 28.636363 MHz * (0x14 + 0x0/0x2000000)/4 = 5 * 28.636363 MHz
		 * 572.73 MHz before post divide
		 */
		cx25840_write4(client, 0x11c, 0x00000000);
		cx25840_write4(client, 0x118, 0x00000414);
		break;
	}

	/* Disable DIF bypass */
	cx25840_write4(client, 0x33c, 0x00000001);

	/* DIF Src phase inc */
	cx25840_write4(client, 0x340, 0x0df7df83);

	/*
	 * Vid PLL
	 * Setup for a BT.656 pixel clock of 13.5 Mpixels/second
	 *
	 * 28.636363 MHz * (0xf + 0x02be2c9/0x2000000)/4 = 8 * 13.5 MHz
	 * 432.0 MHz before post divide
	 */

	/* HVR1850 */
	switch (state->id) {
	case V4L2_IDENT_CX23888_AV:
		/* 888/HVR1250 specific */
		cx25840_write4(client, 0x10c, 0x13333333);
		cx25840_write4(client, 0x108, 0x00000515);
		break;
	default:
		cx25840_write4(client, 0x10c, 0x002be2c9);
		cx25840_write4(client, 0x108, 0x0000040f);
	}

	/* Luma */
	cx25840_write4(client, 0x414, 0x00107d12);

	/* Chroma */
	cx25840_write4(client, 0x420, 0x3d008282);

	/*
	 * Aux PLL
	 * Initial setup for audio sample clock:
	 * 48 ksps, 16 bits/sample, x160 multiplier = 122.88 MHz
	 * Initial I2S output/master clock(?):
	 * 48 ksps, 16 bits/sample, x16 multiplier = 12.288 MHz
	 */
	switch (state->id) {
	case V4L2_IDENT_CX23888_AV:
		/*
		 * 50.0 MHz * (0x7 + 0x0bedfa4/0x2000000)/3 = 122.88 MHz
		 * 368.64 MHz before post divide
		 * 122.88 MHz / 0xa = 12.288 MHz
		 */
		/* HVR1850 or 50MHz xtal */
		cx25840_write4(client, 0x114, 0x017dbf48);
		cx25840_write4(client, 0x110, 0x000a030e);
		break;
	case V4L2_IDENT_CX23887_AV:
		/*
		 * 25.0 MHz * (0xe + 0x17dbf48/0x2000000)/3 = 122.88 MHz
		 * 368.64 MHz before post divide
		 * 122.88 MHz / 0xa = 12.288 MHz
		 */
		cx25840_write4(client, 0x114, 0x017dbf48);
		cx25840_write4(client, 0x110, 0x000a030e);
		break;
	case V4L2_IDENT_CX23885_AV:
	default:
		/*
		 * 28.636363 MHz * (0xc + 0x1bf0c9e/0x2000000)/3 = 122.88 MHz
		 * 368.64 MHz before post divide
		 * 122.88 MHz / 0xa = 12.288 MHz
		 */
		cx25840_write4(client, 0x114, 0x01bf0c9e);
		cx25840_write4(client, 0x110, 0x000a030c);
		break;
	}

	/* ADC2 input select */
	cx25840_write(client, 0x102, 0x10);

	/* VIN1 & VIN5 */
	cx25840_write(client, 0x103, 0x11);

	/* Enable format auto detect */
	cx25840_write(client, 0x400, 0);
	/* Fast subchroma lock */
	/* White crush, Chroma AGC & Chroma Killer enabled */
	cx25840_write(client, 0x401, 0xe8);

	/* Select AFE clock pad output source */
	cx25840_write(client, 0x144, 0x05);

	/* Drive GPIO2 direction and values for HVR1700
	 * where an onboard mux selects the output of demodulator
	 * vs the 417. Failure to set this results in no DTV.
	 * It's safe to set this across all Hauppauge boards
	 * currently, regardless of the board type.
	 */
	cx25840_write(client, 0x160, 0x1d);
	cx25840_write(client, 0x164, 0x00);

	/* Do the firmware load in a work handler to prevent.
	   Otherwise the kernel is blocked waiting for the
	   bit-banging i2c interface to finish uploading the
	   firmware. */
	INIT_WORK(&state->fw_work, cx25840_work_handler);
	init_waitqueue_head(&state->fw_wait);
	q = create_singlethread_workqueue("cx25840_fw");
	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
	queue_work(q, &state->fw_work);
	schedule();
	finish_wait(&state->fw_wait, &wait);
	destroy_workqueue(q);

	/* Call the cx23885 specific std setup func, we no longer rely on
	 * the generic cx24840 func.
	 */
	cx23885_std_setup(client);

	/* (re)set input */
	set_input(client, state->vid_input, state->aud_input);

	/* start microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x10);

	/* Disable and clear video interrupts - we don't use them */
	cx25840_write4(client, CX25840_VID_INT_STAT_REG, 0xffffffff);

	/* Disable and clear audio interrupts - we don't use them */
	cx25840_write(client, CX25840_AUD_INT_CTRL_REG, 0xff);
	cx25840_write(client, CX25840_AUD_INT_STAT_REG, 0xff);

	/* CC raw enable */
	/* - VIP 1.1 control codes - 10bit, blue field enable.
	 * - enable raw data during vertical blanking.
	 * - enable ancillary Data insertion for 656 or VIP.
	 */
	cx25840_write4(client, 0x404, 0x0010253e);

	/* CC on - Undocumented Register */
	cx25840_write(client, 0x42f, 0x66);

	/* HVR-1250 / HVR1850 DIF related */
	/* Power everything up */
	cx25840_write4(client, 0x130, 0x0);

	/* Undocumented */
	cx25840_write4(client, 0x478, 0x6628021F);

	/* AFE_CLK_OUT_CTRL - Select the clock output source as output */
	cx25840_write4(client, 0x144, 0x5);

	/* I2C_OUT_CTL - I2S output configuration as
	 * Master, Sony, Left justified, left sample on WS=1
	 */
	cx25840_write4(client, 0x918, 0x1a0);

	/* AFE_DIAG_CTRL1 */
	cx25840_write4(client, 0x134, 0x000a1800);

	/* AFE_DIAG_CTRL3 - Inverted Polarity for Audio and Video */
	cx25840_write4(client, 0x13c, 0x00310000);
}
/* ----------------------------------------------------------------------- */
/*
 * CX231xx startup: reset, basic clock/PLL setup, firmware upload via a
 * workqueue (as in cx25840_initialize()), standard setup, input select,
 * then start the microcontroller and enable closed-caption extraction.
 */
static void cx231xx_initialize(struct i2c_client *client)
{
	DEFINE_WAIT(wait);
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	struct workqueue_struct *q;

	/* Internal Reset */
	cx25840_and_or(client, 0x102, ~0x01, 0x01);
	cx25840_and_or(client, 0x102, ~0x01, 0x00);

	/* Stop microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x00);

	/* DIF in reset? */
	cx25840_write(client, 0x398, 0);

	/* Trust the default xtal, no division */
	/* This changes for the cx23888 products */
	cx25840_write(client, 0x2, 0x76);

	/* Bring down the regulator for AUX clk */
	cx25840_write(client, 0x1, 0x40);

	/* Disable DIF bypass */
	cx25840_write4(client, 0x33c, 0x00000001);

	/* DIF Src phase inc */
	cx25840_write4(client, 0x340, 0x0df7df83);

	/* Luma */
	cx25840_write4(client, 0x414, 0x00107d12);

	/* Chroma */
	cx25840_write4(client, 0x420, 0x3d008282);

	/* ADC2 input select */
	cx25840_write(client, 0x102, 0x10);

	/* VIN1 & VIN5 */
	cx25840_write(client, 0x103, 0x11);

	/* Enable format auto detect */
	cx25840_write(client, 0x400, 0);
	/* Fast subchroma lock */
	/* White crush, Chroma AGC & Chroma Killer enabled */
	cx25840_write(client, 0x401, 0xe8);

	/* Do the firmware load in a work handler to prevent.
	   Otherwise the kernel is blocked waiting for the
	   bit-banging i2c interface to finish uploading the
	   firmware. */
	INIT_WORK(&state->fw_work, cx25840_work_handler);
	init_waitqueue_head(&state->fw_wait);
	q = create_singlethread_workqueue("cx25840_fw");
	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
	queue_work(q, &state->fw_work);
	schedule();
	finish_wait(&state->fw_wait, &wait);
	destroy_workqueue(q);

	cx25840_std_setup(client);

	/* (re)set input */
	set_input(client, state->vid_input, state->aud_input);

	/* start microcontroller */
	cx25840_and_or(client, 0x803, ~0x10, 0x10);

	/* CC raw enable */
	cx25840_write(client, 0x404, 0x0b);

	/* CC on */
	cx25840_write(client, 0x42f, 0x66);
	cx25840_write4(client, 0x474, 0x1e1e601a);
}
/* ----------------------------------------------------------------------- */
/*
 * cx25840_std_setup() - program the decoder timing registers for the
 * current video standard (state->std).
 *
 * Derives horizontal/vertical blanking, active video, burst gate,
 * source decimation, luma/UV low-pass filters, comb filter and the
 * subcarrier (SC) step size, then writes them to the 0x470-0x47f
 * register block.  Follows the startup sequence of section 3.16 of
 * the cx25840 datasheet.
 */
void cx25840_std_setup(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	v4l2_std_id std = state->std;
	int hblank, hactive, burst, vblank, vactive, sc;
	int vblank656, src_decimation;
	int luma_lpf, uv_lpf, comb;
	u32 pll_int, pll_frac, pll_post;
	/* datasheet startup, step 8d */
	if (std & ~V4L2_STD_NTSC)
		cx25840_write(client, 0x49f, 0x11);
	else
		cx25840_write(client, 0x49f, 0x14);
	/* 625-line (PAL/SECAM) vs 525-line (NTSC family) timing tables */
	if (std & V4L2_STD_625_50) {
		hblank = 132;
		hactive = 720;
		burst = 93;
		vblank = 36;
		vactive = 580;
		vblank656 = 40;
		src_decimation = 0x21f;
		luma_lpf = 2;
		if (std & V4L2_STD_SECAM) {
			uv_lpf = 0;
			comb = 0;
			sc = 0x0a425f;
		} else if (std == V4L2_STD_PAL_Nc) {
			uv_lpf = 1;
			comb = 0x20;
			sc = 556453;
		} else {
			/* other 625-line PAL variants */
			uv_lpf = 1;
			comb = 0x20;
			sc = 688739;
		}
	} else {
		hactive = 720;
		hblank = 122;
		vactive = 487;
		luma_lpf = 1;
		uv_lpf = 1;
		src_decimation = 0x21f;
		if (std == V4L2_STD_PAL_60) {
			vblank = 26;
			vblank656 = 26;
			burst = 0x5b;
			luma_lpf = 2;
			comb = 0x20;
			sc = 688739;
		} else if (std == V4L2_STD_PAL_M) {
			vblank = 20;
			vblank656 = 24;
			burst = 0x61;
			comb = 0x20;
			sc = 555452;
		} else {
			/* NTSC */
			vblank = 26;
			vblank656 = 26;
			burst = 0x5b;
			comb = 0x66;
			sc = 556063;
		}
	}
	/* DEBUG: Displays configured PLL frequency */
	if (!is_cx231xx(state)) {
		pll_int = cx25840_read(client, 0x108);
		pll_frac = cx25840_read4(client, 0x10c) & 0x1ffffff;
		pll_post = cx25840_read(client, 0x109);
		v4l_dbg(1, cx25840_debug, client,
			"PLL regs = int: %u, frac: %u, post: %u\n",
			pll_int, pll_frac, pll_post);
		if (pll_post) {
			int fin, fsc;
			/* PLL output from the 28.636363 MHz reference and
			 * the 25-bit fractional part (debug display only) */
			int pll = (28636363L * ((((u64)pll_int) << 25L) + pll_frac)) >> 25L;
			pll /= pll_post;
			v4l_dbg(1, cx25840_debug, client, "PLL = %d.%06d MHz\n",
				pll / 1000000, pll % 1000000);
			v4l_dbg(1, cx25840_debug, client, "PLL/8 = %d.%06d MHz\n",
				pll / 8000000, (pll / 8) % 1000000);
			fin = ((u64)src_decimation * pll) >> 12;
			v4l_dbg(1, cx25840_debug, client,
				"ADC Sampling freq = %d.%06d MHz\n",
				fin / 1000000, fin % 1000000);
			fsc = (((u64)sc) * pll) >> 24L;
			v4l_dbg(1, cx25840_debug, client,
				"Chroma sub-carrier freq = %d.%06d MHz\n",
				fsc / 1000000, fsc % 1000000);
			v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, "
				"vblank %i, vactive %i, vblank656 %i, src_dec %i, "
				"burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
				"sc 0x%06x\n",
				hblank, hactive, vblank, vactive, vblank656,
				src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
		}
	}
	/* Sets horizontal blanking delay and active lines */
	cx25840_write(client, 0x470, hblank);
	cx25840_write(client, 0x471,
			0xff & (((hblank >> 8) & 0x3) | (hactive << 4)));
	cx25840_write(client, 0x472, hactive >> 4);
	/* Sets burst gate delay */
	cx25840_write(client, 0x473, burst);
	/* Sets vertical blanking delay and active duration */
	cx25840_write(client, 0x474, vblank);
	cx25840_write(client, 0x475,
			0xff & (((vblank >> 8) & 0x3) | (vactive << 4)));
	cx25840_write(client, 0x476, vactive >> 4);
	cx25840_write(client, 0x477, vblank656);
	/* Sets src decimation rate */
	cx25840_write(client, 0x478, 0xff & src_decimation);
	cx25840_write(client, 0x479, 0xff & (src_decimation >> 8));
	/* Sets Luma and UV Low pass filters */
	cx25840_write(client, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
	/* Enables comb filters */
	cx25840_write(client, 0x47b, comb);
	/* Sets SC Step (24-bit value split over three byte registers) */
	cx25840_write(client, 0x47c, sc);
	cx25840_write(client, 0x47d, 0xff & sc >> 8);
	cx25840_write(client, 0x47e, 0xff & sc >> 16);
	/* Sets VBI parameters */
	if (std & V4L2_STD_625_50) {
		cx25840_write(client, 0x47f, 0x01);
		state->vbi_line_offset = 5;
	} else {
		cx25840_write(client, 0x47f, 0x00);
		state->vbi_line_offset = 8;
	}
}
/* ----------------------------------------------------------------------- */
/*
 * input_change() - restart video format detection and reprogram the
 * audio standard selection after the input, standard or tuner
 * frequency has changed.
 */
static void input_change(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	v4l2_std_id std = state->std;
	/* Follow step 8c and 8d of section 3.16 in the cx25840 datasheet */
	if (std & V4L2_STD_SECAM) {
		cx25840_write(client, 0x402, 0);
	}
	else {
		cx25840_write(client, 0x402, 0x04);
		cx25840_write(client, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
	}
	/* Pulse bits 5-6 of 0x401 low then high */
	cx25840_and_or(client, 0x401, ~0x60, 0);
	cx25840_and_or(client, 0x401, ~0x60, 0x60);
	/* Don't write into audio registers on cx2583x chips */
	if (is_cx2583x(state))
		return;
	/* Hold bit 0 of 0x810 set while the audio registers below are
	 * reprogrammed; it is released again at the end of the function. */
	cx25840_and_or(client, 0x810, ~0x01, 1);
	if (state->radio) {
		cx25840_write(client, 0x808, 0xf9);
		cx25840_write(client, 0x80b, 0x00);
	}
	else if (std & V4L2_STD_525_60) {
		/* Certain Hauppauge PVR150 models have a hardware bug
		   that causes audio to drop out. For these models the
		   audio standard must be set explicitly.
		   To be precise: it affects cards with tuner models
		   85, 99 and 112 (model numbers from tveeprom). */
		int hw_fix = state->pvr150_workaround;
		if (std == V4L2_STD_NTSC_M_JP) {
			/* Japan uses EIAJ audio standard */
			cx25840_write(client, 0x808, hw_fix ? 0x2f : 0xf7);
		} else if (std == V4L2_STD_NTSC_M_KR) {
			/* South Korea uses A2 audio standard */
			cx25840_write(client, 0x808, hw_fix ? 0x3f : 0xf8);
		} else {
			/* Others use the BTSC audio standard */
			cx25840_write(client, 0x808, hw_fix ? 0x1f : 0xf6);
		}
		cx25840_write(client, 0x80b, 0x00);
	} else if (std & V4L2_STD_PAL) {
		/* Autodetect audio standard and audio system */
		cx25840_write(client, 0x808, 0xff);
		/* Since system PAL-L is pretty much non-existent and
		   not used by any public broadcast network, force
		   6.5 MHz carrier to be interpreted as System DK,
		   this avoids DK audio detection instability */
		cx25840_write(client, 0x80b, 0x00);
	} else if (std & V4L2_STD_SECAM) {
		/* Autodetect audio standard and audio system */
		cx25840_write(client, 0x808, 0xff);
		/* If only one of SECAM-DK / SECAM-L is required, then force
		   6.5MHz carrier, else autodetect it */
		if ((std & V4L2_STD_SECAM_DK) &&
		    !(std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) {
			/* 6.5 MHz carrier to be interpreted as System DK */
			cx25840_write(client, 0x80b, 0x00);
		} else if (!(std & V4L2_STD_SECAM_DK) &&
			   (std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC))) {
			/* 6.5 MHz carrier to be interpreted as System L */
			cx25840_write(client, 0x80b, 0x08);
		} else {
			/* 6.5 MHz carrier to be autodetected */
			cx25840_write(client, 0x80b, 0x10);
		}
	}
	/* Release bit 0 of 0x810 set at the top of this section */
	cx25840_and_or(client, 0x810, ~0x01, 0);
}
/*
 * set_input() - route a video input and an audio input through the
 * decoder muxes.
 *
 * Validates the requested inputs, computes the input mux byte for
 * register 0x103, selects composite/S-Video/component mode, and on
 * cx2388x parts programs or bypasses the DIF block for tuner use.
 * On success the selection is stored in state->vid_input /
 * state->aud_input.  Returns 0 or -EINVAL for an invalid input.
 */
static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
						enum cx25840_audio_input aud_input)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	u8 is_composite = (vid_input >= CX25840_COMPOSITE1 &&
			   vid_input <= CX25840_COMPOSITE8);
	u8 is_component = (vid_input & CX25840_COMPONENT_ON) ==
			CX25840_COMPONENT_ON;
	u8 is_dif = (vid_input & CX25840_DIF_ON) ==
			CX25840_DIF_ON;
	u8 is_svideo = (vid_input & CX25840_SVIDEO_ON) ==
			CX25840_SVIDEO_ON;
	int luma = vid_input & 0xf0;
	int chroma = vid_input & 0xf00;
	u8 reg;
	u32 val;
	v4l_dbg(1, cx25840_debug, client,
		"decoder set video input %d, audio input %d\n",
		vid_input, aud_input);
	if (vid_input >= CX25840_VIN1_CH1) {
		/* Caller supplied a raw mux routing value: use it as-is */
		v4l_dbg(1, cx25840_debug, client, "vid_input 0x%x\n",
			vid_input);
		reg = vid_input & 0xff;
		is_composite = !is_component &&
			((vid_input & CX25840_SVIDEO_ON) != CX25840_SVIDEO_ON);
		v4l_dbg(1, cx25840_debug, client, "mux cfg 0x%x comp=%d\n",
			reg, is_composite);
	} else if (is_composite) {
		reg = 0xf0 + (vid_input - CX25840_COMPOSITE1);
	} else {
		/* S-Video: validate the luma/chroma channel combination */
		if ((vid_input & ~0xff0) ||
		    luma < CX25840_SVIDEO_LUMA1 || luma > CX25840_SVIDEO_LUMA8 ||
		    chroma < CX25840_SVIDEO_CHROMA4 || chroma > CX25840_SVIDEO_CHROMA8) {
			v4l_err(client, "0x%04x is not a valid video input!\n",
				vid_input);
			return -EINVAL;
		}
		reg = 0xf0 + ((luma - CX25840_SVIDEO_LUMA1) >> 4);
		if (chroma >= CX25840_SVIDEO_CHROMA7) {
			reg &= 0x3f;
			reg |= (chroma - CX25840_SVIDEO_CHROMA7) >> 2;
		} else {
			reg &= 0xcf;
			reg |= (chroma - CX25840_SVIDEO_CHROMA4) >> 4;
		}
	}
	/* The caller has previously prepared the correct routing
	 * configuration in reg (for the cx23885) so we have no
	 * need to attempt to flip bits for earlier av decoders.
	 */
	if (!is_cx2388x(state) && !is_cx231xx(state)) {
		/* Fold the audio input selection into the mux byte */
		switch (aud_input) {
		case CX25840_AUDIO_SERIAL:
			/* do nothing, use serial audio input */
			break;
		case CX25840_AUDIO4: reg &= ~0x30; break;
		case CX25840_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
		case CX25840_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
		case CX25840_AUDIO7: reg &= ~0xc0; break;
		case CX25840_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
		default:
			v4l_err(client, "0x%04x is not a valid audio input!\n",
				aud_input);
			return -EINVAL;
		}
	}
	cx25840_write(client, 0x103, reg);
	/* Set INPUT_MODE to Composite, S-Video or Component */
	if (is_component)
		cx25840_and_or(client, 0x401, ~0x6, 0x6);
	else
		cx25840_and_or(client, 0x401, ~0x6, is_composite ? 0 : 0x02);
	if (is_cx2388x(state)) {
		/* Enable or disable the DIF for tuner use */
		if (is_dif) {
			cx25840_and_or(client, 0x102, ~0x80, 0x80);
			/* Set of defaults for NTSC and PAL */
			cx25840_write4(client, 0x31c, 0xc2262600);
			cx25840_write4(client, 0x320, 0xc2262600);
			/* 18271 IF - Nobody else yet uses a different
			 * tuner with the DIF, so these are reasonable
			 * assumptions (HVR1250 and HVR1850 specific).
			 */
			cx25840_write4(client, 0x318, 0xda262600);
			cx25840_write4(client, 0x33c, 0x2a24c800);
			cx25840_write4(client, 0x104, 0x0704dd00);
		} else {
			cx25840_write4(client, 0x300, 0x015c28f5);
			cx25840_and_or(client, 0x102, ~0x80, 0);
			cx25840_write4(client, 0x340, 0xdf7df83);
			cx25840_write4(client, 0x104, 0x0704dd80);
			cx25840_write4(client, 0x314, 0x22400600);
			cx25840_write4(client, 0x318, 0x40002600);
			cx25840_write4(client, 0x324, 0x40002600);
			cx25840_write4(client, 0x32c, 0x0250e620);
			cx25840_write4(client, 0x39c, 0x01FF0B00);
			cx25840_write4(client, 0x410, 0xffff0dbf);
			cx25840_write4(client, 0x414, 0x00137d03);
			cx25840_write4(client, 0x418, 0x01008080);
			cx25840_write4(client, 0x41c, 0x00000000);
			cx25840_write4(client, 0x420, 0x001c3e0f);
			cx25840_write4(client, 0x42c, 0x42600000);
			cx25840_write4(client, 0x430, 0x0000039b);
			cx25840_write4(client, 0x438, 0x00000000);
			cx25840_write4(client, 0x440, 0xF8E3E824);
			cx25840_write4(client, 0x444, 0x401040dc);
			cx25840_write4(client, 0x448, 0xcd3f02a0);
			cx25840_write4(client, 0x44c, 0x161f1000);
			cx25840_write4(client, 0x450, 0x00000802);
			cx25840_write4(client, 0x91c, 0x01000000);
			cx25840_write4(client, 0x8e0, 0x03063870);
			cx25840_write4(client, 0x8d4, 0x7FFF0024);
			cx25840_write4(client, 0x8d0, 0x00063073);
			cx25840_write4(client, 0x8c8, 0x00010000);
			cx25840_write4(client, 0x8cc, 0x00080023);
			/* DIF BYPASS */
			cx25840_write4(client, 0x33c, 0x2a04c800);
		}
		/* Reset the DIF */
		cx25840_write4(client, 0x398, 0);
	}
	if (!is_cx2388x(state) && !is_cx231xx(state)) {
		/* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
		cx25840_and_or(client, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0);
		/* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2&CH3 */
		if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
			cx25840_and_or(client, 0x102, ~0x4, 4);
		else
			cx25840_and_or(client, 0x102, ~0x4, 0);
	} else {
		/* Set DUAL_MODE_ADC2 to 1 if component*/
		cx25840_and_or(client, 0x102, ~0x4, is_component ? 0x4 : 0x0);
		if (is_composite) {
			/* ADC2 input select channel 2 */
			cx25840_and_or(client, 0x102, ~0x2, 0);
		} else if (!is_component) {
			/* S-Video */
			if (chroma >= CX25840_SVIDEO_CHROMA7) {
				/* ADC2 input select channel 3 */
				cx25840_and_or(client, 0x102, ~0x2, 2);
			} else {
				/* ADC2 input select channel 2 */
				cx25840_and_or(client, 0x102, ~0x2, 0);
			}
		}
		/* cx23885 / SVIDEO */
		if (is_cx2388x(state) && is_svideo) {
#define AFE_CTRL  (0x104)
#define MODE_CTRL (0x400)
			cx25840_and_or(client, 0x102, ~0x2, 0x2);
			val = cx25840_read4(client, MODE_CTRL);
			val &= 0xFFFFF9FF;
			/* YC */
			val |= 0x00000200;
			val &= ~0x2000;
			cx25840_write4(client, MODE_CTRL, val);
			val = cx25840_read4(client, AFE_CTRL);
			/* Chroma in select */
			val |= 0x00001000;
			val &= 0xfffffe7f;
			/* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8).
			 * This sets them to use video rather than audio.
			 * Only one of the two will be in use.
			 */
			cx25840_write4(client, AFE_CTRL, val);
		} else
			cx25840_and_or(client, 0x102, ~0x2, 0);
	}
	/* Record the selection, then redo the audio path and detection */
	state->vid_input = vid_input;
	state->aud_input = aud_input;
	cx25840_audio_set_path(client);
	input_change(client);
	if (is_cx2388x(state)) {
		/* Audio channel 1 src : Parallel 1 */
		cx25840_write(client, 0x124, 0x03);
		/* Select AFE clock pad output source */
		cx25840_write(client, 0x144, 0x05);
		/* I2S_IN_CTL: I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 */
		cx25840_write(client, 0x914, 0xa0);
		/* I2S_OUT_CTL:
		 * I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1
		 * I2S_OUT_MASTER_MODE = Master
		 */
		cx25840_write(client, 0x918, 0xa0);
		cx25840_write(client, 0x919, 0x01);
	} else if (is_cx231xx(state)) {
		/* Audio channel 1 src : Parallel 1 */
		cx25840_write(client, 0x124, 0x03);
		/* I2S_IN_CTL: I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1 */
		cx25840_write(client, 0x914, 0xa0);
		/* I2S_OUT_CTL:
		 * I2S_IN_SONY_MODE, LEFT SAMPLE on WS=1
		 * I2S_OUT_MASTER_MODE = Master
		 */
		cx25840_write(client, 0x918, 0xa0);
		cx25840_write(client, 0x919, 0x01);
	}
	if (is_cx2388x(state) && ((aud_input == CX25840_AUDIO7) ||
				  (aud_input == CX25840_AUDIO6))) {
		/* Configure audio from LR1 or LR2 input */
		cx25840_write4(client, 0x910, 0);
		cx25840_write4(client, 0x8d0, 0x63073);
	} else
	if (is_cx2388x(state) && (aud_input == CX25840_AUDIO8)) {
		/* Configure audio from tuner/sif input */
		cx25840_write4(client, 0x910, 0x12b000c9);
		cx25840_write4(client, 0x8d0, 0x1f063870);
	}
	if (is_cx2388x(state)) {
		/* HVR1850 */
		/* AUD_IO_CTRL - I2S Input, Parallel1*/
		/*  - Channel 1 src - Parallel1 (Merlin out) */
		/*  - Channel 2 src - Parallel2 (Merlin out) */
		/*  - Channel 3 src - Parallel3 (Merlin AC97 out) */
		/*  - I2S source and dir - Merlin, output */
		cx25840_write4(client, 0x124, 0x100);
		if (!is_dif) {
			/* Stop microcontroller if we don't need it
			 * to avoid audio popping on svideo/composite use.
			 */
			cx25840_and_or(client, 0x803, ~0x10, 0x00);
		}
	}
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * set_v4lstd() - select the video format code for state->std in the
 * low nibble of register 0x400, then reprogram the timing registers
 * and restart detection.
 */
static int set_v4lstd(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	u8 fmt = 0; 	/* zero is autodetect */
	u8 pal_m = 0;
	/* First tests should be against specific std */
	if (state->std == V4L2_STD_NTSC_M_JP) {
		fmt = 0x2;
	} else if (state->std == V4L2_STD_NTSC_443) {
		fmt = 0x3;
	} else if (state->std == V4L2_STD_PAL_M) {
		pal_m = 1;
		fmt = 0x5;
	} else if (state->std == V4L2_STD_PAL_N) {
		fmt = 0x6;
	} else if (state->std == V4L2_STD_PAL_Nc) {
		fmt = 0x7;
	} else if (state->std == V4L2_STD_PAL_60) {
		fmt = 0x8;
	} else {
		/* Then, test against generic ones */
		if (state->std & V4L2_STD_NTSC)
			fmt = 0x1;
		else if (state->std & V4L2_STD_PAL)
			fmt = 0x4;
		else if (state->std & V4L2_STD_SECAM)
			fmt = 0xc;
	}
	v4l_dbg(1, cx25840_debug, client, "changing video std to fmt %i\n",fmt);
	/* Follow step 9 of section 3.16 in the cx25840 datasheet.
	   Without this PAL may display a vertical ghosting effect.
	   This happens for example with the Yuan MPC622. */
	if (fmt >= 4 && fmt < 8) {
		/* Set format to NTSC-M */
		cx25840_and_or(client, 0x400, ~0xf, 1);
		/* Turn off LCOMB */
		cx25840_and_or(client, 0x47b, ~6, 0);
	}
	cx25840_and_or(client, 0x400, ~0xf, fmt);
	cx25840_and_or(client, 0x403, ~0x3, pal_m);
	/* Reprogram the standard-dependent timing for this chip family */
	if (is_cx2388x(state))
		cx23885_std_setup(client);
	else
		cx25840_std_setup(client);
	if (!is_cx2583x(state))
		input_change(client);
	return 0;
}
/* ----------------------------------------------------------------------- */
static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
cx25840_write(client, 0x414, ctrl->val - 128);
break;
case V4L2_CID_CONTRAST:
cx25840_write(client, 0x415, ctrl->val << 1);
break;
case V4L2_CID_SATURATION:
cx25840_write(client, 0x420, ctrl->val << 1);
cx25840_write(client, 0x421, ctrl->val << 1);
break;
case V4L2_CID_HUE:
cx25840_write(client, 0x422, ctrl->val);
break;
default:
return -EINVAL;
}
return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * cx25840_s_mbus_fmt() - set the output frame size by programming the
 * horizontal/vertical scalers from the active source dimensions.
 *
 * Returns -EINVAL for an unsupported media bus code and -ERANGE when
 * the requested size is outside the scaler's reach.
 */
static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
	int is_50Hz = !(state->std & V4L2_STD_525_60);
	if (fmt->code != V4L2_MBUS_FMT_FIXED)
		return -EINVAL;
	fmt->field = V4L2_FIELD_INTERLACED;
	fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
	/* Read back the active source size programmed by std_setup() */
	Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
	Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
	Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
	Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
	/* Extra lines on top of the requested height: 4 for 50 Hz, 7 for 60 Hz */
	Vlines = fmt->height + (is_50Hz ? 4 : 7);
	/* Only downscaling is supported: at most 16x horizontally, 8x vertically */
	if ((fmt->width * 16 < Hsrc) || (Hsrc < fmt->width) ||
	    (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) {
		v4l_err(client, "%dx%d is not a valid size!\n",
			fmt->width, fmt->height);
		return -ERANGE;
	}
	/* Fixed-point scale factors: HSC is 20-bit, VSC is 13-bit */
	HSC = (Hsrc * (1 << 20)) / fmt->width - (1 << 20);
	VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9));
	VSC &= 0x1fff;
	/* Pick the horizontal filter setting for the target width */
	if (fmt->width >= 385)
		filter = 0;
	else if (fmt->width > 192)
		filter = 1;
	else if (fmt->width > 96)
		filter = 2;
	else
		filter = 3;
	v4l_dbg(1, cx25840_debug, client, "decoder set size %dx%d -> scale %ux%u\n",
			fmt->width, fmt->height, HSC, VSC);
	/* HSCALE=HSC */
	cx25840_write(client, 0x418, HSC & 0xff);
	cx25840_write(client, 0x419, (HSC >> 8) & 0xff);
	cx25840_write(client, 0x41a, HSC >> 16);
	/* VSCALE=VSC */
	cx25840_write(client, 0x41c, VSC & 0xff);
	cx25840_write(client, 0x41d, VSC >> 8);
	/* VS_INTRLACE=1 VFILT=filter */
	cx25840_write(client, 0x41e, 0x8 | filter);
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * log_video_status() - report signal presence, detected and configured
 * video formats and the selected video input via v4l_info.
 */
static void log_video_status(struct i2c_client *client)
{
	/* Names for the 4-bit format codes of registers 0x400 / 0x40d */
	static const char *const fmt_strs[] = {
		"0x0",
		"NTSC-M", "NTSC-J", "NTSC-4.43",
		"PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60",
		"0x9", "0xA", "0xB",
		"SECAM",
		"0xD", "0xE", "0xF"
	};
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	u8 cfg_fmt = cx25840_read(client, 0x400) & 0xf;
	u8 det_fmt = cx25840_read(client, 0x40d) & 0xf;
	u8 present = cx25840_read(client, 0x40e) & 0x20;
	int input = state->vid_input;

	v4l_info(client, "Video signal: %spresent\n", present ? "" : "not ");
	v4l_info(client, "Detected format: %s\n", fmt_strs[det_fmt]);
	v4l_info(client, "Specified standard: %s\n",
		 cfg_fmt ? fmt_strs[cfg_fmt] : "automatic detection");
	if (input >= CX25840_COMPOSITE1 && input <= CX25840_COMPOSITE8)
		v4l_info(client, "Specified video input: Composite %d\n",
			 input - CX25840_COMPOSITE1 + 1);
	else
		v4l_info(client, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
			 (input & 0xf0) >> 4, (input & 0xf00) >> 8);
	v4l_info(client, "Specified audioclock freq: %d Hz\n",
		 state->audclk_freq);
}
/* ----------------------------------------------------------------------- */
/*
 * log_audio_status() - dump the audio detection and configuration
 * state of the decoder (registers 0x803-0x80b and 0x8d3) via v4l_info.
 */
static void log_audio_status(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	u8 download_ctl = cx25840_read(client, 0x803);
	u8 mod_det_stat0 = cx25840_read(client, 0x804);
	u8 mod_det_stat1 = cx25840_read(client, 0x805);
	u8 audio_config = cx25840_read(client, 0x808);
	u8 pref_mode = cx25840_read(client, 0x809);
	u8 afc0 = cx25840_read(client, 0x80b);
	u8 mute_ctl = cx25840_read(client, 0x8d3);
	int aud_input = state->aud_input;
	char *p;
	/* Detected audio mode (register 0x804) */
	switch (mod_det_stat0) {
	case 0x00: p = "mono"; break;
	case 0x01: p = "stereo"; break;
	case 0x02: p = "dual"; break;
	case 0x04: p = "tri"; break;
	case 0x10: p = "mono with SAP"; break;
	case 0x11: p = "stereo with SAP"; break;
	case 0x12: p = "dual with SAP"; break;
	case 0x14: p = "tri with SAP"; break;
	case 0xfe: p = "forced mode"; break;
	default: p = "not defined";
	}
	v4l_info(client, "Detected audio mode: %s\n", p);
	/* Detected audio standard (register 0x805) */
	switch (mod_det_stat1) {
	case 0x00: p = "not defined"; break;
	case 0x01: p = "EIAJ"; break;
	case 0x02: p = "A2-M"; break;
	case 0x03: p = "A2-BG"; break;
	case 0x04: p = "A2-DK1"; break;
	case 0x05: p = "A2-DK2"; break;
	case 0x06: p = "A2-DK3"; break;
	case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
	case 0x08: p = "AM-L"; break;
	case 0x09: p = "NICAM-BG"; break;
	case 0x0a: p = "NICAM-DK"; break;
	case 0x0b: p = "NICAM-I"; break;
	case 0x0c: p = "NICAM-L"; break;
	case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FMMono)"; break;
	case 0x0e: p = "IF FM Radio"; break;
	case 0x0f: p = "BTSC"; break;
	case 0x10: p = "high-deviation FM"; break;
	case 0x11: p = "very high-deviation FM"; break;
	case 0xfd: p = "unknown audio standard"; break;
	case 0xfe: p = "forced audio standard"; break;
	case 0xff: p = "no detected audio standard"; break;
	default: p = "not defined";
	}
	v4l_info(client, "Detected audio standard: %s\n", p);
	v4l_info(client, "Audio microcontroller: %s\n",
		 (download_ctl & 0x10) ?
		 ((mute_ctl & 0x2) ? "detecting" : "running") : "stopped");
	/* Configured audio standard (high nibble of register 0x808) */
	switch (audio_config >> 4) {
	case 0x00: p = "undefined"; break;
	case 0x01: p = "BTSC"; break;
	case 0x02: p = "EIAJ"; break;
	case 0x03: p = "A2-M"; break;
	case 0x04: p = "A2-BG"; break;
	case 0x05: p = "A2-DK1"; break;
	case 0x06: p = "A2-DK2"; break;
	case 0x07: p = "A2-DK3"; break;
	case 0x08: p = "A1 (6.0 MHz FM Mono)"; break;
	case 0x09: p = "AM-L"; break;
	case 0x0a: p = "NICAM-BG"; break;
	case 0x0b: p = "NICAM-DK"; break;
	case 0x0c: p = "NICAM-I"; break;
	case 0x0d: p = "NICAM-L"; break;
	case 0x0e: p = "FM radio"; break;
	case 0x0f: p = "automatic detection"; break;
	default: p = "undefined";
	}
	v4l_info(client, "Configured audio standard: %s\n", p);
	/* The low nibble of 0x808 means "mode" for an explicit standard,
	 * but "system" when the standard is autodetected (0xF) */
	if ((audio_config >> 4) < 0xF) {
		switch (audio_config & 0xF) {
		case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
		case 0x01: p = "MONO2 (LANGUAGE B)"; break;
		case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
		case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
		case 0x04: p = "STEREO"; break;
		case 0x05: p = "DUAL1 (AB)"; break;
		case 0x06: p = "DUAL2 (AC) (FM)"; break;
		case 0x07: p = "DUAL3 (BC) (FM)"; break;
		case 0x08: p = "DUAL4 (AC) (AM)"; break;
		case 0x09: p = "DUAL5 (BC) (AM)"; break;
		case 0x0a: p = "SAP"; break;
		default: p = "undefined";
		}
		v4l_info(client, "Configured audio mode: %s\n", p);
	} else {
		switch (audio_config & 0xF) {
		case 0x00: p = "BG"; break;
		case 0x01: p = "DK1"; break;
		case 0x02: p = "DK2"; break;
		case 0x03: p = "DK3"; break;
		case 0x04: p = "I"; break;
		case 0x05: p = "L"; break;
		case 0x06: p = "BTSC"; break;
		case 0x07: p = "EIAJ"; break;
		case 0x08: p = "A2-M"; break;
		case 0x09: p = "FM Radio"; break;
		case 0x0f: p = "automatic standard and mode detection"; break;
		default: p = "undefined";
		}
		v4l_info(client, "Configured audio system: %s\n", p);
	}
	if (aud_input) {
		v4l_info(client, "Specified audio input: Tuner (In%d)\n", aud_input);
	} else {
		v4l_info(client, "Specified audio input: External\n");
	}
	/* Preferred audio mode (register 0x809) */
	switch (pref_mode & 0xf) {
	case 0: p = "mono/language A"; break;
	case 1: p = "language B"; break;
	case 2: p = "language C"; break;
	case 3: p = "analog fallback"; break;
	case 4: p = "stereo"; break;
	case 5: p = "language AC"; break;
	case 6: p = "language BC"; break;
	case 7: p = "language AB"; break;
	default: p = "undefined";
	}
	v4l_info(client, "Preferred audio mode: %s\n", p);
	/* Carrier selection (register 0x80b), only meaningful in autodetect */
	if ((audio_config & 0xf) == 0xf) {
		switch ((afc0 >> 3) & 0x3) {
		case 0: p = "system DK"; break;
		case 1: p = "system L"; break;
		case 2: p = "autodetect"; break;
		default: p = "undefined";
		}
		v4l_info(client, "Selected 65 MHz format: %s\n", p);
		switch (afc0 & 0x7) {
		case 0: p = "chroma"; break;
		case 1: p = "BTSC"; break;
		case 2: p = "EIAJ"; break;
		case 3: p = "A2-M"; break;
		case 4: p = "autodetect"; break;
		default: p = "undefined";
		}
		v4l_info(client, "Selected 45 MHz format: %s\n", p);
	}
}
/* ----------------------------------------------------------------------- */
/* This load_fw operation must be called to load the driver's firmware.
Without this the audio standard detection will fail and you will
only get mono.
Since loading the firmware is often problematic when the driver is
compiled into the kernel I recommend postponing calling this function
until the first open of the video device. Another reason for
postponing it is that loading this firmware takes a long time (seconds)
due to the slow i2c bus speed. So it will speed up the boot process if
you can avoid loading the fw as long as the video device isn't used. */
/*
 * cx25840_load_fw() - one-time chip initialization and firmware load.
 * Subsequent calls are no-ops; always returns 0.
 */
static int cx25840_load_fw(struct v4l2_subdev *sd)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (state->is_initialized)
		return 0;

	/* initialize and load firmware */
	state->is_initialized = 1;
	if (is_cx2583x(state))
		cx25836_initialize(client);
	else if (is_cx2388x(state))
		cx23885_initialize(client);
	else if (is_cx231xx(state))
		cx231xx_initialize(client);
	else
		cx25840_initialize(client);
	return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * cx25840_g_register() - VIDIOC_DBG_G_REGISTER backend: read one
 * decoder register.  Requires CAP_SYS_ADMIN.
 *
 * Fix: "®->match" was mojibake (encoding corruption) of
 * "&reg->match" and did not compile.
 */
static int cx25840_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	reg->size = 1;
	/* register map is 12 bits wide */
	reg->val = cx25840_read(client, reg->reg & 0x0fff);
	return 0;
}
/*
 * cx25840_s_register() - VIDIOC_DBG_S_REGISTER backend: write one
 * decoder register.  Requires CAP_SYS_ADMIN.
 *
 * Fix: "®->match" was mojibake (encoding corruption) of
 * "&reg->match" and did not compile.
 */
static int cx25840_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* 12-bit register address, 8-bit value */
	cx25840_write(client, reg->reg & 0x0fff, reg->val & 0xff);
	return 0;
}
#endif
/*
 * cx25840_s_audio_stream() - gate the audio output on or off by
 * toggling bit 7 of 0x115 and bits 0-1 of 0x116.  A no-op on chips
 * without this audio block (cx2583x/cx2388x/cx231xx).
 */
static int cx25840_s_audio_stream(struct v4l2_subdev *sd, int enable)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 v;

	if (is_cx2583x(state) || is_cx2388x(state) || is_cx231xx(state))
		return 0;

	v4l_dbg(1, cx25840_debug, client, "%s audio output\n",
		enable ? "enable" : "disable");

	v = cx25840_read(client, 0x115);
	cx25840_write(client, 0x115, enable ? v | 0x80 : v & ~0x80);
	v = cx25840_read(client, 0x116);
	cx25840_write(client, 0x116, enable ? v | 0x03 : v & ~0x03);
	return 0;
}
/*
 * cx25840_s_stream() - gate the video output on or off.  The register
 * set differs per chip family: 0x421 on cx2388x/cx231xx, 0x115/0x116
 * on the others.
 */
static int cx25840_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 v;

	v4l_dbg(1, cx25840_debug, client, "%s video output\n",
		enable ? "enable" : "disable");

	if (is_cx2388x(state) || is_cx231xx(state)) {
		v = cx25840_read(client, 0x421);
		cx25840_write(client, 0x421, enable ? v | 0x0b : v & ~0x0b);
	} else {
		v = cx25840_read(client, 0x115);
		cx25840_write(client, 0x115, enable ? v | 0x0c : v & ~0x0c);
		v = cx25840_read(client, 0x116);
		cx25840_write(client, 0x116, enable ? v | 0x04 : v & ~0x04);
	}
	return 0;
}
/* Query the current detected video format */
static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	/*
	 * Map the 4-bit detected-format field of status register 0x40c
	 * to a v4l2_std_id.  Made static const so the table is not
	 * rebuilt on the stack on every call; the index comments were
	 * also wrong (1001/1010 duplicated, 1100/1101 missing).
	 */
	static const v4l2_std_id stds[] = {
		/* 0000 */ V4L2_STD_UNKNOWN,
		/* 0001 */ V4L2_STD_NTSC_M,
		/* 0010 */ V4L2_STD_NTSC_M_JP,
		/* 0011 */ V4L2_STD_NTSC_443,
		/* 0100 */ V4L2_STD_PAL,
		/* 0101 */ V4L2_STD_PAL_M,
		/* 0110 */ V4L2_STD_PAL_N,
		/* 0111 */ V4L2_STD_PAL_Nc,
		/* 1000 */ V4L2_STD_PAL_60,
		/* 1001 */ V4L2_STD_UNKNOWN,
		/* 1010 */ V4L2_STD_UNKNOWN,
		/* 1011 */ V4L2_STD_UNKNOWN,
		/* 1100 */ V4L2_STD_UNKNOWN,
		/* 1101 */ V4L2_STD_UNKNOWN,
		/* 1110 */ V4L2_STD_UNKNOWN,
		/* 1111 */ V4L2_STD_UNKNOWN
	};
	u32 fmt = (cx25840_read4(client, 0x40c) >> 8) & 0xf;
	*std = stds[fmt];
	v4l_dbg(1, cx25840_debug, client, "g_std fmt = %x, v4l2_std_id = 0x%x\n",
		fmt, (unsigned int)stds[fmt]);
	return 0;
}
/*
 * cx25840_g_input_status() - limited signal-presence check.  Reports
 * V4L2_IN_ST_NO_SIGNAL when horizontal lock is lost (SRC lock is not
 * a reliable indicator on this part).
 */
static int cx25840_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u32 gen_stat;

	gen_stat = cx25840_read4(client, 0x40c);
	if (!(gen_stat & 0x00010000))
		*status |= V4L2_IN_ST_NO_SIGNAL;
	return 0;
}
/*
 * cx25840_s_std() - switch to TV mode with the given standard.
 * Skips the (expensive) register setup when already in TV mode with
 * the same standard.
 */
static int cx25840_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!state->radio && state->std == std)
		return 0;

	state->radio = 0;
	state->std = std;
	return set_v4lstd(client);
}
static int cx25840_s_radio(struct v4l2_subdev *sd)
{
struct cx25840_state *state = to_state(sd);
state->radio = 1;
return 0;
}
/*
 * cx25840_s_video_routing() - select a new video input, keeping the
 * currently configured audio input.
 */
static int cx25840_s_video_routing(struct v4l2_subdev *sd,
				   u32 input, u32 output, u32 config)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct cx25840_state *state = to_state(sd);

	/* cx2388x AV cores redo the standard timing before rerouting */
	if (is_cx2388x(state))
		cx23885_std_setup(client);

	return set_input(client, input, state->aud_input);
}
/*
 * cx25840_s_audio_routing() - select a new audio input, keeping the
 * currently configured video input.
 */
static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
				   u32 input, u32 output, u32 config)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct cx25840_state *state = to_state(sd);

	/* cx2388x AV cores redo the standard timing before rerouting */
	if (is_cx2388x(state))
		cx23885_std_setup(client);

	return set_input(client, state->vid_input, input);
}
/* Frequency changed: rerun format detection and audio configuration */
static int cx25840_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
	input_change(v4l2_get_subdevdata(sd));
	return 0;
}
/*
 * cx25840_g_tuner() - report signal presence and the detected audio
 * sub-channels from the decoder status registers.
 */
static int cx25840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 vpres = cx25840_read(client, 0x40e) & 0x20;	/* video-present bit */
	u8 mode;
	int val = 0;
	if (state->radio)
		return 0;
	vt->signal = vpres ? 0xffff : 0x0;
	/* cx2583x has no audio block; only signal strength is reported */
	if (is_cx2583x(state))
		return 0;
	vt->capability |=
		V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
		V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
	/* 0x804: detected audio mode (see log_audio_status() for codes) */
	mode = cx25840_read(client, 0x804);
	/* get rxsubchans and audmode */
	if ((mode & 0xf) == 1)
		val |= V4L2_TUNER_SUB_STEREO;
	else
		val |= V4L2_TUNER_SUB_MONO;
	/* 0x02 = dual, 0x04 = tri: report as bilingual (replaces the above) */
	if (mode == 2 || mode == 4)
		val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
	/* bit 4 indicates SAP presence */
	if (mode & 0x10)
		val |= V4L2_TUNER_SUB_SAP;
	vt->rxsubchans = val;
	vt->audmode = state->audmode;
	return 0;
}
/*
 * cx25840_s_tuner() - set the preferred audio mode in the low nibble
 * of register 0x809 for mono/stereo/bilingual reception.
 */
static int cx25840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	/* No audio registers to set in radio mode or on audio-less cx2583x */
	if (state->radio || is_cx2583x(state))
		return 0;
	switch (vt->audmode) {
	case V4L2_TUNER_MODE_MONO:
		/* mono      -> mono
		   stereo    -> mono
		   bilingual -> lang1 */
		cx25840_and_or(client, 0x809, ~0xf, 0x00);
		break;
	case V4L2_TUNER_MODE_STEREO:
	case V4L2_TUNER_MODE_LANG1:
		/* mono      -> mono
		   stereo    -> stereo
		   bilingual -> lang1 */
		cx25840_and_or(client, 0x809, ~0xf, 0x04);
		break;
	case V4L2_TUNER_MODE_LANG1_LANG2:
		/* mono      -> mono
		   stereo    -> stereo
		   bilingual -> lang1/lang2 */
		cx25840_and_or(client, 0x809, ~0xf, 0x07);
		break;
	case V4L2_TUNER_MODE_LANG2:
		/* mono      -> mono
		   stereo    -> stereo
		   bilingual -> lang2 */
		cx25840_and_or(client, 0x809, ~0xf, 0x01);
		break;
	default:
		return -EINVAL;
	}
	state->audmode = vt->audmode;
	return 0;
}
/*
 * cx25840_reset() - rerun the chip-family specific initialization
 * sequence.  @val is unused.
 */
static int cx25840_reset(struct v4l2_subdev *sd, u32 val)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	if (is_cx2583x(state))
		cx25836_initialize(client);
	else if (is_cx2388x(state))
		cx23885_initialize(client);
	else if (is_cx231xx(state))
		cx231xx_initialize(client);
	else
		cx25840_initialize(client);
	return 0;
}
/* Report the chip id and revision stored in the driver state */
static int cx25840_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct cx25840_state *state = to_state(sd);

	return v4l2_chip_ident_i2c_client(v4l2_get_subdevdata(sd), chip,
					  state->id, state->rev);
}
/*
 * cx25840_log_status() - dump video status, audio status (on chips
 * that have an audio block), IR status and control values.
 */
static int cx25840_log_status(struct v4l2_subdev *sd)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	log_video_status(client);
	/* cx2583x parts have no audio block to report on */
	if (!is_cx2583x(state))
		log_audio_status(client);
	cx25840_ir_log_status(sd);
	v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
	return 0;
}
/*
 * cx23885_irq_handler() - service AV core interrupts (IR, audio,
 * video) on CX2388x parts.  Sets *handled when an interrupt source
 * was serviced; returns the IR subhandler's status (0 otherwise).
 */
static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
			       bool *handled)
{
	struct cx25840_state *state = to_state(sd);
	struct i2c_client *c = v4l2_get_subdevdata(sd);
	u8 irq_stat, aud_stat, aud_en, ir_stat, ir_en;
	u32 vid_stat, aud_mc_stat;
	bool block_handled;
	int ret = 0;
	irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG);
	v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (entry): %s %s %s\n",
		irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ",
		irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ",
		irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? "vid" : " ");
	/* IR: only the cx23885/cx23887 have the AV-core IR controller */
	if ((is_cx23885(state) || is_cx23887(state))) {
		ir_stat = cx25840_read(c, CX25840_IR_STATS_REG);
		ir_en = cx25840_read(c, CX25840_IR_IRQEN_REG);
		v4l_dbg(2, cx25840_debug, c,
			"AV Core ir IRQ status: %#04x disables: %#04x\n",
			ir_stat, ir_en);
		if (irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT) {
			block_handled = false;
			ret = cx25840_ir_irq_handler(sd,
						     status, &block_handled);
			if (block_handled)
				*handled = true;
		}
	}
	/* Audio interrupts: write the latched status bits back to clear */
	aud_stat = cx25840_read(c, CX25840_AUD_INT_STAT_REG);
	aud_en = cx25840_read(c, CX25840_AUD_INT_CTRL_REG);
	v4l_dbg(2, cx25840_debug, c,
		"AV Core audio IRQ status: %#04x disables: %#04x\n",
		aud_stat, aud_en);
	aud_mc_stat = cx25840_read4(c, CX23885_AUD_MC_INT_MASK_REG);
	v4l_dbg(2, cx25840_debug, c,
		"AV Core audio MC IRQ status: %#06x enables: %#06x\n",
		aud_mc_stat >> CX23885_AUD_MC_INT_STAT_SHFT,
		aud_mc_stat & CX23885_AUD_MC_INT_CTRL_BITS);
	if (irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT) {
		if (aud_stat) {
			cx25840_write(c, CX25840_AUD_INT_STAT_REG, aud_stat);
			*handled = true;
		}
	}
	/* Video interrupts: same write-back scheme, 32-bit register */
	vid_stat = cx25840_read4(c, CX25840_VID_INT_STAT_REG);
	v4l_dbg(2, cx25840_debug, c,
		"AV Core video IRQ status: %#06x disables: %#06x\n",
		vid_stat & CX25840_VID_INT_STAT_BITS,
		vid_stat >> CX25840_VID_INT_MASK_SHFT);
	if (irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT) {
		if (vid_stat & CX25840_VID_INT_STAT_BITS) {
			cx25840_write4(c, CX25840_VID_INT_STAT_REG, vid_stat);
			*handled = true;
		}
	}
	/* Re-read the summary register for the exit debug trace */
	irq_stat = cx25840_read(c, CX23885_PIN_CTRL_IRQ_REG);
	v4l_dbg(2, cx25840_debug, c, "AV Core IRQ status (exit): %s %s %s\n",
		irq_stat & CX23885_PIN_CTRL_IRQ_IR_STAT ? "ir" : " ",
		irq_stat & CX23885_PIN_CTRL_IRQ_AUD_STAT ? "aud" : " ",
		irq_stat & CX23885_PIN_CTRL_IRQ_VID_STAT ? "vid" : " ");
	return ret;
}
/*
 * Top-level subdev interrupt service routine.
 *
 * Clears *handled, then dispatches to the CX2388[578] AV Core handler
 * when the chip is one of those variants; any other chip gets -ENODEV.
 * Returns the AV Core handler's result on the supported path.
 */
static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
			       bool *handled)
{
	struct cx25840_state *state = to_state(sd);

	*handled = false;

	/* Only support the CX2388[578] AV Core for now */
	if (!is_cx2388x(state))
		return -ENODEV;

	return cx23885_irq_handler(sd, status, handled);
}
/* ----------------------------------------------------------------------- */

/*
 * DIF (digital IF demodulator) register addresses.
 *
 * DIF_PLL_FREQ_WORD receives the PLL tuning word computed as
 * ifHz * 2^28 / 50 MHz (see cx23885_dif_setup() below).
 *
 * The DIF_BPF_COEFF* registers hold the band-pass filter coefficient
 * table; judging by the 32-bit values written below, each register
 * appears to pack two 16-bit coefficients (DIF_BPF_COEFF36 carrying
 * only the final one in its upper half) — TODO confirm against the
 * Conexant datasheet.
 */
#define DIF_PLL_FREQ_WORD (0x300)
#define DIF_BPF_COEFF01 (0x348)
#define DIF_BPF_COEFF23 (0x34c)
#define DIF_BPF_COEFF45 (0x350)
#define DIF_BPF_COEFF67 (0x354)
#define DIF_BPF_COEFF89 (0x358)
#define DIF_BPF_COEFF1011 (0x35c)
#define DIF_BPF_COEFF1213 (0x360)
#define DIF_BPF_COEFF1415 (0x364)
#define DIF_BPF_COEFF1617 (0x368)
#define DIF_BPF_COEFF1819 (0x36c)
#define DIF_BPF_COEFF2021 (0x370)
#define DIF_BPF_COEFF2223 (0x374)
#define DIF_BPF_COEFF2425 (0x378)
#define DIF_BPF_COEFF2627 (0x37c)
#define DIF_BPF_COEFF2829 (0x380)
#define DIF_BPF_COEFF3031 (0x384)
#define DIF_BPF_COEFF3233 (0x388)
#define DIF_BPF_COEFF3435 (0x38c)
#define DIF_BPF_COEFF36 (0x390)
void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
{
u64 pll_freq;
u32 pll_freq_word;
v4l_dbg(1, cx25840_debug, client, "%s(%d)\n", __func__, ifHz);
/* Assuming TV */
/* Calculate the PLL frequency word based on the adjusted ifHz */
pll_freq = div_u64((u64)ifHz * 268435456, 50000000);
pll_freq_word = (u32)pll_freq;
cx25840_write4(client, DIF_PLL_FREQ_WORD, pll_freq_word);
/* Round down to the nearest 100KHz */
ifHz = (ifHz / 100000) * 100000;
if (ifHz < 3000000)
ifHz = 3000000;
if (ifHz > 16000000)
ifHz = 16000000;
v4l_dbg(1, cx25840_debug, client, "%s(%d) again\n", __func__, ifHz);
switch (ifHz) {
case 3000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0024);
cx25840_write4(client, DIF_BPF_COEFF67, 0x001bfff8);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff50);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed8fe68);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe34);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfebaffc7);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d031f);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04f0065d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x07010688);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x04c901d6);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f9d3);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f342);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f337);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf64efb22);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105070f);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0c460fce);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00220032);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00370026);
cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff91);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0efe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fdcc);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe0afedb);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440224);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0434060c);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738074e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x06090361);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99fb39);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef3b6);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21af2a5);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf573fa33);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034067d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bfb0fb9);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000e);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00200038);
cx25840_write4(client, DIF_BPF_COEFF67, 0x004c004f);
cx25840_write4(client, DIF_BPF_COEFF89, 0x002fffdf);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff5cfeb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dfd92);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7ffe03);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36010a);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x03410575);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x072607d2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x071804d5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134fcb7);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff451);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223f22e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4a7f94b);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xff6405e8);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bae0fa4);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00000008);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0036);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0056006d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00670030);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffbdff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46fd8d);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd25fd4f);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35ffe0);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0224049f);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9080e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x07ef0627);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9fe45);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f513);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250f1d2);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3ecf869);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930552);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b5f0f8f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0001);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000f002c);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0054007d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0093007c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0024ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6fdbb);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd03fcca);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51feb9);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x00eb0392);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270802);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08880750);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x044dffdb);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf5f8);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0f193);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf342f78f);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc404b9);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b0e0f78);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff9);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0002001b);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0046007d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad00ba);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00870000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe1a);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1bfc7e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fda4);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xffa5025c);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x054507ad);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08dd0847);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80172);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef6ff);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313f170);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2abf6bd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6041f);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0abc0f61);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff3);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff50006);
cx25840_write4(client, DIF_BPF_COEFF67, 0x002f006c);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00b200e3);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00dc007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9fea0);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd6bfc71);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fcb1);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe65010b);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x042d0713);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ec0906);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020302);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff823);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7f16a);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf228f5f5);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2a0384);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a670f4a);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7ffef);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9fff1);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0010004d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00a100f2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x011a00f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053ff44);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdedfca2);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fbef);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd39ffae);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x02ea0638);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08b50987);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230483);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f960);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45bf180);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1b8f537);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb6102e7);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a110f32);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffdd);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00024);
cx25840_write4(client, DIF_BPF_COEFF89, 0x007c00e5);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013a014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e6fff8);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe98fd0f);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fb67);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc32fe54);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x01880525);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x083909c7);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x091505ee);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7fab3);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52df1b4);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf15df484);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9b0249);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x09ba0f19);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 3900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff0);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffcf);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1fff6);
cx25840_write4(client, DIF_BPF_COEFF89, 0x004800be);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01390184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x016300ac);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff5efdb1);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb23);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb5cfd0d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x001703e4);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x077b09c4);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d2073c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251fc18);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61cf203);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf118f3dc);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d801aa);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x09600eff);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffefff4);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffc8);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffca);
cx25840_write4(client, DIF_BPF_COEFF89, 0x000b0082);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01170198);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10152);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0030fe7b);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb24);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfac3fbe9);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5027f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0683097f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a560867);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2fd89);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723f26f);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0e8f341);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919010a);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x09060ee5);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fffb);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8ffca);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffa4);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffcd0036);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d70184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f601dc);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ffff60);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb6d);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6efaf5);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd410103);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x055708f9);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e0969);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543ff02);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842f2f5);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cef2b2);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85e006b);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x08aa0ecb);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00050003);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff3ffd3);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaff8b);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ffe5);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0080014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe023f);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ba0050);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fbf8);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa62fa3b);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9ff7e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x04010836);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa90a3d);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f007f);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf975f395);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cbf231);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ffcb);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x084c0eaf);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000a);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0000ffe4);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff81);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff96);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x001c00f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70271);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0254013b);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fcbd);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa9ff9c5);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbfdfe);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x028c073b);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a750adf);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e101fa);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab8f44e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0ddf1be);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ff2b);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x07ed0e94);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000f);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000efff8);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff87);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff54);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffb5007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01860270);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c00210);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fdb2);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb22f997);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2fc90);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0102060f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a050b4c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902036e);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0af51e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf106f15a);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64efe8b);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x078d0e77);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0019000e);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff9e);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff25);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff560000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112023b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f702c0);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfec8);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbe5f9b3);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947fb41);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xff7004b9);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a0b81);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a0004d8);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd65f603);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf144f104);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aafdec);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x072b0e5a);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00060012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00200022);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ffc1);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ff10);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff09ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x008601d7);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f50340);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fff0);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcddfa19);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa1e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfde30343);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x08790b7f);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50631);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec7f6fc);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf198f0bd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50dfd4e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x06c90e3d);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00220030);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffed);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff87ff15);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed6ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffed014c);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b90386);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110119);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfdfefac4);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6f92f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc6701b7);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x07670b44);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0776);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x002df807);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf200f086);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477fcb1);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x06650e1e);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0009);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0038);
cx25840_write4(client, DIF_BPF_COEFF67, 0x003f001b);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffbcff36);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec2feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff5600a5);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0248038d);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00232);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xff39fbab);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4f87f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb060020);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x062a0ad2);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf908a3);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0192f922);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf27df05e);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8fc14);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x06000e00);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 4900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0002);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00160037);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00510046);
cx25840_write4(client, DIF_BPF_COEFF89, 0xfff9ff6d);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed0fe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefff0);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01aa0356);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413032b);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x007ffcc5);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cf812);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9cefe87);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c90a2c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4309b4);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f3fa4a);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf30ef046);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361fb7a);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x059b0de0);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000a002d);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00570067);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0037ffb5);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfefffe68);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62ff3d);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ec02e3);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x043503f6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x01befe05);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa27f7ee);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8c6fcf8);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x034c0954);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0aa4);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x044cfb7e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3b1f03f);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2fae1);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x05340dc0);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff4);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfffd001e);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0051007b);
cx25840_write4(client, DIF_BPF_COEFF89, 0x006e0006);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff48fe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfe9a);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x001d023e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130488);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x02e6ff5b);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1ef812);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7f7fb7f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bc084e);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430b72);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x059afcba);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf467f046);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cfa4a);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x04cd0da0);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffef);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff00009);
cx25840_write4(client, DIF_BPF_COEFF67, 0x003f007f);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00980056);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffa5feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe15);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff4b0170);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b004d7);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x03e800b9);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc48f87f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf768fa23);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022071f);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90c1b);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x06dafdfd);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf52df05e);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef9b5);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x04640d7f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6fff3);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00250072);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00af009c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x000cff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13fdb8);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe870089);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104e1);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04b8020f);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd98f92f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf71df8f0);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe8805ce);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0c9c);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0808ff44);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf603f086);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af922);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x03fb0d5e);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffef);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe0);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00050056);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00b000d1);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0071ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53fd8c);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfddfff99);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104a3);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x054a034d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xff01fa1e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf717f7ed);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf50461);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50cf4);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0921008d);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf6e7f0bd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff891);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x03920d3b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffffff3);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffd1);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5002f);
cx25840_write4(client, DIF_BPF_COEFF89, 0x009c00ed);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00cb0000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfebafd94);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd61feb0);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d0422);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x05970464);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0074fb41);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf759f721);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb7502de);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000d21);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a2201d4);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7d9f104);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf804);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x03280d19);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fffa);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3ffc9);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc90002);
cx25840_write4(client, DIF_BPF_COEFF89, 0x007500ef);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x010e007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3dfdcf);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd16fddd);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440365);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x059b0548);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3fc90);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7dff691);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0f014d);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020d23);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0318);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8d7f15a);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f779);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x02bd0cf6);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
/*
 * Each case below programs one complete DIF band-pass filter
 * coefficient set: 18 writes of two packed 16-bit coefficients
 * (DIF_BPF_COEFF01 .. DIF_BPF_COEFF3435) plus a final write for
 * coefficient 36 (DIF_BPF_COEFF36, low half zero).  The case values
 * step in units of 100000 and are presumably the IF frequency in Hz
 * (5.7 MHz .. 9.0 MHz here) — TODO confirm against the switch
 * expression, which is outside this view.  The coefficient values
 * themselves are an opaque filter table; do not edit by hand.
 */
case 5700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00060001);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffecffc9);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffd4);
cx25840_write4(client, DIF_BPF_COEFF89, 0x004000d5);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013600f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd3fe39);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd04fd31);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff360277);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x055605ef);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x033efdfe);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8a5f642);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbffb6);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10cfb);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50456);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9dff1be);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f6f2);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x02520cd2);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00080009);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff8ffd2);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffac);
cx25840_write4(client, DIF_BPF_COEFF89, 0x000200a3);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013c014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x006dfec9);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2bfcb7);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe350165);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04cb0651);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477ff7e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9a5f635);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1fe20);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0ca8);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c81058b);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfaf0f231);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f66d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x01e60cae);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 5900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000e);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0005ffe1);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffacff90);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5005f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01210184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fcff72);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd8afc77);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51003f);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04020669);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830103);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfad7f66b);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8fc93);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430c2b);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d06b5);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfc08f2b2);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af5ec);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x017b0c89);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0012fff5);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaff82);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff8e000f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e80198);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750028);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe18fc75);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99ff15);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x03050636);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0656027f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc32f6e2);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614fb17);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20b87);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d7707d2);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfd26f341);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf56f);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x010f0c64);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00050012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001c000b);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff84);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ffbe);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00960184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd00da);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeccfcb2);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fdf9);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x01e005bc);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e703e4);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfdabf798);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f9b3);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510abd);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf08df);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfe48f3dc);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f4f6);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x00a20c3e);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0002000f);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0021001f);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff97);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff74);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0034014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa0179);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff97fd2a);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fcfa);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x00a304fe);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310525);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xff37f886);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf86e);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c709d0);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de209db);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xff6df484);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf481);
cx25840_write4(client, DIF_BPF_COEFF3435, 0x00360c18);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe000a);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0021002f);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ffb8);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff3b);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffcc00f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa01fa);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0069fdd4);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fc26);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xff5d0407);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310638);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x00c9f9a8);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf74e);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xff3908c3);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de20ac3);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0093f537);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf410);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xffca0bf2);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb0003);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001c0037);
cx25840_write4(client, DIF_BPF_COEFF67, 0x002fffe2);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ff17);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff6a007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd0251);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0134fea5);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb8b);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe2002e0);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e70713);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0255faf5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f658);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0799);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf0b96);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x01b8f5f5);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f3a3);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xff5e0bca);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00120037);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00460010);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff8eff0f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff180000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750276);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01e8ff8d);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb31);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcfb0198);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x065607ad);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x03cefc64);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614f592);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0656);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d770c52);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x02daf6bd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf33b);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfef10ba3);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff5);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0005002f);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0054003c);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5ff22);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedfff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fc0267);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0276007e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb1c);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbfe003e);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830802);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0529fdec);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8f4fe);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd04ff);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d0cf6);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x03f8f78f);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af2d7);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe850b7b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff80020);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00560060);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0002ff4e);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec4ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x006d0225);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02d50166);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb4e);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb35fee1);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477080e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x065bff82);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1f4a0);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610397);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c810d80);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0510f869);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f278);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe1a0b52);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffaffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffec000c);
cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0078);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0040ff8e);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecafeb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd301b6);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fc0235);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fbc5);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaaafd90);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x033e07d2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x075b011b);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbf47a);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f0224);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50def);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0621f94b);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f21e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfdae0b29);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 6900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffef);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3fff6);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0037007f);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0075ffdc);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef2fe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3d0122);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02ea02dd);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fc79);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa65fc5d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3074e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x082102ad);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0ff48c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe00a9);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0e43);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0729fa33);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f1c9);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfd430b00);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0001fff3);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe2);
cx25840_write4(client, DIF_BPF_COEFF67, 0x001b0076);
cx25840_write4(client, DIF_BPF_COEFF89, 0x009c002d);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff35fe68);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfeba0076);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x029f0352);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfd60);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa69fb53);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x00740688);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08a7042d);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb75f4d6);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600ff2d);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a220e7a);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0827fb22);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf17a);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfcd80ad6);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff9);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffd2);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb005e);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00b0007a);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8ffe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53ffc1);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0221038c);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fe6e);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfab6fa80);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xff010587);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e90590);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf5f556);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfdb3);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x09210e95);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0919fc15);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff12f);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc6e0aab);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00070000);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6ffc9);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0039);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00af00b8);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfff4feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13ff10);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01790388);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311ff92);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb48f9ed);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd980453);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e306cd);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe88f60a);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fc40);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x08080e93);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x09fdfd0c);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af0ea);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc050a81);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00080008);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff0ffc9);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1000d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x009800e2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x005bff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe74);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x00b50345);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000bc);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc18f9a1);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc4802f9);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x089807dc);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022f6f0);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fada);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x06da0e74);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ad3fe06);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef0ab);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb9c0a55);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000e);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfffdffd0);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffdf);
cx25840_write4(client, DIF_BPF_COEFF89, 0x006e00f2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00b8ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfdf8);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xffe302c8);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x041301dc);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd1af99e);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1e0183);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x080908b5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bcf801);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf985);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x059a0e38);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b99ff03);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cf071);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb330a2a);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00070011);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000affdf);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffa9ffb5);
cx25840_write4(client, DIF_BPF_COEFF89, 0x003700e6);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01010000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62fda8);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff140219);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x043502e1);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe42f9e6);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa270000);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x073a0953);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x034cf939);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a4f845);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x044c0de1);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c4f0000);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2f03c);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfacc09fe);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00040012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0016fff3);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffafff95);
cx25840_write4(client, DIF_BPF_COEFF89, 0xfff900c0);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0130007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefd89);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe560146);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x041303bc);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xff81fa76);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cfe7d);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x063209b1);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c9fa93);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf71e);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f30d6e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cf200fd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361f00e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa6509d1);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00010010);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0008);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ff84);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffbc0084);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013e00f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff56fd9f);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdb8005c);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00460);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x00c7fb45);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4fd07);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x04fa09ce);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x062afc07);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407f614);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x01920ce0);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d8301fa);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8efe5);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa0009a4);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd000b);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001d);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff82);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff870039);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x012a014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffedfde7);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd47ff6b);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104c6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0202fc4c);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6fbad);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x039909a7);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0767fd8e);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482f52b);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x002d0c39);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e0002f4);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477efc2);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf99b0977);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 7900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa0004);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0020002d);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff91);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ffe8);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f70184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0086fe5c);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0bfe85);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104e5);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0323fd7d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa79);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x021d093f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0879ff22);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bf465);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec70b79);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e6803eb);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50defa5);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf937094a);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffd);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00190036);
cx25840_write4(client, DIF_BPF_COEFF67, 0x001bffaf);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff99);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00aa0198);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112fef3);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd09fdb9);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04be);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x041bfecc);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947f978);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x00900897);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a00b9);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f3c5);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd650aa3);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ebc04de);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aaef8e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf8d5091c);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff6);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000e0038);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ffd7);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff56);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x004b0184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0186ffa1);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd40fd16);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440452);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04de0029);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2f8b2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfefe07b5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a05024d);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef34d);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0a09b8);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0efa05cd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64eef7d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf87308ed);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00000031);
cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0005);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff27);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffe4014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70057);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdacfca6);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3603a7);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x05610184);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbf82e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd74069f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a7503d6);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff2ff);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab808b9);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f2306b5);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ef72);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf81308bf);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff30022);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00560032);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ff10);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8000f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe0106);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe46fc71);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3502c7);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x059e02ce);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9f7f2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfbff055b);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa9054c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f2db);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf97507aa);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f350797);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ef6d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7b40890);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8000f);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00540058);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffcdff14);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff29007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f6019e);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff01fc7c);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5101bf);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x059203f6);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd41f7fe);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfaa903f3);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e06a9);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf2e2);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842068b);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f320871);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85eef6e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7560860);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fff2);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1fff9);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00460073);
cx25840_write4(client, DIF_BPF_COEFF89, 0x000bff34);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee90000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10215);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xffd0fcc5);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99009d);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x053d04f1);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5f853);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf97d0270);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a5607e4);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef314);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723055f);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f180943);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919ef75);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6fa0830);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff8);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe4);
cx25840_write4(client, DIF_BPF_COEFF67, 0x002f007f);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0048ff6b);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec7ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0163025f);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x00a2fd47);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff73);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04a405b2);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0017f8ed);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf88500dc);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d208f9);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff370);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61c0429);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ee80a0b);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d8ef82);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6a00800);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0007ffff);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffd4);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0010007a);
cx25840_write4(client, DIF_BPF_COEFF89, 0x007cffb2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec6ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e60277);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0168fdf9);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fe50);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x03ce0631);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0188f9c8);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7c7ff43);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x091509e3);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f3f6);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52d02ea);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ea30ac9);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9bef95);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf64607d0);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00090007);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9ffca);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00065);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00a10003);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee6feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053025b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0213fed0);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fd46);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x02c70668);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x02eafadb);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf74bfdae);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230a9c);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f4a3);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45b01a6);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e480b7c);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb61efae);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf5ef079f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 8900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000d);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff5ffc8);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10043);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00b20053);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff24fe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9020c);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0295ffbb);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fc64);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x019b0654);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x042dfc1c);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf714fc2a);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020b21);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f575);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7005e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0dd80c24);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2aefcd);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf599076e);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00060011);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0002ffcf);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffba0018);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad009a);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff79fe68);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff260192);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e500ab);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fbb6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x005b05f7);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545fd81);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf723fabf);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80b70);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f669);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313ff15);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d550cbf);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6eff2);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544073d);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00030012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000fffdd);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffea);
cx25840_write4(client, DIF_BPF_COEFF89, 0x009300cf);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdcfe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea600f7);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fd0190);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb46);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xff150554);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0627fefd);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf778f978);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x044d0b87);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f77d);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0fdcf);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbe0d4e);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc4f01d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2070b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00000010);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001afff0);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffbf);
cx25840_write4(client, DIF_BPF_COEFF89, 0x006700ed);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe460047);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02db0258);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb1b);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddc0473);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c90082);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf811f85e);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c90b66);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff8ad);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250fc8d);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c140dcf);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe93f04d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a106d9);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc000c);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00200006);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff9c);
cx25840_write4(client, DIF_BPF_COEFF89, 0x002f00ef);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a4ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dff92);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x028102f7);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb37);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbf035e);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x07260202);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e8f778);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x01340b0d);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f9f4);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223fb51);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b590e42);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xff64f083);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf45206a7);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90005);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001a);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff86);
cx25840_write4(client, DIF_BPF_COEFF89, 0xfff000d7);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fee5);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f60362);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb99);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbcc0222);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x07380370);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f7f6cc);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xff990a7e);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902fb50);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21afa1f);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8d0ea6);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034f0bf);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4050675);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffe);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001e002b);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff81);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffb400a5);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe50);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01460390);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfc3a);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb1000ce);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x070104bf);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb37f65f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe0009bc);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00fcbb);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f8f8);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b20efc);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105f101);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3ba0642);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff7);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00150036);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff8c);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff810061);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013d007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe71fddf);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x007c0380);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fd13);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa94ff70);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x068005e2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9bf633);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc7308ca);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5fe30);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf274f7e0);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c90f43);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d4f147);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371060f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fff1);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00090038);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffa7);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff5e0012);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013200f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee3fd9b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaa0331);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fe15);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa60fe18);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bd06d1);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1bf64a);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafa07ae);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7effab);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d5f6d7);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d30f7a);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a3f194);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf32905dc);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfffb0032);
cx25840_write4(client, DIF_BPF_COEFF67, 0x003fffcd);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effc1);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6efd8a);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfedd02aa);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0ff34);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa74fcd7);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bf0781);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaaf6a3);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99e066b);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90128);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf359f5e1);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d20fa2);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0370f1e5);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e405a8);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 9900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffef0024);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0051fffa);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff54ff77);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00be0184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0006fdad);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe2701f3);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413005e);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad1fbba);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x039007ee);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x013bf73d);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868050a);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4302a1);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fdf4fe);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c70fba);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x043bf23c);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a10575);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fff1);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe50011);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00570027);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff70ff3c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00620198);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x009efe01);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95011a);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350183);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb71fad0);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x023c0812);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c3f811);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75e0390);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0411);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c1f432);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b30fc1);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0503f297);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2610541);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0006fff7);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdffffc);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00510050);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff9dff18);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffc0184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0128fe80);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32002e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130292);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4dfa21);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d107ee);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0435f91c);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6850205);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430573);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a1f37d);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x03990fba);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c7f2f8);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222050d);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffe);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdfffe7);
cx25840_write4(client, DIF_BPF_COEFF67, 0x003f006e);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffd6ff0f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0197ff1f);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd05ff3e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0037c);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd59f9b7);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5d0781);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x0585fa56);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4006f);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf906c4);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69df2e0);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x02790fa2);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0688f35d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e604d8);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00090005);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4ffd6);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0025007e);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0014ff20);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3c00f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e1ffd0);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd12fe5c);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110433);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88f996);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf106d1);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aafbb7);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57efed8);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e07ff);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b0f25e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x01560f7a);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0745f3c7);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1ac04a4);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000c);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffedffcb);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0005007d);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0050ff4c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ff0086);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd58fd97);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104ad);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xffcaf9c0);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc9905e2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x079afd35);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf555fd46);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50920);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d9f1f6);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x00310f43);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fdf435);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174046f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00050011);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfffaffc8);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5006b);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0082ff8c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f00130);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd2fcfc);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04e3);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x010efa32);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb6404bf);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x084efec5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569fbc2);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000a23);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa15f1ab);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0b0efc);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b0f4a7);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13f043a);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0x00020012);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0007ffcd);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9004c);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00a4ffd9);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3ff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b401c1);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe76fc97);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404d2);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0245fae8);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5f0370);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1005f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bcfa52);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020b04);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb60f17b);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde70ea6);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x095df51e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10c0405);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0011);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0014ffdb);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40023);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2002a);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedbff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150022d);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xff38fc6f);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36047b);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x035efbda);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9940202);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ee01f5);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf649f8fe);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10bc2);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb6f169);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc60e42);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a04f599);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0db03d0);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb000d);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001dffed);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffaafff5);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00aa0077);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00ce026b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x000afc85);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3503e3);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x044cfcfb);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90c0082);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5037f);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf710f7cc);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0c59);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe16f173);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaa0dcf);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa5f617);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0ad039b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 10900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90006);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00210003);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffc8);
cx25840_write4(client, DIF_BPF_COEFF89, 0x008e00b6);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff63fe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x003a0275);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x00dafcda);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd510313);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0501fe40);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cbfefd);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x087604f0);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80af6c2);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430cc8);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7af19a);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa940d4e);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3ff699);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0810365);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffff);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00210018);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffa3);
cx25840_write4(client, DIF_BPF_COEFF89, 0x006000e1);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc4fe68);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0024b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x019afd66);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc990216);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0575ff99);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4fd81);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d40640);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf932f5e6);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20d0d);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x00dff1de);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9860cbf);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd1f71e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058032f);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff8);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001b0029);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff8a);
cx25840_write4(client, DIF_BPF_COEFF89, 0x002600f2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x002cfe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0f01f0);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x023bfe20);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc1700fa);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a200f7);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf927fc1c);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f40765);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa82f53b);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510d27);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0243f23d);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8810c24);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5cf7a7);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf03102fa);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00110035);
cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff81);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffe700e7);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x008ffeb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94016d);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b0fefb);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3ffd1);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x05850249);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c1fadb);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x05de0858);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf2f4c4);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c70d17);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a0f2b8);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7870b7c);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdff833);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00d02c4);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00040038);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ff88);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffac00c2);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e2ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe3900cb);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f1ffe9);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3feaa);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x05210381);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9cf9c8);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x04990912);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7af484);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xff390cdb);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f4f34d);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69a0ac9);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5af8c1);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xefec028e);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0000ffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfff60033);
cx25840_write4(client, DIF_BPF_COEFF67, 0x002fff9f);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff7b0087);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x011eff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe080018);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f900d8);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fd96);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x04790490);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbadf8ed);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x032f098e);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xff10f47d);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0c75);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x063cf3fc);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5ba0a0b);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dccf952);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcd0258);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff1);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffea0026);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0046ffc3);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff5a003c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04ff63);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c801b8);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fca6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397056a);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcecf853);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x01ad09c9);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x00acf4ad);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0be7);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0773f4c2);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e90943);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35f9e6);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb10221);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff6);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffe20014);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0054ffee);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effeb);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2efebb);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260027a);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fbe6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x02870605);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4af7fe);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x001d09c1);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0243f515);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd0b32);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x0897f59e);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4280871);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e95fa7c);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xef9701eb);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 11700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffd);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffff);
cx25840_write4(client, DIF_BPF_COEFF67, 0x0056001d);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff57ff9c);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x011300f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe82fe2e);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ca0310);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb62);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155065a);
cx25840_write4(client, DIF_BPF_COEFF2021, 0xffbaf7f2);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8c0977);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x03cef5b2);
cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610a58);
cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a5f68f);
cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3790797);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eebfb14);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xef8001b5);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
/*
 * DIF band-pass filter coefficient sets, one per case label.
 * Each case performs nineteen 32-bit register writes: eighteen words
 * (DIF_BPF_COEFF01 .. DIF_BPF_COEFF3435) that each pack a pair of
 * 16-bit filter coefficients, plus DIF_BPF_COEFF36 whose low half is
 * always 0x0000 (the coefficient count is odd).
 * Case labels increase in steps of 100000 — presumably the IF/carrier
 * frequency in Hz; NOTE(review): confirm units against the switch
 * expression in the enclosing function (not visible in this chunk).
 * Values are generated filter-design output; do not hand-edit.
 */
case 11800000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00080004);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe9);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0047);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff75ff58);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1014a);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef9fdc8);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111036f);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb21);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x00120665);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x012df82e);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd0708ec);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542f682);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f095c);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9af792);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2db06b5);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f38fbad);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6c017e);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 11900000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000b);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe7ffd8);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x00370068);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffa4ff28);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x00790184);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87fd91);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x00430392);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb26);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfece0626);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294f8b2);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb990825);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0698f77f);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe0842);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b73f8a7);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf25105cd);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7bfc48);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5a0148);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12000000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00050010);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xfff2ffcc);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x001b007b);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffdfff10);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x00140198);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x0020fd8e);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xff710375);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb73);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9a059f);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e0f978);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4e0726);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c8f8a7);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600070c);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2ff9c9);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1db04de);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4fce5);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4b0111);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12100000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00010012);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffffffc8);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb007e);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x001dff14);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xffad0184);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b7fdbe);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9031b);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fc01);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc8504d6);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x0504fa79);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf93005f6);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x08caf9f2);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52b05c0);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccbfaf9);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf17903eb);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3fd83);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3f00db);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12200000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe0011);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x000cffcc);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0071);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x0058ff32);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4f014a);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x013cfe1f);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfb028a);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fcc9);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9d03d6);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f4fbad);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848049d);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0999fb5b);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf4820461);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d46fc32);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12d02f4);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x1007fe21);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3600a4);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12300000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x0017ffd9);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffc10055);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x0088ff68);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0400f0);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a6fea7);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7501cc);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fdc0);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaef02a8);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a7fd07);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79d0326);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a31fcda);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf40702f3);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9ffd72);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f601fa);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x1021fec0);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2f006d);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12400000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80007);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x001fffeb);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffaf002d);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00a8ffb0);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3007e);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e9ff4c);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2000ee);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fed8);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82015c);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x0715fe7d);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7340198);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8dfe69);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bd017c);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd5feb8);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d500fd);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x1031ff60);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2b0037);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
/*
 * 12500000: the low 16-bit half of every coefficient word is zero at
 * this entry; the cases above and below it share high halves with the
 * low halves sign-flipped, so this looks like the table's symmetry
 * midpoint — NOTE(review): inferred from the values, not documented.
 */
case 12500000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff70000);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x00220000);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffa90000);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00b30000);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec20000);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x02000000);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd030000);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350000);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa5e0000);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x073b0000);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7110000);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aac0000);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a40000);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de70000);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0c90000);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x10360000);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef290000);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12600000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff9);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x001f0015);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffd3);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00a80050);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3ff82);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e900b4);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd20ff12);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130128);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82fea4);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x07150183);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf734fe68);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8d0197);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdfe84);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd50148);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d5ff03);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x103100a0);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2bffc9);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12700000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x00170027);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ffab);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00880098);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff04ff10);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a60159);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd75fe34);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00240);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaeffd58);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a702f9);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79dfcda);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a310326);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fd0d);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9f028e);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f6fe06);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x10210140);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2fff93);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12800000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffef);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x000c0034);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff8f);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x005800ce);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4ffeb6);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x013c01e1);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfbfd76);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110337);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9dfc2a);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f40453);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848fb63);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x099904a5);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fb9f);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d4603ce);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12dfd0c);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x100701df);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef36ff5c);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 12900000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0001ffee);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffff0038);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff82);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x001d00ec);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xffadfe7c);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b70242);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9fce5);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x024103ff);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc85fb2a);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x05040587);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xf930fa0a);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x08ca060e);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfa40);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccb0507);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf179fc15);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3027d);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3fff25);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13000000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff0);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xfff20034);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x001bff85);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffdf00f0);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x0014fe68);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x00200272);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xff71fc8b);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d048d);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9afa61);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e00688);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4ef8da);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c80759);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f8f4);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2f0637);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1dbfb22);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4031b);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4bfeef);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13100000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff5);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe70028);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ff98);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffa400d8);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x0079fe7c);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87026f);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x0043fc6e);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404da);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfecef9da);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294074e);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb99f7db);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x06980881);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef7be);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b730759);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf251fa33);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7b03b8);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5afeb8);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13200000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffc);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe00017);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x004cffb9);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff7500a8);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1feb6);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef90238);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111fc91);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604df);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0012f99b);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x012d07d2);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd07f714);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542097e);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff6a4);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9a086e);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2dbf94b);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f380453);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6cfe82);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13300000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00080003);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffde0001);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0056ffe3);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff570064);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x0113ff10);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe8201d2);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x01cafcf0);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35049e);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155f9a6);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xffba080e);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8cf689);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x03ce0a4e);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f5a8);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a50971);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf379f869);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eeb04ec);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef80fe4b);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13400000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000a);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe2ffec);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x00540012);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e0015);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137ff82);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2e0145);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260fd86);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51041a);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0287f9fb);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4a0802);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x001df63f);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x02430aeb);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf4ce);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x08970a62);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf428f78f);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e950584);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xef97fe15);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13500000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000f);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffeaffda);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0046003d);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff5affc4);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04009d);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c8fe48);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99035a);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397fa96);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcec07ad);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x01adf637);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0x00ac0b53);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef419);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x07730b3e);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e9f6bd);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35061a);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb1fddf);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13600000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00000012);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xfff6ffcd);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x002f0061);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff7bff79);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x011e007e);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe08ffe8);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f9ff28);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17026a);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0479fb70);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbad0713);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x032ff672);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xff100b83);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff38b);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x063c0c04);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5baf5f5);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dcc06ae);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcdfda8);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13700000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0012);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x0004ffc8);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x00100078);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffacff3e);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e200f0);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe39ff35);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f10017);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd30156);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0521fc7f);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9c0638);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x0499f6ee);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7a0b7c);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f325);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f40cb3);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69af537);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5a073f);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xefecfd72);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13800000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x0011ffcb);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0007f);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffe7ff19);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x008f014a);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94fe93);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b00105);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3002f);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x0585fdb7);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c10525);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x05def7a8);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf20b3c);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f2e9);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a00d48);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf787f484);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdf07cd);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00dfd3c);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 13900000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80008);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x001bffd7);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10076);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x0026ff0e);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x002c0184);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0ffe10);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x023b01e0);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff06);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a2ff09);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf92703e4);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f4f89b);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa820ac5);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f2d9);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x02430dc3);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf881f3dc);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5c0859);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf031fd06);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14000000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80001);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x0021ffe8);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffba005d);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x0060ff1f);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc40198);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0fdb5);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x019a029a);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fdea);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x05750067);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4027f);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d4f9c0);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf9320a1a);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f2f3);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0x00df0e22);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xf986f341);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd108e2);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058fcd1);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14100000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x0021fffd);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0038);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x008eff4a);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff630184);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x003afd8b);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x00da0326);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fced);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x050101c0);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cb0103);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x0876fb10);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80a093e);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f338);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7a0e66);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa94f2b2);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3f0967);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf081fc9b);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14200000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff3);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x001d0013);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa000b);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00aaff89);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13014a);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x00cefd95);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0x000a037b);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fc1d);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x044c0305);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90cff7e);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5fc81);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7100834);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff3a7);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe160e8d);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaaf231);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa509e9);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0adfc65);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14300000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
	cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffef);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x00140025);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffdd);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2ffd6);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedb00f0);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150fdd3);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xff380391);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb85);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x035e0426);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xf994fdfe);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x08eefe0b);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6490702);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f43e);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb60e97);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc6f1be);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a040a67);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0dbfc30);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14400000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0002ffee);
	cx25840_write4(client, DIF_BPF_COEFF45, 0x00070033);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ffb4);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00a40027);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3007e);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b4fe3f);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe760369);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb2e);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x02450518);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5ffc90);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1ffa1);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bc05ae);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902f4fc);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb600e85);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde7f15a);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x095d0ae2);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10cfbfb);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14500000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0005ffef);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xfffa0038);
	cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff95);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x00820074);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f0fed0);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd20304);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb1d);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0x010e05ce);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb64fb41);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x084e013b);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569043e);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00f5dd);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa150e55);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0bf104);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b00b59);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13ffbc6);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14600000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fff4);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffed0035);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff83);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x005000b4);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6ff82);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ffff7a);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd580269);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fb53);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xffca0640);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc99fa1e);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x079a02cb);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55502ba);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5f6e0);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d90e0a);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0x0031f0bd);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fd0bcb);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174fb91);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14700000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0009fffb);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4002a);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ff82);
	cx25840_write4(client, DIF_BPF_COEFF89, 0x001400e0);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3cff10);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e10030);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1201a4);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fbcd);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88066a);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf1f92f);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aa0449);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57e0128);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7ef801);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b00da2);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0x0156f086);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x07450c39);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1acfb5c);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14800000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00080002);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0019);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x003fff92);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xffd600f1);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96feb6);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x019700e1);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0500c2);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fc84);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd590649);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5df87f);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x058505aa);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4ff91);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9f93c);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69d0d20);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0x0279f05e);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x06880ca3);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e6fb28);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 14900000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x00060009);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0004);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0051ffb0);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff9d00e8);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffcfe7c);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x01280180);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32ffd2);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fd6e);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4d05df);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d1f812);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x043506e4);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf685fdfb);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fa8d);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a10c83);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0x0399f046);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c70d08);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222faf3);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 15000000:
	cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
	cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
	cx25840_write4(client, DIF_BPF_COEFF45, 0xffe5ffef);
	cx25840_write4(client, DIF_BPF_COEFF67, 0x0057ffd9);
	cx25840_write4(client, DIF_BPF_COEFF89, 0xff7000c4);
	cx25840_write4(client, DIF_BPF_COEFF1011, 0x0062fe68);
	cx25840_write4(client, DIF_BPF_COEFF1213, 0x009e01ff);
	cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95fee6);
	cx25840_write4(client, DIF_BPF_COEFF1617, 0x0435fe7d);
	cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb710530);
	cx25840_write4(client, DIF_BPF_COEFF2021, 0x023cf7ee);
	cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c307ef);
	cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75efc70);
	cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5cfbef);
	cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c10bce);
	cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b3f03f);
	cx25840_write4(client, DIF_BPF_COEFF3233, 0x05030d69);
	cx25840_write4(client, DIF_BPF_COEFF3435, 0xf261fabf);
	cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
	break;
case 15100000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0012);
cx25840_write4(client, DIF_BPF_COEFF45, 0xffefffdc);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00510006);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff540089);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00befe7c);
cx25840_write4(client, DIF_BPF_COEFF1213, 0x00060253);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe27fe0d);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413ffa2);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad10446);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0390f812);
cx25840_write4(client, DIF_BPF_COEFF2223, 0x013b08c3);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868faf6);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fd5f);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fd0b02);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c7f046);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x043b0dc4);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a1fa8b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15200000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0012);
cx25840_write4(client, DIF_BPF_COEFF45, 0xfffbffce);
cx25840_write4(client, DIF_BPF_COEFF67, 0x003f0033);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e003f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106feb6);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6e0276);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeddfd56);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000cc);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa740329);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bff87f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaa095d);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99ef995);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9fed8);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3590a1f);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d2f05e);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x03700e1b);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e4fa58);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15300000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9000f);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0009ffc8);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00250059);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff5effee);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0132ff10);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee30265);
cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaafccf);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x031101eb);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6001e8);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bdf92f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1b09b6);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafaf852);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0055);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d50929);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d3f086);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a30e6c);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf329fa24);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15400000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80009);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0015ffca);
cx25840_write4(client, DIF_BPF_COEFF67, 0x00050074);
cx25840_write4(client, DIF_BPF_COEFF89, 0xff81ff9f);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x013dff82);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe710221);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x007cfc80);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x024102ed);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa940090);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0680fa1e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9b09cd);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc73f736);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad501d0);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2740820);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c9f0bd);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d40eb9);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371f9f1);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15500000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80002);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001effd5);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5007f);
cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff5b);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2401b0);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0146fc70);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d03c6);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb10ff32);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0701fb41);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb3709a1);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f644);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000345);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2350708);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b2f104);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x01050eff);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3baf9be);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15600000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0022ffe6);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9007a);
cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff29);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2007e);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01011b);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f6fc9e);
cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440467);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbccfdde);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738fc90);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f70934);
cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99f582);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x090204b0);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21a05e1);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8df15a);
cx25840_write4(client, DIF_BPF_COEFF3233, 0x00340f41);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf405f98b);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15700000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcfff4);
cx25840_write4(client, DIF_BPF_COEFF45, 0x0020fffa);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40064);
cx25840_write4(client, DIF_BPF_COEFF89, 0x002fff11);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a400f0);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0d006e);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x0281fd09);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604c9);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbffca2);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0726fdfe);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e80888);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134f4f3);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1060c);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf22304af);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b59f1be);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xff640f7d);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf452f959);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15800000:
cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0000fff0);
cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0010);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa0041);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0067ff13);
cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043014a);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46ffb9);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02dbfda8);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3504e5);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddcfb8d);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9ff7e);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf81107a2);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9f49a);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0753);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2500373);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c14f231);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930fb3);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a1f927);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 15900000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0003ffee);
cx25840_write4(client, DIF_BPF_COEFF45, 0x000f0023);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0016);
cx25840_write4(client, DIF_BPF_COEFF89, 0x0093ff31);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdc0184);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6ff09);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fdfe70);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5104ba);
cx25840_write4(client, DIF_BPF_COEFF1819, 0xff15faac);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270103);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7780688);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x044df479);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430883);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a00231);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbef2b2);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc40fe3);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2f8f5);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
case 16000000:
cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
cx25840_write4(client, DIF_BPF_COEFF23, 0x0006ffef);
cx25840_write4(client, DIF_BPF_COEFF45, 0x00020031);
cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffe8);
cx25840_write4(client, DIF_BPF_COEFF89, 0x00adff66);
cx25840_write4(client, DIF_BPF_COEFF1011, 0xff790198);
cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe6e);
cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e5ff55);
cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99044a);
cx25840_write4(client, DIF_BPF_COEFF1819, 0x005bfa09);
cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545027f);
cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7230541);
cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b8f490);
cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20997);
cx25840_write4(client, DIF_BPF_COEFF2829, 0xf31300eb);
cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d55f341);
cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6100e);
cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544f8c3);
cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
break;
}
}
/*
 * cx23885_std_setup() - program the A/V core for the current video standard.
 *
 * Writes the standard-dependent horizontal/vertical timing and DIF
 * (digital IF) register values -- one set for NTSC, another for everything
 * else (treated as PAL-BG) -- then reprograms the DIF band-pass filter for
 * the standard's IF frequency and re-applies the input routing so the
 * change takes effect on the currently selected inputs.
 */
static void cx23885_std_setup(struct i2c_client *client)
{
	struct cx25840_state *state = to_state(i2c_get_clientdata(client));
	v4l2_std_id std = state->std;
	u32 ifHz;	/* IF frequency in Hz, consumed by cx23885_dif_setup() */

	/* Standard-independent setup */
	cx25840_write4(client, 0x478, 0x6628021F);
	cx25840_write4(client, 0x400, 0x0);
	cx25840_write4(client, 0x4b4, 0x20524030);
	cx25840_write4(client, 0x47c, 0x010a8263);

	if (std & V4L2_STD_NTSC) {
		v4l_dbg(1, cx25840_debug, client, "%s() Selecting NTSC",
			__func__);

		/* Horiz / vert timing */
		cx25840_write4(client, 0x428, 0x1e1e601a);
		cx25840_write4(client, 0x424, 0x5b2d007a);

		/* DIF NTSC */
		cx25840_write4(client, 0x304, 0x6503bc0c);
		cx25840_write4(client, 0x308, 0xbd038c85);
		cx25840_write4(client, 0x30c, 0x1db4640a);
		cx25840_write4(client, 0x310, 0x00008800);
		cx25840_write4(client, 0x314, 0x44400400);
		cx25840_write4(client, 0x32c, 0x0c800800);
		cx25840_write4(client, 0x330, 0x27000100);
		cx25840_write4(client, 0x334, 0x1f296e1f);
		cx25840_write4(client, 0x338, 0x009f50c1);
		cx25840_write4(client, 0x340, 0x1befbf06);
		cx25840_write4(client, 0x344, 0x000035e8);

		/* DIF I/F */
		ifHz = 5400000;
	} else {
		v4l_dbg(1, cx25840_debug, client, "%s() Selecting PAL-BG",
			__func__);

		/* Horiz / vert timing */
		cx25840_write4(client, 0x428, 0x28244024);
		cx25840_write4(client, 0x424, 0x5d2d0084);

		/* DIF */
		cx25840_write4(client, 0x304, 0x6503bc0c);
		cx25840_write4(client, 0x308, 0xbd038c85);
		cx25840_write4(client, 0x30c, 0x1db4640a);
		cx25840_write4(client, 0x310, 0x00008800);
		cx25840_write4(client, 0x314, 0x44400600);
		cx25840_write4(client, 0x32c, 0x0c800800);
		cx25840_write4(client, 0x330, 0x27000100);
		cx25840_write4(client, 0x334, 0x213530ec);
		cx25840_write4(client, 0x338, 0x00a65ba8);
		cx25840_write4(client, 0x340, 0x1befbf06);
		cx25840_write4(client, 0x344, 0x000035e8);

		/* DIF I/F */
		ifHz = 6000000;
	}

	cx23885_dif_setup(client, ifHz);

	/* Explicitly ensure the inputs are reconfigured after
	 * a standard change.
	 */
	set_input(client, state->vid_input, state->aud_input);
}
/* ----------------------------------------------------------------------- */
/* Picture-control (brightness/contrast/etc.) handler ops */
static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
	.s_ctrl = cx25840_s_ctrl,
};
/* Core subdev ops: status/debug, control plumbing (proxied through the
 * v4l2 control framework helpers), standard selection, reset/firmware
 * load, pin configuration and interrupt servicing. */
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
	.log_status = cx25840_log_status,
	.g_chip_ident = cx25840_g_chip_ident,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
	.s_std = cx25840_s_std,
	.g_std = cx25840_g_std,
	.reset = cx25840_reset,
	.load_fw = cx25840_load_fw,
	.s_io_pin_config = common_s_io_pin_config,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = cx25840_g_register,
	.s_register = cx25840_s_register,
#endif
	.interrupt_service_routine = cx25840_irq_handler,
};
/* Tuner subdev ops: frequency, radio mode and tuner get/set */
static const struct v4l2_subdev_tuner_ops cx25840_tuner_ops = {
	.s_frequency = cx25840_s_frequency,
	.s_radio = cx25840_s_radio,
	.g_tuner = cx25840_g_tuner,
	.s_tuner = cx25840_s_tuner,
};
/* Audio subdev ops: clock frequency, input routing and stream on/off */
static const struct v4l2_subdev_audio_ops cx25840_audio_ops = {
	.s_clock_freq = cx25840_s_clock_freq,
	.s_routing = cx25840_s_audio_routing,
	.s_stream = cx25840_s_audio_stream,
};
/* Video subdev ops: input routing, bus format, streaming and input status */
static const struct v4l2_subdev_video_ops cx25840_video_ops = {
	.s_routing = cx25840_s_video_routing,
	.s_mbus_fmt = cx25840_s_mbus_fmt,
	.s_stream = cx25840_s_stream,
	.g_input_status = cx25840_g_input_status,
};
/* VBI subdev ops: sliced-VBI decoding and raw/sliced format handling */
static const struct v4l2_subdev_vbi_ops cx25840_vbi_ops = {
	.decode_vbi_line = cx25840_decode_vbi_line,
	.s_raw_fmt = cx25840_s_raw_fmt,
	.s_sliced_fmt = cx25840_s_sliced_fmt,
	.g_sliced_fmt = cx25840_g_sliced_fmt,
};
/* Top-level subdev ops table tying all the per-domain op tables together */
static const struct v4l2_subdev_ops cx25840_ops = {
	.core = &cx25840_core_ops,
	.tuner = &cx25840_tuner_ops,
	.audio = &cx25840_audio_ops,
	.video = &cx25840_video_ops,
	.vbi = &cx25840_vbi_ops,
	.ir = &cx25840_ir_ops,
};
/* ----------------------------------------------------------------------- */
/*
 * Identify which cx2388x A/V core variant is present.
 *
 * There is no ID register on these parts, so probe registers whose reset
 * defaults differ between the silicon designs.  The chip is brought out
 * of digital power down for the probe and put back afterwards.
 */
static u32 get_cx2388x_ident(struct i2c_client *client)
{
	u32 ident;

	/* Come out of digital power down */
	cx25840_write(client, 0x000, 0);

	if (cx25840_read4(client, 0x204) & 0xffff) {
		/* The IR Tx Clk Divider register exists: cx23885 or cx23887 */
		u32 dif = cx25840_read4(client, 0x300);

		/*
		 * The cx23885 has no DIF and returns bogus repetitive byte
		 * values there (e.g. 8a8a8a8a or 31313131); the cx23887's
		 * DIF registers appear valid (but unused), which is good
		 * enough to tell them apart.
		 */
		ident = (dif >> 16) == (dif & 0xffff) ? V4L2_IDENT_CX23885_AV
						      : V4L2_IDENT_CX23887_AV;
	} else if (cx25840_read4(client, 0x300) & 0x0fffffff) {
		/* DIF PLL Freq Word reg exists; chip must be a CX23888 */
		ident = V4L2_IDENT_CX23888_AV;
	} else {
		v4l_err(client, "Unable to detect h/w, assuming cx23887\n");
		ident = V4L2_IDENT_CX23887_AV;
	}

	/* Back into digital power down */
	cx25840_write(client, 0x000, 2);
	return ident;
}
/*
 * cx25840_probe() - i2c probe entry point.
 *
 * Reads the device ID registers to identify which chip variant is present
 * (cx2583x / cx2584x / cx2388x A/V core / cx2310x), allocates and
 * initializes the driver state, registers the v4l2 subdevice and its
 * control handler, and probes IR support.
 *
 * Returns 0 on success, or a negative errno: -EIO if the adapter lacks
 * SMBus byte-data support, -ENODEV if no supported chip is found,
 * -ENOMEM on allocation failure, or a control-handler setup error.
 */
static int cx25840_probe(struct i2c_client *client,
			 const struct i2c_device_id *did)
{
	struct cx25840_state *state;
	struct v4l2_subdev *sd;
	int default_volume;
	u32 id = V4L2_IDENT_NONE;
	u16 device_id;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_dbg(1, cx25840_debug, client, "detecting cx25840 client on address 0x%x\n", client->addr << 1);

	/* The 16-bit device ID is split across two 8-bit registers */
	device_id = cx25840_read(client, 0x101) << 8;
	device_id |= cx25840_read(client, 0x100);
	v4l_dbg(1, cx25840_debug, client, "device_id = 0x%04x\n", device_id);

	/* The high byte of the device ID should be
	 * 0x83 for the cx2583x and 0x84 for the cx2584x */
	if ((device_id & 0xff00) == 0x8300) {
		id = V4L2_IDENT_CX25836 + ((device_id >> 4) & 0xf) - 6;
	} else if ((device_id & 0xff00) == 0x8400) {
		id = V4L2_IDENT_CX25840 + ((device_id >> 4) & 0xf);
	} else if (device_id == 0x0000) {
		/* cx2388x parts read back 0 here; probe further to tell
		 * the variants apart */
		id = get_cx2388x_ident(client);
	} else if ((device_id & 0xfff0) == 0x5A30) {
		/* The CX23100 (0x5A3C = 23100) doesn't have an A/V decoder */
		id = V4L2_IDENT_CX2310X_AV;
	} else if ((device_id & 0xff) == (device_id >> 8)) {
		/* Repeating byte pattern: a chip is present but unresponsive */
		v4l_err(client,
			"likely a confused/unresponsive cx2388[578] A/V decoder"
			" found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);
		v4l_err(client, "A method to reset it from the cx25840 driver"
			" software is not known at this time\n");
		return -ENODEV;
	} else {
		v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
		return -ENODEV;
	}

	state = kzalloc(sizeof(struct cx25840_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &cx25840_ops);

	/* Announce exactly which variant was found */
	switch (id) {
	case V4L2_IDENT_CX23885_AV:
		v4l_info(client, "cx23885 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case V4L2_IDENT_CX23887_AV:
		v4l_info(client, "cx23887 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case V4L2_IDENT_CX23888_AV:
		v4l_info(client, "cx23888 A/V decoder found @ 0x%x (%s)\n",
			 client->addr << 1, client->adapter->name);
		break;
	case V4L2_IDENT_CX2310X_AV:
		v4l_info(client, "cx%d A/V decoder found @ 0x%x (%s)\n",
			 device_id, client->addr << 1, client->adapter->name);
		break;
	case V4L2_IDENT_CX25840:
	case V4L2_IDENT_CX25841:
	case V4L2_IDENT_CX25842:
	case V4L2_IDENT_CX25843:
		/* Note: revision '(device_id & 0x0f) == 2' was never built. The
		   marking skips from 0x1 == 22 to 0x3 == 23. */
		v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n",
			 (device_id & 0xfff0) >> 4,
			 (device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1
						: (device_id & 0x0f),
			 client->addr << 1, client->adapter->name);
		break;
	case V4L2_IDENT_CX25836:
	case V4L2_IDENT_CX25837:
	default:
		v4l_info(client, "cx25%3x-%x found @ 0x%x (%s)\n",
			 (device_id & 0xfff0) >> 4, device_id & 0x0f,
			 client->addr << 1, client->adapter->name);
		break;
	}

	/* Default state until the bridge driver configures routing */
	state->c = client;
	state->vid_input = CX25840_COMPOSITE7;
	state->aud_input = CX25840_AUDIO8;
	state->audclk_freq = 48000;
	state->audmode = V4L2_TUNER_MODE_LANG1;
	state->vbi_line_offset = 8;
	state->id = id;
	state->rev = device_id;

	/* Picture controls, present on all variants */
	v4l2_ctrl_handler_init(&state->hdl, 9);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_CONTRAST, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_SATURATION, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
			  V4L2_CID_HUE, -128, 127, 1, 0);
	if (!is_cx2583x(state)) {
		/* Audio controls are only registered on non-cx2583x parts */
		default_volume = cx25840_read(client, 0x8d4);
		/*
		 * Enforce the legacy PVR-350/MSP3400 to PVR-150/CX25843 volume
		 * scale mapping limits to avoid -ERANGE errors when
		 * initializing the volume control
		 */
		if (default_volume > 228) {
			/* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */
			default_volume = 228;
			cx25840_write(client, 0x8d4, 228);
		}
		else if (default_volume < 20) {
			/* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */
			default_volume = 20;
			cx25840_write(client, 0x8d4, 20);
		}
		/* Map the hardware register value onto the 0-65535 v4l2 scale */
		default_volume = (((228 - default_volume) >> 1) + 23) << 9;

		state->volume = v4l2_ctrl_new_std(&state->hdl,
			&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
			0, 65535, 65535 / 100, default_volume);
		state->mute = v4l2_ctrl_new_std(&state->hdl,
			&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE,
			0, 1, 1, 0);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_BALANCE,
				  0, 65535, 65535 / 100, 32768);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_BASS,
				  0, 65535, 65535 / 100, 32768);
		v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops,
				  V4L2_CID_AUDIO_TREBLE,
				  0, 65535, 65535 / 100, 32768);
	}
	sd->ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		/* Roll back everything allocated so far */
		v4l2_ctrl_handler_free(&state->hdl);
		kfree(state);
		return err;
	}
	if (!is_cx2583x(state))
		v4l2_ctrl_cluster(2, &state->volume);
	v4l2_ctrl_handler_setup(&state->hdl);

	if (client->dev.platform_data) {
		struct cx25840_platform_data *pdata = client->dev.platform_data;

		state->pvr150_workaround = pdata->pvr150_workaround;
	}

	cx25840_ir_probe(sd);
	return 0;
}
/*
 * cx25840_remove() - i2c remove entry point.
 *
 * Tears down what cx25840_probe() set up, in reverse order: IR support,
 * the v4l2 subdevice registration, the control handler, and finally the
 * driver state itself.
 */
static int cx25840_remove(struct i2c_client *client)
{
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct cx25840_state *st = to_state(subdev);

	cx25840_ir_remove(subdev);
	v4l2_device_unregister_subdev(subdev);
	v4l2_ctrl_handler_free(&st->hdl);
	kfree(st);
	return 0;
}
/* i2c device id table; all supported variants bind under "cx25840" */
static const struct i2c_device_id cx25840_id[] = {
	{ "cx25840", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, cx25840_id);
/* i2c driver glue; module init/exit generated by module_i2c_driver() */
static struct i2c_driver cx25840_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "cx25840",
	},
	.probe = cx25840_probe,
	.remove = cx25840_remove,
	.id_table = cx25840_id,
};

module_i2c_driver(cx25840_driver);
| gpl-2.0 |
vfalico/popcorn | sound/isa/sb/sb16_csp.c | 7642 | 33507 | /*
* Copyright (c) 1999 by Uros Bizjak <uros@kss-loka.si>
* Takashi Iwai <tiwai@suse.de>
*
* SB16ASP/AWE32 CSP control
*
* CSP microcode loader:
* alsa-tools/sb16_csp/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/sb16_csp.h>
#include <sound/initval.h>
MODULE_AUTHOR("Uros Bizjak <uros@kss-loka.si>");
MODULE_DESCRIPTION("ALSA driver for SB16 Creative Signal Processor");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("sb16/mulaw_main.csp");
MODULE_FIRMWARE("sb16/alaw_main.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_init.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_playback.csp");
MODULE_FIRMWARE("sb16/ima_adpcm_capture.csp");
/*
 * CSP_HDR_VALUE() packs four characters into a host-endian 32-bit value
 * so chunk tags read from the microcode RIFF file can be compared as
 * plain integers regardless of host byte order.
 */
#ifdef SNDRV_LITTLE_ENDIAN
#define CSP_HDR_VALUE(a,b,c,d)	((a) | ((b)<<8) | ((c)<<16) | ((d)<<24))
#else
#define CSP_HDR_VALUE(a,b,c,d)	((d) | ((c)<<8) | ((b)<<16) | ((a)<<24))
#endif

/* Chunk tags recognized while parsing the CSP microcode RIFF file */
#define RIFF_HEADER	CSP_HDR_VALUE('R', 'I', 'F', 'F')
#define CSP__HEADER	CSP_HDR_VALUE('C', 'S', 'P', ' ')
#define LIST_HEADER	CSP_HDR_VALUE('L', 'I', 'S', 'T')
#define FUNC_HEADER	CSP_HDR_VALUE('f', 'u', 'n', 'c')
#define CODE_HEADER	CSP_HDR_VALUE('c', 'o', 'd', 'e')
#define INIT_HEADER	CSP_HDR_VALUE('i', 'n', 'i', 't')
#define MAIN_HEADER	CSP_HDR_VALUE('m', 'a', 'i', 'n')
/*
 * RIFF data format
 */
struct riff_header {
	__u32 name;	/* four-character chunk tag (see CSP_HDR_VALUE) */
	__u32 len;	/* chunk payload length; little-endian in the file */
};
/* Descriptor for one microcode function ('func' chunk payload).
 * Multi-byte fields are little-endian in the file (see the le16_to_cpu
 * conversion in snd_sb_csp_riff_load()). */
struct desc_header {
	struct riff_header info;	/* chunk header for this descriptor */
	__u16 func_nr;			/* microcode function number */
	__u16 VOC_type;			/* VOC sample type the function handles */
	__u16 flags_play_rec;		/* playback/record capability flags */
	__u16 flags_16bit_8bit;		/* supported sample-width flags */
	__u16 flags_stereo_mono;	/* supported channel-mode flags */
	__u16 flags_rates;		/* supported sample-rate flags */
};
/*
* prototypes
*/
static void snd_sb_csp_free(struct snd_hwdep *hw);
static int snd_sb_csp_open(struct snd_hwdep * hw, struct file *file);
static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg);
static int snd_sb_csp_release(struct snd_hwdep * hw, struct file *file);
static int csp_detect(struct snd_sb *chip, int *version);
static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val);
static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val);
static int read_register(struct snd_sb *chip, unsigned char reg);
static int set_mode_register(struct snd_sb *chip, unsigned char mode);
static int get_version(struct snd_sb *chip);
static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
struct snd_sb_csp_microcode __user * code);
static int snd_sb_csp_unload(struct snd_sb_csp * p);
static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags);
static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode);
static int snd_sb_csp_check_version(struct snd_sb_csp * p);
static int snd_sb_csp_use(struct snd_sb_csp * p);
static int snd_sb_csp_unuse(struct snd_sb_csp * p);
static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels);
static int snd_sb_csp_stop(struct snd_sb_csp * p);
static int snd_sb_csp_pause(struct snd_sb_csp * p);
static int snd_sb_csp_restart(struct snd_sb_csp * p);
static int snd_sb_qsound_build(struct snd_sb_csp * p);
static void snd_sb_qsound_destroy(struct snd_sb_csp * p);
static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p);
static int init_proc_entry(struct snd_sb_csp * p, int device);
static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer);
/*
* Detect CSP chip and create a new instance
*/
/*
 * Detect the CSP chip and create a new hwdep instance for it.
 *
 * On success 0 is returned and, when the caller asked for it, *rhwdep
 * points at the new hwdep device.  Returns -ENODEV when no CSP is
 * detected, or a negative error code on allocation/registration failure.
 */
int snd_sb_csp_new(struct snd_sb *chip, int device, struct snd_hwdep ** rhwdep)
{
	struct snd_sb_csp *csp;
	int uninitialized_var(version);
	int err;
	struct snd_hwdep *hwdep;

	if (rhwdep)
		*rhwdep = NULL;

	if (csp_detect(chip, &version))
		return -ENODEV;

	err = snd_hwdep_new(chip->card, "SB16-CSP", device, &hwdep);
	if (err < 0)
		return err;

	csp = kzalloc(sizeof(*csp), GFP_KERNEL);
	if (csp == NULL) {
		snd_device_free(chip->card, hwdep);
		return -ENOMEM;
	}

	csp->chip = chip;
	csp->version = version;

	/* CSP operators */
	csp->ops.csp_use = snd_sb_csp_use;
	csp->ops.csp_unuse = snd_sb_csp_unuse;
	csp->ops.csp_autoload = snd_sb_csp_autoload;
	csp->ops.csp_start = snd_sb_csp_start;
	csp->ops.csp_stop = snd_sb_csp_stop;
	csp->ops.csp_qsound_transfer = snd_sb_csp_qsound_transfer;

	mutex_init(&csp->access_mutex);
	sprintf(hwdep->name, "CSP v%d.%d", (version >> 4), (version & 0x0f));
	hwdep->iface = SNDRV_HWDEP_IFACE_SB16CSP;
	hwdep->private_data = csp;
	hwdep->private_free = snd_sb_csp_free;

	/* operators - only write/ioctl */
	hwdep->ops.open = snd_sb_csp_open;
	hwdep->ops.ioctl = snd_sb_csp_ioctl;
	hwdep->ops.release = snd_sb_csp_release;

	/* create a proc entry */
	init_proc_entry(csp, device);
	if (rhwdep)
		*rhwdep = hwdep;
	return 0;
}
/*
* free_private for hwdep instance
*/
/*
 * private_free callback for the hwdep instance: stop the CSP if it is
 * still running, release any cached microcode firmware, and free the
 * instance data.
 */
static void snd_sb_csp_free(struct snd_hwdep *hwdep)
{
	struct snd_sb_csp *p = hwdep->private_data;
	int i;

	if (!p)
		return;

	if (p->running & SNDRV_SB_CSP_ST_RUNNING)
		snd_sb_csp_stop(p);
	for (i = 0; i < ARRAY_SIZE(p->csp_programs); ++i)
		release_firmware(p->csp_programs[i]);
	kfree(p);
}
/* ------------------------------ */
/*
* open the device exclusively
*/
/*
 * hwdep open callback: claim exclusive access to the CSP.
 * Fails with -EAGAIN when the device is already in use.
 */
static int snd_sb_csp_open(struct snd_hwdep *hw, struct file *file)
{
	return snd_sb_csp_use(hw->private_data);
}
/*
* ioctl for hwdep device:
*/
/*
 * ioctl handler for the hwdep device.
 *
 * Dispatches the SNDRV_SB_CSP_IOCTL_* commands: query device/codec
 * information, load or unload microcode (refused with -EBUSY while the
 * CSP is running), and start/stop/pause/restart the running program.
 * Returns 0 on success, -ENOTTY for unknown commands, or a negative
 * error code.
 */
static int snd_sb_csp_ioctl(struct snd_hwdep * hw, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_sb_csp *p = hw->private_data;
	struct snd_sb_csp_info info;
	struct snd_sb_csp_start start_info;
	int err;

	if (snd_BUG_ON(!p))
		return -EINVAL;

	if (snd_sb_csp_check_version(p))
		return -ENODEV;

	switch (cmd) {
		/* get information */
	case SNDRV_SB_CSP_IOCTL_INFO:
		/*
		 * Zero the whole struct before filling it: it is copied to
		 * user space, and any padding or otherwise unwritten bytes
		 * would leak kernel stack contents (info-leak fix).
		 */
		memset(&info, 0, sizeof(info));
		/* NOTE(review): this copies only the first character of the
		 * codec name; upstream has the same quirk — confirm intent
		 * before "fixing" it to a full string copy. */
		*info.codec_name = *p->codec_name;
		info.func_nr = p->func_nr;
		info.acc_format = p->acc_format;
		info.acc_channels = p->acc_channels;
		info.acc_width = p->acc_width;
		info.acc_rates = p->acc_rates;
		info.csp_mode = p->mode;
		info.run_channels = p->run_channels;
		info.run_width = p->run_width;
		info.version = p->version;
		info.state = p->running;
		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
			err = -EFAULT;
		else
			err = 0;
		break;

		/* load CSP microcode */
	case SNDRV_SB_CSP_IOCTL_LOAD_CODE:
		err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
		       -EBUSY : snd_sb_csp_riff_load(p, (struct snd_sb_csp_microcode __user *) arg));
		break;
	case SNDRV_SB_CSP_IOCTL_UNLOAD_CODE:
		err = (p->running & SNDRV_SB_CSP_ST_RUNNING ?
		       -EBUSY : snd_sb_csp_unload(p));
		break;

		/* change CSP running state */
	case SNDRV_SB_CSP_IOCTL_START:
		if (copy_from_user(&start_info, (void __user *) arg, sizeof(start_info)))
			err = -EFAULT;
		else
			err = snd_sb_csp_start(p, start_info.sample_width, start_info.channels);
		break;
	case SNDRV_SB_CSP_IOCTL_STOP:
		err = snd_sb_csp_stop(p);
		break;
	case SNDRV_SB_CSP_IOCTL_PAUSE:
		err = snd_sb_csp_pause(p);
		break;
	case SNDRV_SB_CSP_IOCTL_RESTART:
		err = snd_sb_csp_restart(p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}
/*
* close the device
*/
/*
 * hwdep release callback: drop the exclusive-use claim taken in
 * snd_sb_csp_open().
 */
static int snd_sb_csp_release(struct snd_hwdep *hw, struct file *file)
{
	return snd_sb_csp_unuse(hw->private_data);
}
/* ------------------------------ */
/*
* acquire device
*/
/*
 * Acquire exclusive use of the device.
 * Returns 0 on success or -EAGAIN when it is already claimed.
 */
static int snd_sb_csp_use(struct snd_sb_csp * p)
{
	int err = 0;

	mutex_lock(&p->access_mutex);
	if (p->used)
		err = -EAGAIN;
	else
		p->used++;
	mutex_unlock(&p->access_mutex);

	return err;
}
/*
* release device
*/
/*
 * Release the device: decrement the in-use count taken by
 * snd_sb_csp_use() under the access mutex.  Always returns 0.
 */
static int snd_sb_csp_unuse(struct snd_sb_csp * p)
{
	mutex_lock(&p->access_mutex);
	p->used--;
	mutex_unlock(&p->access_mutex);

	return 0;
}
/*
* load microcode via ioctl:
* code is user-space pointer
*/
/*
 * Parse a user-space RIFF microcode image and download the microcode for
 * the requested function (info.func_req) to the CSP.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -EINVAL on a
 * malformed image or unsupported codec type, or an error from the
 * download itself.
 *
 * NOTE(review): only the top-level file length is bounded against
 * SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE; the inner chunk lengths
 * (item_h.len, code_h.len) are taken from the user image and trusted
 * for pointer advancement — confirm upstream hardening if this matters.
 */
static int snd_sb_csp_riff_load(struct snd_sb_csp * p,
				struct snd_sb_csp_microcode __user * mcode)
{
	struct snd_sb_csp_mc_header info;
	unsigned char __user *data_ptr;
	unsigned char __user *data_end;
	unsigned short func_nr = 0;
	struct riff_header file_h, item_h, code_h;
	__u32 item_type;
	struct desc_header funcdesc_h;
	unsigned long flags;
	int err;

	/* fetch the request header (codec name + requested function number) */
	if (copy_from_user(&info, mcode, sizeof(info)))
		return -EFAULT;
	data_ptr = mcode->data;

	/* the image must start with a RIFF header of bounded length */
	if (copy_from_user(&file_h, data_ptr, sizeof(file_h)))
		return -EFAULT;
	if ((file_h.name != RIFF_HEADER) ||
	    (le32_to_cpu(file_h.len) >= SNDRV_SB_CSP_MAX_MICROCODE_FILE_SIZE - sizeof(file_h))) {
		snd_printd("%s: Invalid RIFF header\n", __func__);
		return -EINVAL;
	}
	data_ptr += sizeof(file_h);
	data_end = data_ptr + le32_to_cpu(file_h.len);

	/* the RIFF form type must be 'CSP ' */
	if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
		return -EFAULT;
	if (item_type != CSP__HEADER) {
		snd_printd("%s: Invalid RIFF file type\n", __func__);
		return -EINVAL;
	}
	data_ptr += sizeof (item_type);

	/* walk the LIST chunks looking for the requested function */
	for (; data_ptr < data_end; data_ptr += le32_to_cpu(item_h.len)) {
		if (copy_from_user(&item_h, data_ptr, sizeof(item_h)))
			return -EFAULT;
		data_ptr += sizeof(item_h);
		if (item_h.name != LIST_HEADER)
			continue;

		if (copy_from_user(&item_type, data_ptr, sizeof(item_type)))
			return -EFAULT;
		switch (item_type) {
		case FUNC_HEADER:
			/* remember the function number; the matching CODE
			 * chunk is expected to follow */
			if (copy_from_user(&funcdesc_h, data_ptr + sizeof(item_type), sizeof(funcdesc_h)))
				return -EFAULT;
			func_nr = le16_to_cpu(funcdesc_h.func_nr);
			break;
		case CODE_HEADER:
			if (func_nr != info.func_req)
				break;	/* not required function, try next */
			data_ptr += sizeof(item_type);

			/* destroy QSound mixer element */
			if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
				snd_sb_qsound_destroy(p);
			}
			/* Clear all flags */
			p->running = 0;
			p->mode = 0;

			/* load microcode blocks: any number of INIT blocks
			 * precede the single MAIN block */
			for (;;) {
				if (data_ptr >= data_end)
					return -EINVAL;
				if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
					return -EFAULT;

				/* init microcode blocks */
				if (code_h.name != INIT_HEADER)
					break;
				data_ptr += sizeof(code_h);
				err = snd_sb_csp_load_user(p, data_ptr, le32_to_cpu(code_h.len),
						      SNDRV_SB_CSP_LOAD_INITBLOCK);
				if (err)
					return err;
				data_ptr += le32_to_cpu(code_h.len);
			}
			/* main microcode block */
			if (copy_from_user(&code_h, data_ptr, sizeof(code_h)))
				return -EFAULT;

			if (code_h.name != MAIN_HEADER) {
				snd_printd("%s: Missing 'main' microcode\n", __func__);
				return -EINVAL;
			}
			data_ptr += sizeof(code_h);
			err = snd_sb_csp_load_user(p, data_ptr,
						   le32_to_cpu(code_h.len), 0);
			if (err)
				return err;

			/* fill in codec header */
			strlcpy(p->codec_name, info.codec_name, sizeof(p->codec_name));
			p->func_nr = func_nr;
			p->mode = le16_to_cpu(funcdesc_h.flags_play_rec);
			/* map the descriptor's VOC codec id to accepted PCM formats */
			switch (le16_to_cpu(funcdesc_h.VOC_type)) {
			case 0x0001:	/* QSound decoder */
				if (le16_to_cpu(funcdesc_h.flags_play_rec) == SNDRV_SB_CSP_MODE_DSP_WRITE) {
					if (snd_sb_qsound_build(p) == 0)
						/* set QSound flag and clear all other mode flags */
						p->mode = SNDRV_SB_CSP_MODE_QSOUND;
				}
				p->acc_format = 0;
				break;
			case 0x0006:	/* A Law codec */
				p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
				break;
			case 0x0007:	/* Mu Law codec */
				p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
				break;
			case 0x0011:	/* what Creative thinks is IMA ADPCM codec */
			case 0x0200:	/* Creative ADPCM codec */
				p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
				break;
			case 201:	/* Text 2 Speech decoder */
				/* TODO: Text2Speech handling routines */
				p->acc_format = 0;
				break;
			case 0x0202:	/* Fast Speech 8 codec */
			case 0x0203:	/* Fast Speech 10 codec */
				p->acc_format = SNDRV_PCM_FMTBIT_SPECIAL;
				break;
			default:	/* other codecs are unsupported */
				p->acc_format = p->acc_width = p->acc_rates = 0;
				p->mode = 0;
				snd_printd("%s: Unsupported CSP codec type: 0x%04x\n",
					   __func__,
					   le16_to_cpu(funcdesc_h.VOC_type));
				return -EINVAL;
			}
			p->acc_channels = le16_to_cpu(funcdesc_h.flags_stereo_mono);
			p->acc_width = le16_to_cpu(funcdesc_h.flags_16bit_8bit);
			p->acc_rates = le16_to_cpu(funcdesc_h.flags_rates);

			/* Decouple CSP from IRQ and DMAREQ lines */
			spin_lock_irqsave(&p->chip->reg_lock, flags);
			set_mode_register(p->chip, 0xfc);
			set_mode_register(p->chip, 0x00);
			spin_unlock_irqrestore(&p->chip->reg_lock, flags);

			/* finished loading successfully */
			p->running = SNDRV_SB_CSP_ST_LOADED;	/* set LOADED flag */
			return 0;
		}
	}
	snd_printd("%s: Function #%d not found\n", __func__, info.func_req);
	return -EINVAL;
}
/*
* unload CSP microcode
*/
/*
 * unload CSP microcode: refuse while the CSP is running, and report
 * -ENXIO when nothing was loaded in the first place
 */
static int snd_sb_csp_unload(struct snd_sb_csp * p)
{
	if (p->running & SNDRV_SB_CSP_ST_RUNNING)
		return -EBUSY;
	if (!(p->running & SNDRV_SB_CSP_ST_LOADED))
		return -ENXIO;

	/* forget the accepted formats */
	p->acc_format = 0;
	p->acc_channels = p->acc_width = p->acc_rates = 0;

	/* the QSound decoder owns mixer controls - tear them down too */
	if (p->mode == SNDRV_SB_CSP_MODE_QSOUND)
		snd_sb_qsound_destroy(p);

	/* back to a pristine state */
	p->running = 0;
	p->mode = 0;
	return 0;
}
/*
* send command sequence to DSP
*/
/*
 * send command sequence to DSP; stops at the first byte the DSP
 * refuses and reports -EIO
 */
static inline int command_seq(struct snd_sb *chip, const unsigned char *seq, int size)
{
	const unsigned char *end = seq + size;

	while (seq < end) {
		if (!snd_sbdsp_command(chip, *seq++))
			return -EIO;
	}
	return 0;
}
/*
* set CSP codec parameter
*/
/*
 * set CSP codec parameter
 *
 * Sends the "set codec parameter" DSP command, then asks the DSP to echo
 * the parameter number back as an acknowledgement.
 *
 * Fix: the original ignored the return values of command_seq() and of the
 * readback request; a failed command byte now aborts immediately with
 * -EIO instead of proceeding to read a stale acknowledgement byte.
 *
 * Returns 0 on success, -EIO on any command or acknowledgement failure.
 */
static int set_codec_parameter(struct snd_sb *chip, unsigned char par, unsigned char val)
{
	unsigned char dsp_cmd[3];
	int err;

	dsp_cmd[0] = 0x05;	/* CSP set codec parameter */
	dsp_cmd[1] = val;	/* Parameter value */
	dsp_cmd[2] = par;	/* Parameter */
	err = command_seq(chip, dsp_cmd, 3);
	if (err < 0)
		return err;
	if (!snd_sbdsp_command(chip, 0x03))	/* DSP read? */
		return -EIO;
	if (snd_sbdsp_get_byte(chip) != par)
		return -EIO;
	return 0;
}
/*
* set CSP register
*/
/*
 * set CSP register: 0x0e <reg> <val>
 */
static int set_register(struct snd_sb *chip, unsigned char reg, unsigned char val)
{
	unsigned char cmd[3] = { 0x0e, reg, val };	/* CSP set register */

	return command_seq(chip, cmd, 3);
}
/*
* read CSP register
* return < 0 -> error
*/
/*
 * read CSP register
 * return < 0 -> error
 *
 * Fix: the function's contract is "< 0 means error", yet the original
 * dropped the return value of command_seq(); a failed command now
 * propagates -EIO instead of reading a meaningless byte from the DSP.
 */
static int read_register(struct snd_sb *chip, unsigned char reg)
{
	unsigned char dsp_cmd[2];
	int err;

	dsp_cmd[0] = 0x0f;	/* CSP read register */
	dsp_cmd[1] = reg;	/* CSP Register */
	err = command_seq(chip, dsp_cmd, 2);
	if (err < 0)
		return err;
	return snd_sbdsp_get_byte(chip);	/* Read DSP value */
}
/*
* set CSP mode register
*/
/*
 * set CSP mode register: 0x04 <mode>
 */
static int set_mode_register(struct snd_sb *chip, unsigned char mode)
{
	unsigned char cmd[2] = { 0x04, mode };	/* CSP set mode register */

	return command_seq(chip, cmd, 2);
}
/*
* Detect CSP
* return 0 if CSP exists.
*/
/*
 * Detect CSP
 * return 0 if CSP exists.
 *
 * Probes by writing a test pattern to CSP register 0x83 and checking that
 * it reads back inverted and then restored; finally validates the version
 * id (0x10..0x1f).  All register traffic runs under reg_lock with IRQs
 * disabled.
 */
static int csp_detect(struct snd_sb *chip, int *version)
{
	unsigned char csp_test1, csp_test2;
	unsigned long flags;
	int result = -ENODEV;

	spin_lock_irqsave(&chip->reg_lock, flags);

	set_codec_parameter(chip, 0x00, 0x00);
	set_mode_register(chip, 0xfc);		/* 0xfc = ?? */

	/* write the complement of reg 0x83 and verify it sticks */
	csp_test1 = read_register(chip, 0x83);
	set_register(chip, 0x83, ~csp_test1);
	csp_test2 = read_register(chip, 0x83);
	if (csp_test2 != (csp_test1 ^ 0xff))
		goto __fail;

	/* restore the original value and verify again */
	set_register(chip, 0x83, csp_test1);
	csp_test2 = read_register(chip, 0x83);
	if (csp_test2 != csp_test1)
		goto __fail;

	set_mode_register(chip, 0x00);		/* 0x00 = ? */

	*version = get_version(chip);
	snd_sbdsp_reset(chip);	/* reset DSP after getversion! */
	if (*version >= 0x10 && *version <= 0x1f)
		result = 0;		/* valid version id */

      __fail:
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return result;
}
/*
* get CSP version number
*/
/*
 * get CSP version number: issue the 0x08/0x03 DSP command pair and
 * return the byte the DSP answers with
 */
static int get_version(struct snd_sb *chip)
{
	static const unsigned char cmd[2] = {
		0x08,	/* SB_DSP_!something! */
		0x03,	/* get chip version id? */
	};

	command_seq(chip, cmd, 2);
	return snd_sbdsp_get_byte(chip);
}
/*
* check if the CSP version is valid
*/
/*
 * check if the CSP version is valid: returns 0 for the supported
 * range 0x10..0x1f, 1 (with a debug message) otherwise
 */
static int snd_sb_csp_check_version(struct snd_sb_csp * p)
{
	if (p->version >= 0x10 && p->version <= 0x1f)
		return 0;

	snd_printd("%s: Invalid CSP version: 0x%x\n", __func__, p->version);
	return 1;
}
/*
* download microcode to CSP (microcode should have one "main" block).
*/
/*
 * download microcode to CSP (microcode should have one "main" block).
 *
 * @buf/@size: kernel-space microcode image
 * @load_flags: SNDRV_SB_CSP_LOAD_INITBLOCK for init blocks (which need a
 *              post-load handshake poll), 0 for the main block
 *
 * Runs entirely under reg_lock with IRQs off (the handshake poll busy
 * waits with udelay).  Returns 0 on success, -EIO on any DSP failure.
 */
static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int size, int load_flags)
{
	int status, i;
	int err;
	int result = -EIO;
	unsigned long flags;

	spin_lock_irqsave(&p->chip->reg_lock, flags);
	snd_sbdsp_command(p->chip, 0x01);	/* CSP download command */
	if (snd_sbdsp_get_byte(p->chip)) {
		/* non-zero ack byte means the download was refused */
		snd_printd("%s: Download command failed\n", __func__);
		goto __fail;
	}
	/* Send CSP low byte (size - 1) */
	snd_sbdsp_command(p->chip, (unsigned char)(size - 1));
	/* Send high byte */
	snd_sbdsp_command(p->chip, (unsigned char)((size - 1) >> 8));
	/* send microcode sequence */
	/* load from kernel space */
	while (size--) {
		if (!snd_sbdsp_command(p->chip, *buf++))
			goto __fail;
	}
	if (snd_sbdsp_get_byte(p->chip))
		goto __fail;

	if (load_flags & SNDRV_SB_CSP_LOAD_INITBLOCK) {
		i = 0;
		/* some codecs (FastSpeech) take some time to initialize */
		/* poll up to 10 times for the 0x55 "ready" byte */
		while (1) {
			snd_sbdsp_command(p->chip, 0x03);
			status = snd_sbdsp_get_byte(p->chip);
			if (status == 0x55 || ++i >= 10)
				break;
			udelay (10);
		}
		if (status != 0x55) {
			snd_printd("%s: Microcode initialization failed\n", __func__);
			goto __fail;
		}
	} else {
		/*
		 * Read mixer register SB_DSP4_DMASETUP after loading 'main' code.
		 * Start CSP chip if no 16bit DMA channel is set - some kind
		 * of autorun or perhaps a bugfix?
		 */
		spin_lock(&p->chip->mixer_lock);
		status = snd_sbmixer_read(p->chip, SB_DSP4_DMASETUP);
		spin_unlock(&p->chip->mixer_lock);
		if (!(status & (SB_DMASETUP_DMA7 | SB_DMASETUP_DMA6 | SB_DMASETUP_DMA5))) {
			err = (set_codec_parameter(p->chip, 0xaa, 0x00) ||
			       set_codec_parameter(p->chip, 0xff, 0x00));
			snd_sbdsp_reset(p->chip);		/* really! */
			if (err)
				goto __fail;
			set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
			set_mode_register(p->chip, 0x70);	/* 70 = RUN */
		}
	}
	result = 0;

      __fail:
	spin_unlock_irqrestore(&p->chip->reg_lock, flags);
	return result;
}
/*
 * copy a user-space microcode image into a kernel buffer and hand it
 * to snd_sb_csp_load(); the temporary buffer is freed on all paths
 */
static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags)
{
	unsigned char *kbuf = memdup_user(buf, size);
	int err;

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	err = snd_sb_csp_load(p, kbuf, size, load_flags);

	kfree(kbuf);
	return err;
}
/*
 * fetch (and cache) one of the CSP firmware images by index, then
 * download it; @index must be one of the CSP_PROGRAM_* constants
 */
static int snd_sb_csp_firmware_load(struct snd_sb_csp *p, int index, int flags)
{
	static const char *const names[] = {
		"sb16/mulaw_main.csp",
		"sb16/alaw_main.csp",
		"sb16/ima_adpcm_init.csp",
		"sb16/ima_adpcm_playback.csp",
		"sb16/ima_adpcm_capture.csp",
	};
	const struct firmware *fw;
	int err;

	BUILD_BUG_ON(ARRAY_SIZE(names) != CSP_PROGRAM_COUNT);

	fw = p->csp_programs[index];
	if (!fw) {
		/* first use: request the image and cache it on the device */
		err = request_firmware(&fw, names[index], p->chip->card->dev);
		if (err < 0)
			return err;
		p->csp_programs[index] = fw;
	}
	return snd_sb_csp_load(p, fw->data, fw->size, flags);
}
/*
* autoload hardware codec if necessary
* return 0 if CSP is loaded and ready to run (p->running != 0)
*/
/*
 * autoload hardware codec if necessary
 * return 0 if CSP is loaded and ready to run (p->running != 0)
 *
 * Picks the firmware matching @pcm_sfmt (Mu-Law, A-Law or IMA ADPCM) and
 * downloads it unless a suitable codec is already resident.  Manual loads
 * (ST_LOADED) and a running CSP are never disturbed (-EBUSY).
 */
static int snd_sb_csp_autoload(struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode)
{
	unsigned long flags;
	int err = 0;

	/* if CSP is running or manually loaded then exit */
	if (p->running & (SNDRV_SB_CSP_ST_RUNNING | SNDRV_SB_CSP_ST_LOADED))
		return -EBUSY;

	/* autoload microcode only if requested hardware codec is not already loaded */
	if (((1 << pcm_sfmt) & p->acc_format) && (play_rec_mode & p->mode)) {
		p->running = SNDRV_SB_CSP_ST_AUTO;
	} else {
		switch (pcm_sfmt) {
		case SNDRV_PCM_FORMAT_MU_LAW:
			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_MULAW, 0);
			p->acc_format = SNDRV_PCM_FMTBIT_MU_LAW;
			p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
			break;
		case SNDRV_PCM_FORMAT_A_LAW:
			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ALAW, 0);
			p->acc_format = SNDRV_PCM_FMTBIT_A_LAW;
			p->mode = SNDRV_SB_CSP_MODE_DSP_READ | SNDRV_SB_CSP_MODE_DSP_WRITE;
			break;
		case SNDRV_PCM_FORMAT_IMA_ADPCM:
			/* ADPCM needs an init block plus a direction-specific
			 * main block (playback or capture) */
			err = snd_sb_csp_firmware_load(p, CSP_PROGRAM_ADPCM_INIT,
						       SNDRV_SB_CSP_LOAD_INITBLOCK);
			if (err)
				break;
			if (play_rec_mode == SNDRV_SB_CSP_MODE_DSP_WRITE) {
				err = snd_sb_csp_firmware_load
					(p, CSP_PROGRAM_ADPCM_PLAYBACK, 0);
				p->mode = SNDRV_SB_CSP_MODE_DSP_WRITE;
			} else {
				err = snd_sb_csp_firmware_load
					(p, CSP_PROGRAM_ADPCM_CAPTURE, 0);
				p->mode = SNDRV_SB_CSP_MODE_DSP_READ;
			}
			p->acc_format = SNDRV_PCM_FMTBIT_IMA_ADPCM;
			break;
		default:
			/* Decouple CSP from IRQ and DMAREQ lines */
			if (p->running & SNDRV_SB_CSP_ST_AUTO) {
				spin_lock_irqsave(&p->chip->reg_lock, flags);
				set_mode_register(p->chip, 0xfc);
				set_mode_register(p->chip, 0x00);
				spin_unlock_irqrestore(&p->chip->reg_lock, flags);
				p->running = 0;			/* clear autoloaded flag */
			}
			return -EINVAL;
		}
		if (err) {
			/* download failed: wipe all codec state */
			p->acc_format = 0;
			p->acc_channels = p->acc_width = p->acc_rates = 0;

			p->running = 0;				/* clear autoloaded flag */
			p->mode = 0;
			return (err);
		} else {
			p->running = SNDRV_SB_CSP_ST_AUTO;	/* set autoloaded flag */
			p->acc_width = SNDRV_SB_CSP_SAMPLE_16BIT;	/* only 16 bit data */
			p->acc_channels = SNDRV_SB_CSP_MONO | SNDRV_SB_CSP_STEREO;
			p->acc_rates = SNDRV_SB_CSP_RATE_ALL;	/* HW codecs accept all rates */
		}
	}
	return (p->running & SNDRV_SB_CSP_ST_AUTO) ? 0 : -ENXIO;
}
/*
* start CSP
*/
/*
 * start CSP
 *
 * Validates that microcode is loaded, the CSP is idle and the requested
 * width/channel count are accepted, then starts the codec with PCM
 * volume temporarily muted.  Lock order: mixer_lock (irqsave) outside,
 * reg_lock nested inside.
 */
static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channels)
{
	unsigned char s_type;	/* sample type */
	unsigned char mixL, mixR;
	int result = -EIO;
	unsigned long flags;

	if (!(p->running & (SNDRV_SB_CSP_ST_LOADED | SNDRV_SB_CSP_ST_AUTO))) {
		snd_printd("%s: Microcode not loaded\n", __func__);
		return -ENXIO;
	}
	if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
		snd_printd("%s: CSP already running\n", __func__);
		return -EBUSY;
	}
	if (!(sample_width & p->acc_width)) {
		snd_printd("%s: Unsupported PCM sample width\n", __func__);
		return -EINVAL;
	}
	if (!(channels & p->acc_channels)) {
		snd_printd("%s: Invalid number of channels\n", __func__);
		return -EINVAL;
	}

	/* Mute PCM volume */
	spin_lock_irqsave(&p->chip->mixer_lock, flags);
	mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
	mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
	/* keep only the low nibble of each channel while starting */
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);

	spin_lock(&p->chip->reg_lock);
	set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
	set_mode_register(p->chip, 0x70);	/* 70 = RUN */

	s_type = 0x00;
	if (channels == SNDRV_SB_CSP_MONO)
		s_type = 0x11;	/* 000n 000n (n = 1 if mono) */
	if (sample_width == SNDRV_SB_CSP_SAMPLE_8BIT)
		s_type |= 0x22;	/* 00dX 00dX (d = 1 if 8 bit samples) */

	if (set_codec_parameter(p->chip, 0x81, s_type)) {
		snd_printd("%s: Set sample type command failed\n", __func__);
		goto __fail;
	}
	if (set_codec_parameter(p->chip, 0x80, 0x00)) {
		snd_printd("%s: Codec start command failed\n", __func__);
		goto __fail;
	}
	p->run_width = sample_width;
	p->run_channels = channels;

	p->running |= SNDRV_SB_CSP_ST_RUNNING;

	if (p->mode & SNDRV_SB_CSP_MODE_QSOUND) {
		set_codec_parameter(p->chip, 0xe0, 0x01);
		/* enable QSound decoder */
		set_codec_parameter(p->chip, 0x00, 0xff);
		set_codec_parameter(p->chip, 0x01, 0xff);
		p->running |= SNDRV_SB_CSP_ST_QSOUND;
		/* set QSound startup value */
		snd_sb_csp_qsound_transfer(p);
	}
	result = 0;

      __fail:
	spin_unlock(&p->chip->reg_lock);

	/* restore PCM volume */
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
	spin_unlock_irqrestore(&p->chip->mixer_lock, flags);

	return result;
}
/*
* stop CSP
*/
/*
 * stop CSP
 *
 * No-op (returns 0) if the CSP is not running.  Disables the QSound
 * decoder first if active, then issues the STOP mode command, all with
 * PCM volume muted.  Same lock nesting as snd_sb_csp_start():
 * mixer_lock outside, reg_lock inside.
 */
static int snd_sb_csp_stop(struct snd_sb_csp * p)
{
	int result;
	unsigned char mixL, mixR;
	unsigned long flags;

	if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
		return 0;

	/* Mute PCM volume */
	spin_lock_irqsave(&p->chip->mixer_lock, flags);
	mixL = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV);
	mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1);
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7);
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7);

	spin_lock(&p->chip->reg_lock);
	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
		set_codec_parameter(p->chip, 0xe0, 0x01);
		/* disable QSound decoder */
		set_codec_parameter(p->chip, 0x00, 0x00);
		set_codec_parameter(p->chip, 0x01, 0x00);

		p->running &= ~SNDRV_SB_CSP_ST_QSOUND;
	}
	result = set_mode_register(p->chip, 0xc0);	/* c0 = STOP */
	spin_unlock(&p->chip->reg_lock);

	/* restore PCM volume */
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL);
	snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR);
	spin_unlock_irqrestore(&p->chip->mixer_lock, flags);

	/* clear state flags only if the STOP command succeeded */
	if (!(result))
		p->running &= ~(SNDRV_SB_CSP_ST_PAUSED | SNDRV_SB_CSP_ST_RUNNING);
	return result;
}
/*
* pause CSP codec and hold DMA transfer
*/
/*
 * pause CSP codec and hold DMA transfer; only valid while running
 */
static int snd_sb_csp_pause(struct snd_sb_csp * p)
{
	unsigned long flags;
	int err;

	if (!(p->running & SNDRV_SB_CSP_ST_RUNNING))
		return -EBUSY;

	/* codec parameter 0x80 = 0xff pauses the codec */
	spin_lock_irqsave(&p->chip->reg_lock, flags);
	err = set_codec_parameter(p->chip, 0x80, 0xff);
	spin_unlock_irqrestore(&p->chip->reg_lock, flags);

	if (!err)
		p->running |= SNDRV_SB_CSP_ST_PAUSED;

	return err;
}
/*
* restart CSP codec and resume DMA transfer
*/
/*
 * restart CSP codec and resume DMA transfer; only valid while paused
 */
static int snd_sb_csp_restart(struct snd_sb_csp * p)
{
	unsigned long flags;
	int err;

	if (!(p->running & SNDRV_SB_CSP_ST_PAUSED))
		return -EBUSY;

	/* codec parameter 0x80 = 0x00 resumes the codec */
	spin_lock_irqsave(&p->chip->reg_lock, flags);
	err = set_codec_parameter(p->chip, 0x80, 0x00);
	spin_unlock_irqrestore(&p->chip->reg_lock, flags);

	if (!err)
		p->running &= ~SNDRV_SB_CSP_ST_PAUSED;

	return err;
}
/* ------------------------------ */
/*
* QSound mixer control for PCM
*/
#define snd_sb_qsound_switch_info snd_ctl_boolean_mono_info
/* report the current QSound enable state as a boolean control value */
static int snd_sb_qsound_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = !!p->q_enabled;
	return 0;
}
/* update the QSound enable flag; returns 1 if the value changed */
static int snd_sb_qsound_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
	unsigned char nval = ucontrol->value.integer.value[0] & 0x01;
	unsigned long flags;
	int changed;

	spin_lock_irqsave(&p->q_lock, flags);
	changed = (p->q_enabled != nval);
	p->q_enabled = nval;
	spin_unlock_irqrestore(&p->q_lock, flags);

	return changed;
}
/* describe the "3D Control - Space" element: two integers (left/right
 * position), each in 0..SNDRV_SB_CSP_QSOUND_MAX_RIGHT */
static int snd_sb_qsound_space_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;
	return 0;
}
/* read the cached QSound left/right positions under q_lock */
static int snd_sb_qsound_space_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
	unsigned long flags;

	spin_lock_irqsave(&p->q_lock, flags);
	ucontrol->value.integer.value[0] = p->qpos_left;
	ucontrol->value.integer.value[1] = p->qpos_right;
	spin_unlock_irqrestore(&p->q_lock, flags);
	return 0;
}
/*
 * store new QSound left/right positions; values are truncated to 8 bits
 * (as the hardware parameter is a byte) and then clamped to the maximum.
 * Sets qpos_changed so the interrupt path transfers the new values.
 * Returns 1 if either position changed.
 */
static int snd_sb_qsound_space_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_sb_csp *p = snd_kcontrol_chip(kcontrol);
	unsigned char left, right;
	unsigned long flags;
	int changed;

	/* truncation to unsigned char happens before clamping, matching
	 * the historical behaviour for out-of-range input */
	left = ucontrol->value.integer.value[0];
	if (left > SNDRV_SB_CSP_QSOUND_MAX_RIGHT)
		left = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;
	right = ucontrol->value.integer.value[1];
	if (right > SNDRV_SB_CSP_QSOUND_MAX_RIGHT)
		right = SNDRV_SB_CSP_QSOUND_MAX_RIGHT;

	spin_lock_irqsave(&p->q_lock, flags);
	changed = (p->qpos_left != left) || (p->qpos_right != right);
	p->qpos_left = left;
	p->qpos_right = right;
	p->qpos_changed = changed;
	spin_unlock_irqrestore(&p->q_lock, flags);

	return changed;
}
/* template for the boolean "3D Control - Switch" mixer element */
static struct snd_kcontrol_new snd_sb_qsound_switch = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "3D Control - Switch",
	.info = snd_sb_qsound_switch_info,
	.get = snd_sb_qsound_switch_get,
	.put = snd_sb_qsound_switch_put
};
/* template for the two-value "3D Control - Space" mixer element */
static struct snd_kcontrol_new snd_sb_qsound_space = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "3D Control - Space",
	.info = snd_sb_qsound_space_info,
	.get = snd_sb_qsound_space_get,
	.put = snd_sb_qsound_space_put
};
/*
 * create the two QSound mixer controls and initialise the QSound state
 *
 * Fix (use-after-free): snd_ctl_add() frees the kcontrol itself when it
 * fails, so the pointer must only be cached in p->qsound_switch /
 * p->qsound_space after a successful add.  The original stored the
 * pointer first; on an add failure, snd_sb_qsound_destroy() in the error
 * path then called snd_ctl_remove() on the already-freed control.
 *
 * Returns 0 on success or a negative error code (controls created so far
 * are torn down again).
 */
static int snd_sb_qsound_build(struct snd_sb_csp * p)
{
	struct snd_card *card;
	struct snd_kcontrol *kctl;
	int err;

	if (snd_BUG_ON(!p))
		return -EINVAL;

	card = p->chip->card;

	p->qpos_left = p->qpos_right = SNDRV_SB_CSP_QSOUND_MAX_RIGHT / 2;
	p->qpos_changed = 0;

	spin_lock_init(&p->q_lock);

	kctl = snd_ctl_new1(&snd_sb_qsound_switch, p);
	err = snd_ctl_add(card, kctl);
	if (err < 0)
		goto __error;
	p->qsound_switch = kctl;	/* cache only after successful add */

	kctl = snd_ctl_new1(&snd_sb_qsound_space, p);
	err = snd_ctl_add(card, kctl);
	if (err < 0)
		goto __error;
	p->qsound_space = kctl;

	return 0;

     __error:
	snd_sb_qsound_destroy(p);
	return err;
}
static void snd_sb_qsound_destroy(struct snd_sb_csp * p)
{
struct snd_card *card;
unsigned long flags;
if (snd_BUG_ON(!p))
return;
card = p->chip->card;
down_write(&card->controls_rwsem);
if (p->qsound_switch)
snd_ctl_remove(card, p->qsound_switch);
if (p->qsound_space)
snd_ctl_remove(card, p->qsound_space);
up_write(&card->controls_rwsem);
/* cancel pending transfer of QSound parameters */
spin_lock_irqsave (&p->q_lock, flags);
p->qpos_changed = 0;
spin_unlock_irqrestore (&p->q_lock, flags);
}
/*
* Transfer qsound parameters to CSP,
* function should be called from interrupt routine
*/
/*
 * Transfer qsound parameters to CSP,
 * function should be called from interrupt routine
 *
 * Pushes the cached left/right positions to the decoder via the fixed
 * parameter sequence below; returns -ENXIO if QSound is not active.
 * qpos_changed is cleared either way so the caller stops retrying.
 */
static int snd_sb_csp_qsound_transfer(struct snd_sb_csp * p)
{
	int err = -ENXIO;

	spin_lock(&p->q_lock);
	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
		set_codec_parameter(p->chip, 0xe0, 0x01);
		/* left channel */
		set_codec_parameter(p->chip, 0x00, p->qpos_left);
		set_codec_parameter(p->chip, 0x02, 0x00);
		/* right channel */
		set_codec_parameter(p->chip, 0x00, p->qpos_right);
		set_codec_parameter(p->chip, 0x03, 0x00);
		err = 0;
	}
	p->qpos_changed = 0;
	spin_unlock(&p->q_lock);
	return err;
}
/* ------------------------------ */
/*
* proc interface
*/
/*
 * create the per-device proc entry ("cspD<n>") showing CSP status
 *
 * Fix: use snprintf instead of sprintf — "cspD" plus a full-width
 * decimal int can exceed the 16-byte buffer, so bound the write.
 */
static int init_proc_entry(struct snd_sb_csp * p, int device)
{
	char name[16];
	struct snd_info_entry *entry;

	snprintf(name, sizeof(name), "cspD%d", device);
	if (! snd_card_proc_new(p->chip->card, name, &entry))
		snd_info_set_text_ops(entry, p, info_read);
	return 0;
}
/*
 * proc read callback: dump version, state flags, the loaded codec's
 * capabilities and the current run/QSound parameters
 */
static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct snd_sb_csp *p = entry->private_data;

	snd_iprintf(buffer, "Creative Signal Processor [v%d.%d]\n", (p->version >> 4), (p->version & 0x0f));
	/* state string: Q(Sound) / x / P(aused) / R(unning) / L(oaded) */
	snd_iprintf(buffer, "State: %cx%c%c%c\n", ((p->running & SNDRV_SB_CSP_ST_QSOUND) ? 'Q' : '-'),
		    ((p->running & SNDRV_SB_CSP_ST_PAUSED) ? 'P' : '-'),
		    ((p->running & SNDRV_SB_CSP_ST_RUNNING) ? 'R' : '-'),
		    ((p->running & SNDRV_SB_CSP_ST_LOADED) ? 'L' : '-'));

	if (p->running & SNDRV_SB_CSP_ST_LOADED) {
		snd_iprintf(buffer, "Codec: %s [func #%d]\n", p->codec_name, p->func_nr);
		snd_iprintf(buffer, "Sample rates: ");
		if (p->acc_rates == SNDRV_SB_CSP_RATE_ALL) {
			snd_iprintf(buffer, "All\n");
		} else {
			snd_iprintf(buffer, "%s%s%s%s\n",
				    ((p->acc_rates & SNDRV_SB_CSP_RATE_8000) ? "8000Hz " : ""),
				    ((p->acc_rates & SNDRV_SB_CSP_RATE_11025) ? "11025Hz " : ""),
				    ((p->acc_rates & SNDRV_SB_CSP_RATE_22050) ? "22050Hz " : ""),
				    ((p->acc_rates & SNDRV_SB_CSP_RATE_44100) ? "44100Hz" : ""));
		}
		if (p->mode == SNDRV_SB_CSP_MODE_QSOUND) {
			snd_iprintf(buffer, "QSound decoder %sabled\n",
				    p->q_enabled ? "en" : "dis");
		} else {
			snd_iprintf(buffer, "PCM format ID: 0x%x (%s/%s) [%s/%s] [%s/%s]\n",
				    p->acc_format,
				    ((p->acc_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? "16bit" : "-"),
				    ((p->acc_width & SNDRV_SB_CSP_SAMPLE_8BIT) ? "8bit" : "-"),
				    ((p->acc_channels & SNDRV_SB_CSP_MONO) ? "mono" : "-"),
				    ((p->acc_channels & SNDRV_SB_CSP_STEREO) ? "stereo" : "-"),
				    ((p->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) ? "playback" : "-"),
				    ((p->mode & SNDRV_SB_CSP_MODE_DSP_READ) ? "capture" : "-"));
		}
	}
	if (p->running & SNDRV_SB_CSP_ST_AUTO) {
		snd_iprintf(buffer, "Autoloaded Mu-Law, A-Law or Ima-ADPCM hardware codec\n");
	}
	if (p->running & SNDRV_SB_CSP_ST_RUNNING) {
		snd_iprintf(buffer, "Processing %dbit %s PCM samples\n",
			    ((p->run_width & SNDRV_SB_CSP_SAMPLE_16BIT) ? 16 : 8),
			    ((p->run_channels & SNDRV_SB_CSP_MONO) ? "mono" : "stereo"));
	}
	if (p->running & SNDRV_SB_CSP_ST_QSOUND) {
		snd_iprintf(buffer, "Qsound position: left = 0x%x, right = 0x%x\n",
			    p->qpos_left, p->qpos_right);
	}
}
/* */
EXPORT_SYMBOL(snd_sb_csp_new);
/*
* INIT part
*/
/* module load hook: nothing to do here — the CSP device is created on
 * demand by the SB16 driver via the exported snd_sb_csp_new() */
static int __init alsa_sb_csp_init(void)
{
	return 0;
}
/* module unload hook: no global state to tear down */
static void __exit alsa_sb_csp_exit(void)
{
}
module_init(alsa_sb_csp_init)
module_exit(alsa_sb_csp_exit)
| gpl-2.0 |
TeamLGOG/android_kernel_lge_f320k | fs/afs/flock.c | 8410 | 16151 | /* AFS file locking support
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "internal.h"
#define AFS_LOCK_GRANTED 0
#define AFS_LOCK_PENDING 1
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);
static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);
static const struct file_lock_operations afs_lock_ops = {
.fl_copy_lock = afs_fl_copy_lock,
.fl_release_private = afs_fl_release_private,
};
/*
* initialise the lock manager thread if it isn't already running
*/
/*
 * initialise the lock manager thread if it isn't already running
 *
 * Lazily creates the single "kafs_lockd" workqueue, double-checking
 * under the mutex so only one caller creates it.
 * NOTE(review): the fast-path read of afs_lock_manager is done without
 * the mutex or an explicit barrier — presumably acceptable here because
 * the pointer is written once and never cleared while in use; confirm.
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		/* re-check under the mutex in case we raced */
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}
/*
* destroy the lock manager thread if it's running
*/
/*
 * destroy the lock manager workqueue on module unload, if it was ever
 * created (destroy_workqueue must not be given a NULL pointer)
 */
void __exit afs_kill_lock_manager(void)
{
	if (!afs_lock_manager)
		return;
	destroy_workqueue(afs_lock_manager);
}
/*
* if the callback is broken on this vnode, then the lock may now be available
*/
/*
 * if the callback is broken on this vnode, then the lock may now be available
 *
 * Kicks afs_lock_work() immediately (delay 0) so it retries the pending
 * lock for this vnode.
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}
/*
* the lock will time out in 5 minutes unless we extend it, so schedule
* extension in a bit less than that time
*/
/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 *
 * (AFS_LOCKWAIT is the server lease in seconds; we requeue at half that.)
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}
/*
* grant one or more locks (readlocks are allowed to jump the queue if the
* first lock in the queue is itself a readlock)
* - the caller must hold the vnode lock
*/
/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *cursor, *next;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);

	/* a granted writelock grants nothing else */
	if (fl->fl_type != F_RDLCK)
		return;

	/* promote every queued readlock alongside this one */
	list_for_each_entry_safe(cursor, next, &vnode->pending_locks,
				 fl_u.afs.link) {
		if (cursor->fl_type != F_RDLCK)
			continue;
		cursor->fl_u.afs.state = AFS_LOCK_GRANTED;
		list_move_tail(&cursor->fl_u.afs.link, &vnode->granted_locks);
		wake_up(&cursor->fl_wait);
	}
}
/*
* do work for a lock, including:
* - probing for a lock we're waiting on but didn't get immediately
* - extending a lock that's close to timing out
*/
/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 *
 * Runs on the kafs_lockd workqueue.  Handles, in order: a deferred
 * server-side unlock, extension of a held lock, and acquisition attempts
 * for the head of the pending queue.
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		/* drop the ref afs_defer_unlock() took for us */
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so if might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
			/* deliberate fallthrough into the common
			 * grant/withdraw handling below */
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				/* the waiter vanished; give the server lock
				 * straight back */
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}
/*
* pass responsibility for the unlocking of a vnode on the server to the
* manager thread, lest a pending signal in the calling thread interrupt
* AF_RXRPC
* - the caller must hold the vnode lock
*/
/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	/* drop any scheduled extension; the requeue below will make the
	 * manager perform the release instead */
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	/* ref is consumed (key_put) by afs_lock_work() after the release */
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}
/*
 * request a lock on a file on the server
 * - only whole-file locks are supported
 * - returns 0 on success or a negative errno; may sleep (interruptibly)
 *   waiting for the lock manager thread when FL_SLEEP is set
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	lock_flocks();

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;

	/* server already has locks and the caller won't wait */
	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		/* ask the server directly; vnode lock dropped over the RPC */
		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			/* lock is held elsewhere; queue and wait our turn */
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	/* recheck under the lock: the grant may have raced the signal */
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	unlock_flocks();
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	goto abort_attempt;
}
/*
 * unlock on a file on the server
 * - only whole-file unlocks are supported
 * - returns 0 or a negative errno from the VFS record removal
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	/* drop the VFS's record of the lock first */
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}
/*
 * return information about a lock we currently hold, if indeed we hold one
 * - local VFS records take precedence; otherwise the server's lock count
 *   decides (positive => read locks held, negative => a write lock)
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret = 0, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	mutex_lock(&vnode->vfs_inode.i_mutex);

	/* a locally recorded lock answers the query immediately */
	posix_test_lock(file, fl);
	if (fl->fl_type != F_UNLCK)
		goto out;

	/* no local locks; consult the server */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto out;

	lock_count = vnode->status.lock_count;
	if (lock_count != 0) {
		fl->fl_type = lock_count > 0 ? F_RDLCK : F_WRLCK;
		fl->fl_start = 0;
		fl->fl_end = OFFSET_MAX;
	}

out:
	mutex_unlock(&vnode->vfs_inode.i_mutex);
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}
/*
 * manage POSIX locks on a file
 * - dispatches F_GETLK queries, unlocks and lock requests to the
 *   appropriate helper
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
	int unlocking = (fl->fl_type == F_UNLCK);

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && !unlocking)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	return unlocking ? afs_do_unlk(file, fl) : afs_do_setlk(file, fl);
}
/*
 * manage FLOCK locks on a file
 * - flock() is emulated with whole-file POSIX locks on the server, keyed
 *   by the struct file pointer as the owner
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t) file;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	return fl->fl_type == F_UNLCK ? afs_do_unlk(file, fl)
				      : afs_do_setlk(file, fl);
}
/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	/* queue the copy right beside the original on the vnode's list */
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}
/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	/* safe even if already unlinked: list_del_init() re-inits the node */
	list_del_init(&fl->fl_u.afs.link);
}
/* license: GPL-2.0 */
/* source row: byeonggon/0android_kernel_samsung_crespo | sound/oss/dmasound/dmasound_atari.c | 12506 | 42591 */
/*
* linux/sound/oss/dmasound/dmasound_atari.c
*
* Atari TT and Falcon DMA Sound Driver
*
* See linux/sound/oss/dmasound/dmasound_core.c for copyright and credits
* prior to 28/01/2001
*
* 28/01/2001 [0.1] Iain Sandoe
* - added versioning
* - put in and populated the hardware_afmts field.
* [0.2] - put in SNDCTL_DSP_GETCAPS value.
* 01/02/2001 [0.3] - put in default hard/soft settings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/soundcard.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/atariints.h>
#include <asm/atari_stram.h>
#include "dmasound.h"
#define DMASOUND_ATARI_REVISION 0
#define DMASOUND_ATARI_EDITION 3
extern void atari_microwire_cmd(int cmd);
static int is_falcon;
static int write_sq_ignore_int; /* ++TeSche: used for Falcon */
static int expand_bal; /* Balance factor for expanding (not volume!) */
static int expand_data; /* Data for expanding */
/*** Translations ************************************************************/
/* ++TeSche: radically changed for new expanding purposes...
*
* These two routines now deal with copying/expanding/translating the samples
* from user space into our buffer at the right frequency. They take care about
* how much data there's actually to read, how much buffer space there is and
* to convert samples into the right frequency/encoding. They will only work on
* complete samples so it may happen they leave some bytes in the input stream
* if the user didn't write a multiple of the current sample size. They both
* return the number of bytes they've used from both streams so you may detect
* such a situation. Luckily all programs should be able to cope with that.
*
* I think I've optimized anything as far as one can do in plain C, all
* variables should fit in registers and the loops are really short. There's
* one loop for every possible situation. Writing a more generalized and thus
* parameterized loop would only produce slower code. Feel free to optimize
* this in assembler if you like. :)
*
* I think these routines belong here because they're not yet really hardware
* independent, especially the fact that the Falcon can play 16bit samples
* only in stereo is hardcoded in both of them!
*
* ++geert: split in even more functions (one per format)
*/
static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
/*** Low level stuff *********************************************************/
static void *AtaAlloc(unsigned int size, gfp_t flags);
static void AtaFree(void *, unsigned int size);
static int AtaIrqInit(void);
#ifdef MODULE
static void AtaIrqCleanUp(void);
#endif /* MODULE */
static int AtaSetBass(int bass);
static int AtaSetTreble(int treble);
static void TTSilence(void);
static void TTInit(void);
static int TTSetFormat(int format);
static int TTSetVolume(int volume);
static int TTSetGain(int gain);
static void FalconSilence(void);
static void FalconInit(void);
static int FalconSetFormat(int format);
static int FalconSetVolume(int volume);
static void AtaPlayNextFrame(int index);
static void AtaPlay(void);
static irqreturn_t AtaInterrupt(int irq, void *dummy);
/*** Mid level stuff *********************************************************/
static void TTMixerInit(void);
static void FalconMixerInit(void);
static int AtaMixerIoctl(u_int cmd, u_long arg);
static int TTMixerIoctl(u_int cmd, u_long arg);
static int FalconMixerIoctl(u_int cmd, u_long arg);
static int AtaWriteSqSetup(void);
static int AtaSqOpen(fmode_t mode);
static int TTStateInfo(char *buffer, size_t space);
static int FalconStateInfo(char *buffer, size_t space);
/*** Translations ************************************************************/
/*
 * Copy mu-law or A-law samples from user space into the DMA frame,
 * decoding them to signed 8-bit via the appropriate lookup table.
 * Returns the number of bytes consumed (== bytes produced) and advances
 * *frameUsed accordingly.
 */
static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
			  u_char frame[], ssize_t *frameUsed,
			  ssize_t frameLeft)
{
	char *table;
	u_char *dst;
	ssize_t n, consumed;

	/* pick the decode table matching the configured soft format */
	table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8
						    : dmasound_alaw2dma8;
	dst = &frame[*frameUsed];
	n = min_t(unsigned long, userCount, frameLeft);
	if (dmasound.soft.stereo)
		n &= ~1;	/* only whole stereo sample pairs */
	for (consumed = n; n > 0; n--) {
		u_char in;

		if (get_user(in, userPtr++))
			return -EFAULT;
		*dst++ = table[in];
	}
	*frameUsed += consumed;
	return consumed;
}
/*
 * Copy signed 8-bit samples straight from user space - already the
 * hardware's format, so a plain bulk copy suffices.
 * Returns the number of bytes consumed and advances *frameUsed.
 */
static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
			 u_char frame[], ssize_t *frameUsed,
			 ssize_t frameLeft)
{
	void *dst = &frame[*frameUsed];
	ssize_t n = min_t(unsigned long, userCount, frameLeft);

	if (dmasound.soft.stereo)
		n &= ~1;	/* only whole stereo sample pairs */
	if (copy_from_user(dst, userPtr, n))
		return -EFAULT;
	*frameUsed += n;
	return n;
}
/*
 * Copy unsigned 8-bit samples from user space, flipping the top bit of
 * each byte to convert them to the signed representation the hardware
 * plays.  Stereo pairs are handled 16 bits at a time.
 * Returns the number of bytes consumed and advances *frameUsed.
 */
static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
			 u_char frame[], ssize_t *frameUsed,
			 ssize_t frameLeft)
{
	ssize_t n, consumed;

	if (!dmasound.soft.stereo) {
		u_char *dst = &frame[*frameUsed];

		n = min_t(unsigned long, userCount, frameLeft);
		consumed = n;
		for (; n > 0; n--) {
			u_char in;

			if (get_user(in, userPtr++))
				return -EFAULT;
			*dst++ = in ^ 0x80;
		}
	} else {
		u_short *dst = (u_short *)&frame[*frameUsed];

		n = min_t(unsigned long, userCount, frameLeft) >> 1;
		consumed = n * 2;
		for (; n > 0; n--) {
			u_short in;

			if (get_user(in, (u_short __user *)userPtr))
				return -EFAULT;
			userPtr += 2;
			*dst++ = in ^ 0x8080;	/* both channels at once */
		}
	}
	*frameUsed += consumed;
	return consumed;
}
/*
 * Copy signed 16-bit big-endian samples (the hardware's native layout).
 * Mono input is duplicated into both channels, since 16-bit playback is
 * stereo-only on this hardware (see the note at the top of the file);
 * stereo input is bulk-copied unchanged.
 * Returns the number of user-space bytes consumed.
 */
static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
			    u_char frame[], ssize_t *frameUsed,
			    ssize_t frameLeft)
{
	ssize_t count, used;

	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>1;
		used = count*2;		/* bytes taken from user space */
		while (count > 0) {
			u_short data;
			if (get_user(data, (u_short __user *)userPtr))
				return -EFAULT;
			userPtr += 2;
			*p++ = data;	/* left channel ... */
			*p++ = data;	/* ... and right, same sample */
			count--;
		}
		*frameUsed += used*2;	/* frame grows twice as fast */
	} else {
		void *p = (u_short *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft) & ~3;
		used = count;
		if (copy_from_user(p, userPtr, count))
			return -EFAULT;
		*frameUsed += used;
	}
	return used;
}
/*
 * Copy unsigned 16-bit big-endian samples, XORing the sign bit of each
 * 16-bit value to convert to the signed form; mono input is duplicated
 * into both channels (16-bit playback is stereo-only).
 * Returns the number of user-space bytes consumed.
 */
static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
			    u_char frame[], ssize_t *frameUsed,
			    ssize_t frameLeft)
{
	ssize_t count, used;

	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>1;
		used = count*2;		/* bytes taken from user space */
		while (count > 0) {
			u_short data;
			if (get_user(data, (u_short __user *)userPtr))
				return -EFAULT;
			userPtr += 2;
			data ^= 0x8000;	/* flip sign bit */
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			count--;
		}
		*frameUsed += used*2;	/* frame grows twice as fast */
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>2;
		used = count*4;		/* bytes, not longword count */
		while (count > 0) {
			u_int data;
			if (get_user(data, (u_int __user *)userPtr))
				return -EFAULT;
			userPtr += 4;
			*p++ = data ^ 0x80008000;	/* both samples */
			count--;
		}
		*frameUsed += used;
	}
	return used;
}
/*
 * Copy signed 16-bit little-endian samples, byte-swapping each 16-bit
 * value to the hardware's big-endian layout; mono input is duplicated
 * into both channels (16-bit playback is stereo-only).
 * Returns the number of user-space bytes consumed.
 *
 * Fix: dropped the dead "count = frameLeft;" initializer - both branches
 * immediately overwrite count with a min_t() clamp.
 */
static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
			    u_char frame[], ssize_t *frameUsed,
			    ssize_t frameLeft)
{
	ssize_t count, used;

	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>1;
		used = count*2;		/* bytes taken from user space */
		while (count > 0) {
			u_short data;
			if (get_user(data, (u_short __user *)userPtr))
				return -EFAULT;
			userPtr += 2;
			data = le2be16(data);	/* to big-endian */
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			count--;
		}
		*frameUsed += used*2;	/* frame grows twice as fast */
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>2;
		used = count*4;		/* bytes, not longword count */
		while (count > 0) {
			u_long data;
			if (get_user(data, (u_int __user *)userPtr))
				return -EFAULT;
			userPtr += 4;
			data = le2be16dbl(data);	/* swap both halves */
			*p++ = data;
			count--;
		}
		*frameUsed += used;
	}
	return used;
}
/*
 * Copy unsigned 16-bit little-endian samples: byte-swap to big-endian
 * and flip the sign bit; mono input is duplicated into both channels
 * (16-bit playback is stereo-only).
 * Returns the number of user-space bytes consumed.
 *
 * Fixes:
 *  - the mono loop never decremented count, so it looped forever and
 *    scribbled past the frame buffer;
 *  - the stereo branch returned used = count (longword count) instead of
 *    count*4 (bytes), under-reporting consumption by a factor of four;
 *  - dropped the dead "count = frameLeft;" initializer.
 */
static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
			    u_char frame[], ssize_t *frameUsed,
			    ssize_t frameLeft)
{
	ssize_t count, used;

	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>1;
		used = count*2;		/* bytes taken from user space */
		while (count > 0) {
			u_short data;
			if (get_user(data, (u_short __user *)userPtr))
				return -EFAULT;
			userPtr += 2;
			data = le2be16(data) ^ 0x8000;
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			count--;	/* was missing: loop never ended */
		}
		*frameUsed += used*2;	/* frame grows twice as fast */
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		count = min_t(unsigned long, userCount, frameLeft)>>2;
		used = count*4;		/* bytes consumed, was just count */
		while (count > 0) {
			u_long data;
			if (get_user(data, (u_int __user *)userPtr))
				return -EFAULT;
			userPtr += 4;
			data = le2be16dbl(data) ^ 0x80008000;
			*p++ = data;
			count--;
		}
		*frameUsed += used;
	}
	return used;
}
/*
 * Copy mu-law/A-law samples while expanding to the hardware rate: each
 * decoded sample is replayed until the balance counter (bal) says a new
 * input sample is due.  Conversion state persists across calls in the
 * file-scope expand_bal / expand_data so partial writes resume cleanly.
 * Returns the number of user bytes consumed; *frameUsed is advanced by
 * the number of frame bytes produced.
 */
static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
			   u_char frame[], ssize_t *frameUsed,
			   ssize_t frameLeft)
{
	char *table = dmasound.soft.format == AFMT_MU_LAW ? dmasound_ulaw2dma8
							  : dmasound_alaw2dma8;
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_char *p = &frame[*frameUsed];
		u_char data = expand_data;
		while (frameLeft) {
			u_char c;
			if (bal < 0) {
				/* time to fetch the next input sample */
				if (!userCount)
					break;
				if (get_user(c, userPtr++))
					return -EFAULT;
				data = table[c];
				userCount--;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft--;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 2) {
			u_char c;
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(c, userPtr++))
					return -EFAULT;
				data = table[c] << 8;	/* left channel */
				if (get_user(c, userPtr++))
					return -EFAULT;
				data |= table[c];	/* right channel */
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 2;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of signed 8-bit samples: each input sample is
 * replayed until bal goes non-negative again.  State carried across
 * calls in expand_bal / expand_data.  Returns user bytes consumed and
 * advances *frameUsed by frame bytes produced.
 */
static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
			  u_char frame[], ssize_t *frameUsed,
			  ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_char *p = &frame[*frameUsed];
		u_char data = expand_data;
		while (frameLeft) {
			if (bal < 0) {
				/* fetch the next input sample */
				if (!userCount)
					break;
				if (get_user(data, userPtr++))
					return -EFAULT;
				userCount--;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft--;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 2) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				/* a stereo pair is one 16-bit read */
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 2;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of unsigned 8-bit samples; each byte's top bit is
 * flipped to convert to signed form.  State carried across calls in
 * expand_bal / expand_data.  Returns user bytes consumed and advances
 * *frameUsed by frame bytes produced.
 */
static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
			  u_char frame[], ssize_t *frameUsed,
			  ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_char *p = &frame[*frameUsed];
		u_char data = expand_data;
		while (frameLeft) {
			if (bal < 0) {
				if (!userCount)
					break;
				if (get_user(data, userPtr++))
					return -EFAULT;
				data ^= 0x80;	/* unsigned -> signed */
				userCount--;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft--;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 2) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				data ^= 0x8080;	/* both channels at once */
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 2;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of signed 16-bit big-endian samples.  Mono input
 * is duplicated into both channels (16-bit playback is stereo-only), so
 * every output step emits one 4-byte stereo frame.  State persists in
 * expand_bal / expand_data.  Returns user bytes consumed and advances
 * *frameUsed by frame bytes produced.
 */
static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
			     u_char frame[], ssize_t *frameUsed,
			     ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;	/* left ... */
			*p++ = data;	/* ... and right channel */
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		u_long data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 4)
					break;
				if (get_user(data, (u_int __user *)userPtr))
					return -EFAULT;
				userPtr += 4;
				userCount -= 4;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of unsigned 16-bit big-endian samples; the sign
 * bit of each 16-bit value is flipped to give the signed form.  Mono is
 * duplicated into both channels.  State persists in expand_bal /
 * expand_data.  Returns user bytes consumed; advances *frameUsed by
 * frame bytes produced.
 */
static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
			     u_char frame[], ssize_t *frameUsed,
			     ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				data ^= 0x8000;	/* flip sign bit */
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		u_long data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 4)
					break;
				if (get_user(data, (u_int __user *)userPtr))
					return -EFAULT;
				userPtr += 4;
				data ^= 0x80008000;	/* both samples */
				userCount -= 4;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of signed 16-bit little-endian samples; each
 * 16-bit value is byte-swapped to the hardware's big-endian layout.
 * Mono is duplicated into both channels.  State persists in expand_bal /
 * expand_data.  Returns user bytes consumed; advances *frameUsed by
 * frame bytes produced.
 */
static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
			     u_char frame[], ssize_t *frameUsed,
			     ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				data = le2be16(data);	/* to big-endian */
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		u_long data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 4)
					break;
				if (get_user(data, (u_int __user *)userPtr))
					return -EFAULT;
				userPtr += 4;
				data = le2be16dbl(data);	/* swap both */
				userCount -= 4;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/*
 * Rate-expanding copy of unsigned 16-bit little-endian samples: each
 * value is byte-swapped to big-endian and its sign bit flipped.  Mono is
 * duplicated into both channels.  State persists in expand_bal /
 * expand_data.  Returns user bytes consumed; advances *frameUsed by
 * frame bytes produced.
 */
static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
			     u_char frame[], ssize_t *frameUsed,
			     ssize_t frameLeft)
{
	/* this should help gcc to stuff everything into registers */
	long bal = expand_bal;
	long hSpeed = dmasound.hard.speed, sSpeed = dmasound.soft.speed;
	ssize_t used, usedf;

	used = userCount;
	usedf = frameLeft;
	if (!dmasound.soft.stereo) {
		u_short *p = (u_short *)&frame[*frameUsed];
		u_short data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 2)
					break;
				if (get_user(data, (u_short __user *)userPtr))
					return -EFAULT;
				userPtr += 2;
				data = le2be16(data) ^ 0x8000;
				userCount -= 2;
				bal += hSpeed;
			}
			*p++ = data;
			*p++ = data;	/* duplicate into both channels */
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	} else {
		u_long *p = (u_long *)&frame[*frameUsed];
		u_long data = expand_data;
		while (frameLeft >= 4) {
			if (bal < 0) {
				if (userCount < 4)
					break;
				if (get_user(data, (u_int __user *)userPtr))
					return -EFAULT;
				userPtr += 4;
				data = le2be16dbl(data) ^ 0x80008000;
				userCount -= 4;
				bal += hSpeed;
			}
			*p++ = data;
			frameLeft -= 4;
			bal -= sSpeed;
		}
		expand_data = data;
	}
	expand_bal = bal;
	used -= userCount;		/* bytes actually consumed */
	*frameUsed += usedf-frameLeft;	/* bytes actually produced */
	return used;
}
/* TT conversion tables: 8-bit formats only (TT DMA is 8-bit), straight
 * copy when the soft rate matches a hardware rate ... */
static TRANS transTTNormal = {
	.ct_ulaw	= ata_ct_law,
	.ct_alaw	= ata_ct_law,
	.ct_s8		= ata_ct_s8,
	.ct_u8		= ata_ct_u8,
};

/* ... and rate-expanding variants otherwise */
static TRANS transTTExpanding = {
	.ct_ulaw	= ata_ctx_law,
	.ct_alaw	= ata_ctx_law,
	.ct_s8		= ata_ctx_s8,
	.ct_u8		= ata_ctx_u8,
};

/* Falcon tables additionally support the four 16-bit formats */
static TRANS transFalconNormal = {
	.ct_ulaw	= ata_ct_law,
	.ct_alaw	= ata_ct_law,
	.ct_s8		= ata_ct_s8,
	.ct_u8		= ata_ct_u8,
	.ct_s16be	= ata_ct_s16be,
	.ct_u16be	= ata_ct_u16be,
	.ct_s16le	= ata_ct_s16le,
	.ct_u16le	= ata_ct_u16le
};

static TRANS transFalconExpanding = {
	.ct_ulaw	= ata_ctx_law,
	.ct_alaw	= ata_ctx_law,
	.ct_s8		= ata_ctx_s8,
	.ct_u8		= ata_ctx_u8,
	.ct_s16be	= ata_ctx_s16be,
	.ct_u16be	= ata_ctx_u16be,
	.ct_s16le	= ata_ctx_s16le,
	.ct_u16le	= ata_ctx_u16le,
};
/*** Low level stuff *********************************************************/
/*
* Atari (TT/Falcon)
*/
/* Allocate a DMA buffer from ST-RAM (the sound DMA can only address
 * ST-RAM); the gfp flags are unused by the ST-RAM allocator. */
static void *AtaAlloc(unsigned int size, gfp_t flags)
{
	return atari_stram_alloc(size, "dmasound");
}
/* Release a buffer obtained from AtaAlloc(); size is not needed by the
 * ST-RAM allocator. */
static void AtaFree(void *obj, unsigned int size)
{
	atari_stram_free( obj );
}
/*
 * Hook MFP Timer A up as the end-of-frame interrupt source.
 * Returns 1 on success, 0 if the IRQ could not be registered.
 */
static int __init AtaIrqInit(void)
{
	/* Set up timer A. Timer A
	   will receive a signal upon end of playing from the sound
	   hardware. Furthermore Timer A is able to count events
	   and will cause an interrupt after a programmed number
	   of events. So all we need to keep the music playing is
	   to provide the sound hardware with new data upon
	   an interrupt from timer A. */
	st_mfp.tim_ct_a = 0;	/* ++roman: Stop timer before programming! */
	st_mfp.tim_dt_a = 1;	/* Cause interrupt after first event. */
	st_mfp.tim_ct_a = 8;	/* Turn on event counting. */
	/* Register interrupt handler. */
	if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound",
			AtaInterrupt))
		return 0;
	/* 0x20 is the Timer A bit in the MFP enable/mask registers */
	st_mfp.int_en_a |= 0x20;	/* Turn interrupt on. */
	st_mfp.int_mk_a |= 0x20;
	return 1;
}
#ifdef MODULE
/* Undo AtaIrqInit(): stop Timer A, mask its interrupt, release the IRQ. */
static void AtaIrqCleanUp(void)
{
	st_mfp.tim_ct_a = 0;		/* stop timer */
	st_mfp.int_en_a &= ~0x20;	/* turn interrupt off */
	free_irq(IRQ_MFP_TIMA, AtaInterrupt);
}
#endif /* MODULE */
#define TONE_VOXWARE_TO_DB(v) \
(((v) < 0) ? -12 : ((v) > 100) ? 12 : ((v) - 50) * 6 / 25)
#define TONE_DB_TO_VOXWARE(v) (((v) * 25 + ((v) > 0 ? 5 : -5)) / 6 + 50)
/*
 * Set the bass level: map the 0..100 VoxWare scale to the LM1992's dB
 * range, program the chip, and report back the value actually set
 * (re-quantized to the VoxWare scale).
 */
static int AtaSetBass(int bass)
{
	int db = TONE_VOXWARE_TO_DB(bass);

	dmasound.bass = db;
	atari_microwire_cmd(MW_LM1992_BASS(db));
	return TONE_DB_TO_VOXWARE(db);
}
/*
 * Set the treble level: map the 0..100 VoxWare scale to the LM1992's dB
 * range, program the chip, and report back the value actually set.
 */
static int AtaSetTreble(int treble)
{
	int db = TONE_VOXWARE_TO_DB(treble);

	dmasound.treble = db;
	atari_microwire_cmd(MW_LM1992_TREBLE(db));
	return TONE_DB_TO_VOXWARE(db);
}
/*
* TT
*/
/* Stop TT sound DMA and let the PSG through on the mixer. */
static void TTSilence(void)
{
	tt_dmasnd.ctrl = DMASND_CTRL_OFF;
	atari_microwire_cmd(MW_LM1992_PSG_HIGH); /* mix in PSG signal 1:1 */
}
/*
 * Program the TT sound DMA for the currently requested soft parameters:
 * pick the nearest supported hardware rate (50/25/12.5/6.25 kHz) and
 * decide whether samples can be copied straight through or must be
 * rate-expanded in software (per the catchRadius tolerance).
 */
static void TTInit(void)
{
	int mode, i, idx;
	const int freq[4] = {50066, 25033, 12517, 6258};

	/* search a frequency that fits into the allowed error range */
	idx = -1;
	for (i = 0; i < ARRAY_SIZE(freq); i++)
		/* this isn't as much useful for a TT than for a Falcon, but
		 * then it doesn't hurt very much to implement it for a TT too.
		 */
		if ((100 * abs(dmasound.soft.speed - freq[i]) / freq[i]) < catchRadius)
			idx = i;
	if (idx > -1) {
		/* close enough: snap to the hardware rate, no expanding */
		dmasound.soft.speed = freq[idx];
		dmasound.trans_write = &transTTNormal;
	} else
		dmasound.trans_write = &transTTExpanding;
	TTSilence();
	dmasound.hard = dmasound.soft;
	if (dmasound.hard.speed > 50066) {
		/* we would need to squeeze the sound, but we won't do that */
		dmasound.hard.speed = 50066;
		mode = DMASND_MODE_50KHZ;
		dmasound.trans_write = &transTTNormal;
	} else if (dmasound.hard.speed > 25033) {
		dmasound.hard.speed = 50066;
		mode = DMASND_MODE_50KHZ;
	} else if (dmasound.hard.speed > 12517) {
		dmasound.hard.speed = 25033;
		mode = DMASND_MODE_25KHZ;
	} else if (dmasound.hard.speed > 6258) {
		dmasound.hard.speed = 12517;
		mode = DMASND_MODE_12KHZ;
	} else {
		dmasound.hard.speed = 6258;
		mode = DMASND_MODE_6KHZ;
	}
	tt_dmasnd.mode = (dmasound.hard.stereo ?
			  DMASND_MODE_STEREO : DMASND_MODE_MONO) |
		DMASND_MODE_8BIT | mode;
	/* prime the expander so the first input sample is fetched at once */
	expand_bal = -dmasound.soft.speed;
}
/*
 * Select the sample format for the TT.  Only the 8-bit formats are
 * accepted (TT sound DMA is 8-bit only); anything else falls back to
 * signed 8-bit.  AFMT_QUERY just reports the current format.
 * Returns the format actually in effect.
 */
static int TTSetFormat(int format)
{
	if (format == AFMT_QUERY)
		return dmasound.soft.format;

	/* TT sound DMA supports only 8bit modes */
	switch (format) {
	case AFMT_MU_LAW:
	case AFMT_A_LAW:
	case AFMT_S8:
	case AFMT_U8:
		break;
	default:
		format = AFMT_S8;	/* fall back to signed 8-bit */
		break;
	}

	dmasound.soft.format = format;
	dmasound.soft.size = 8;
	if (dmasound.minDev == SND_DEV_DSP) {
		dmasound.dsp.format = format;
		dmasound.dsp.size = 8;
	}

	TTInit();
	return format;
}
#define VOLUME_VOXWARE_TO_DB(v) \
(((v) < 0) ? -40 : ((v) > 100) ? 0 : ((v) * 2) / 5 - 40)
#define VOLUME_DB_TO_VOXWARE(v) ((((v) + 40) * 5 + 1) / 2)
/*
 * Set the output volume; the low byte carries the left channel, the
 * next byte the right.  Values are mapped to the LM1992's dB scale,
 * programmed, and the re-quantized pair is returned in the same packed
 * format.
 */
static int TTSetVolume(int volume)
{
	int left  = VOLUME_VOXWARE_TO_DB(volume & 0xff);
	int right = VOLUME_VOXWARE_TO_DB((volume >> 8) & 0xff);

	dmasound.volume_left = left;
	atari_microwire_cmd(MW_LM1992_BALLEFT(left));
	dmasound.volume_right = right;
	atari_microwire_cmd(MW_LM1992_BALRIGHT(right));
	return VOLUME_DB_TO_VOXWARE(left) |
	       (VOLUME_DB_TO_VOXWARE(right) << 8);
}
#define GAIN_VOXWARE_TO_DB(v) \
(((v) < 0) ? -80 : ((v) > 100) ? 0 : ((v) * 4) / 5 - 80)
#define GAIN_DB_TO_VOXWARE(v) ((((v) + 80) * 5 + 1) / 4)
/*
 * Set the master gain: map the VoxWare scale to the LM1992's dB range,
 * program the chip, and return the value actually set.
 */
static int TTSetGain(int gain)
{
	int db = GAIN_VOXWARE_TO_DB(gain);

	dmasound.gain = db;
	atari_microwire_cmd(MW_LM1992_VOLUME(db));
	return GAIN_DB_TO_VOXWARE(db);
}
/*
* Falcon
*/
/* Stop Falcon playback and reconfigure the sound matrix so only the PSG
 * is audible. */
static void FalconSilence(void)
{
	/* stop playback, set sample rate 50kHz for PSG sound */
	tt_dmasnd.ctrl = DMASND_CTRL_OFF;
	tt_dmasnd.mode = DMASND_MODE_50KHZ | DMASND_MODE_STEREO | DMASND_MODE_8BIT;
	tt_dmasnd.int_div = 0;		/* STE compatible divider */
	tt_dmasnd.int_ctrl = 0x0;
	tt_dmasnd.cbar_src = 0x0000;	/* no matrix inputs */
	tt_dmasnd.cbar_dst = 0x0000;	/* no matrix outputs */
	tt_dmasnd.dac_src = 1;		/* connect ADC to DAC, disconnect matrix */
	tt_dmasnd.adc_src = 3;		/* ADC Input = PSG */
}
/*
 * Configure the Falcon sound hardware for the current soft (user
 * requested) parameters.
 *
 * The table of natively supported replay frequencies is scanned for an
 * entry within `catchRadius' percent of the requested speed; if one is
 * found the data can be played without resampling, otherwise the
 * expanding transfer routine is selected.  The chosen hardware speed is
 * then mapped onto the matching internal clock divider and the DMA /
 * crossbar registers are programmed for playback.
 */
static void FalconInit(void)
{
	int divider, i, idx;
	/* read-only table: make it static const so it is not rebuilt on
	 * the stack at every call */
	static const int freq[8] = {49170, 32780, 24585, 19668, 16390, 12292, 9834, 8195};

	/* search a frequency that fits into the allowed error range */
	idx = -1;
	for (i = 0; i < ARRAY_SIZE(freq); i++)
		/* if we will tolerate 3% error 8000Hz->8195Hz (2.38%) would
		 * be playable without expanding, but that now a kernel runtime
		 * option
		 */
		if ((100 * abs(dmasound.soft.speed - freq[i]) / freq[i]) < catchRadius)
			idx = i;
	if (idx > -1) {
		dmasound.soft.speed = freq[idx];
		dmasound.trans_write = &transFalconNormal;
	} else
		dmasound.trans_write = &transFalconExpanding;

	FalconSilence();
	dmasound.hard = dmasound.soft;

	if (dmasound.hard.size == 16) {
		/* the Falcon can play 16bit samples only in stereo */
		dmasound.hard.stereo = 1;
	}

	/* map the hardware speed onto the next supported frequency and
	 * its internal clock divider */
	if (dmasound.hard.speed > 49170) {
		/* we would need to squeeze the sound, but we won't do that */
		dmasound.hard.speed = 49170;
		divider = 1;
		dmasound.trans_write = &transFalconNormal;
	} else if (dmasound.hard.speed > 32780) {
		dmasound.hard.speed = 49170;
		divider = 1;
	} else if (dmasound.hard.speed > 24585) {
		dmasound.hard.speed = 32780;
		divider = 2;
	} else if (dmasound.hard.speed > 19668) {
		dmasound.hard.speed = 24585;
		divider = 3;
	} else if (dmasound.hard.speed > 16390) {
		dmasound.hard.speed = 19668;
		divider = 4;
	} else if (dmasound.hard.speed > 12292) {
		dmasound.hard.speed = 16390;
		divider = 5;
	} else if (dmasound.hard.speed > 9834) {
		dmasound.hard.speed = 12292;
		divider = 7;
	} else if (dmasound.hard.speed > 8195) {
		dmasound.hard.speed = 9834;
		divider = 9;
	} else {
		dmasound.hard.speed = 8195;
		divider = 11;
	}
	tt_dmasnd.int_div = divider;

	/* Setup Falcon sound DMA for playback */
	tt_dmasnd.int_ctrl = 0x4;	/* Timer A int at play end */
	tt_dmasnd.track_select = 0x0;	/* play 1 track, track 1 */
	tt_dmasnd.cbar_src = 0x0001;	/* DMA(25MHz) --> DAC */
	tt_dmasnd.cbar_dst = 0x0000;
	tt_dmasnd.rec_track_select = 0;
	tt_dmasnd.dac_src = 2;		/* connect matrix to DAC */
	tt_dmasnd.adc_src = 0;		/* ADC Input = Mic */

	tt_dmasnd.mode = (dmasound.hard.stereo ?
			  DMASND_MODE_STEREO : DMASND_MODE_MONO) |
		((dmasound.hard.size == 8) ?
		 DMASND_MODE_8BIT : DMASND_MODE_16BIT) |
		DMASND_MODE_6KHZ;

	expand_bal = -dmasound.soft.speed;
}
/*
 * Select the sample format used for playback on the Falcon.
 *
 * AFMT_QUERY returns the currently configured soft format without side
 * effects.  Unsupported formats degrade to AFMT_S8.  Returns the format
 * actually in effect after reinitializing the hardware.
 */
static int FalconSetFormat(int format)
{
	int size;

	/* Falcon sound DMA supports 8bit and 16bit modes */
	if (format == AFMT_QUERY)
		return dmasound.soft.format;

	switch (format) {
	case AFMT_MU_LAW:
	case AFMT_A_LAW:
	case AFMT_U8:
	case AFMT_S8:
		size = 8;
		break;
	case AFMT_S16_BE:
	case AFMT_U16_BE:
	case AFMT_S16_LE:
	case AFMT_U16_LE:
		size = 16;
		break;
	default:
		/* unsupported: fall back to signed 8-bit */
		format = AFMT_S8;
		size = 8;
		break;
	}

	dmasound.soft.format = format;
	dmasound.soft.size = size;
	if (dmasound.minDev == SND_DEV_DSP) {
		dmasound.dsp.format = format;
		dmasound.dsp.size = size;
	}
	FalconInit();

	return format;
}
/* This is for the Falcon output *attenuation* in 1.5dB steps,
 * i.e. output level from 0 to -22.5dB in -1.5dB steps.
 * OSS 0..100 maps onto attenuation steps 15..0 (larger value = more
 * attenuation = quieter); out-of-range input is clamped.
 */
#define VOLUME_VOXWARE_TO_ATT(v) \
	((v) < 0 ? 15 : (v) > 100 ? 0 : 15 - (v) * 3 / 20)
#define VOLUME_ATT_TO_VOXWARE(v) (100 - (v) * 20 / 3)
/*
 * Program the Falcon output attenuation register from an OSS volume
 * word (left in the low byte, right in the next byte) and return the
 * value actually set, re-encoded in the same layout.
 */
static int FalconSetVolume(int volume)
{
	dmasound.volume_left = VOLUME_VOXWARE_TO_ATT(volume & 0xff);
	dmasound.volume_right = VOLUME_VOXWARE_TO_ATT((volume & 0xff00) >> 8);
	/* left attenuation in bits 11:8, right in bits 7:4 */
	tt_dmasnd.output_atten = dmasound.volume_left << 8 | dmasound.volume_right << 4;
	return VOLUME_ATT_TO_VOXWARE(dmasound.volume_left) |
	       VOLUME_ATT_TO_VOXWARE(dmasound.volume_right) << 8;
}
/*
 * Program the sound DMA with the next frame from the write queue and
 * start (or keep) it running.  `index' is the queue position whose
 * frame may be the partially filled rear one (see the comparison with
 * write_sq.count below).
 */
static void AtaPlayNextFrame(int index)
{
	char *start, *end;

	/* used by AtaPlay() if all doubts whether there really is something
	 * to be played are already wiped out.
	 */
	start = write_sq.buffers[write_sq.front];
	/* the last queued frame may be only partially filled (rear_size) */
	end = start+((write_sq.count == index) ? write_sq.rear_size
					       : write_sq.block_size);
	/* end might not be a legal virtual address. */
	DMASNDSetEnd(virt_to_phys(end - 1) + 1);
	DMASNDSetBase(virt_to_phys(start));
	/* Since only an even number of samples per frame can
	   be played, we might lose one byte here. (TO DO) */
	write_sq.front = (write_sq.front+1) % write_sq.max_count;
	write_sq.active++;
	tt_dmasnd.ctrl = DMASND_CTRL_ON | DMASND_CTRL_REPEAT;
}
/*
 * Feed the sound DMA from the write queue, pre-programming up to two
 * frames.  Runs with the Timer A interrupt disabled to avoid racing
 * AtaInterrupt()'s updates of write_sq.count/active.
 */
static void AtaPlay(void)
{
	/* ++TeSche: Note that write_sq.active is no longer just a flag but
	 * holds the number of frames the DMA is currently programmed for
	 * instead, may be 0, 1 (currently being played) or 2 (pre-programmed).
	 *
	 * Changes done to write_sq.count and write_sq.active are a bit more
	 * subtle again so now I must admit I also prefer disabling the irq
	 * here rather than considering all possible situations. But the point
	 * is that disabling the irq doesn't have any bad influence on this
	 * version of the driver as we benefit from having pre-programmed the
	 * DMA wherever possible: There's no need to reload the DMA at the
	 * exact time of an interrupt but only at some time while the
	 * pre-programmed frame is playing!
	 */
	atari_disable_irq(IRQ_MFP_TIMA);

	if (write_sq.active == 2 ||	/* DMA is 'full' */
	    write_sq.count <= 0) {	/* nothing to do */
		atari_enable_irq(IRQ_MFP_TIMA);
		return;
	}

	if (write_sq.active == 0) {
		/* looks like there's nothing 'in' the DMA yet, so try
		 * to put two frames into it (at least one is available).
		 */
		if (write_sq.count == 1 &&
		    write_sq.rear_size < write_sq.block_size &&
		    !write_sq.syncing) {
			/* hmmm, the only existing frame is not
			 * yet filled and we're not syncing?
			 */
			atari_enable_irq(IRQ_MFP_TIMA);
			return;
		}
		AtaPlayNextFrame(1);
		if (write_sq.count == 1) {
			/* no more frames */
			atari_enable_irq(IRQ_MFP_TIMA);
			return;
		}
		if (write_sq.count == 2 &&
		    write_sq.rear_size < write_sq.block_size &&
		    !write_sq.syncing) {
			/* hmmm, there were two frames, but the second
			 * one is not yet filled and we're not syncing?
			 */
			atari_enable_irq(IRQ_MFP_TIMA);
			return;
		}
		AtaPlayNextFrame(2);
	} else {
		/* there's already a frame being played so we may only stuff
		 * one new into the DMA, but even if this may be the last
		 * frame existing the previous one is still on write_sq.count.
		 */
		if (write_sq.count == 2 &&
		    write_sq.rear_size < write_sq.block_size &&
		    !write_sq.syncing) {
			/* hmmm, the only existing frame is not
			 * yet filled and we're not syncing?
			 */
			atari_enable_irq(IRQ_MFP_TIMA);
			return;
		}
		AtaPlayNextFrame(2);
	}
	atari_enable_irq(IRQ_MFP_TIMA);
}
/*
 * Timer A interrupt: one DMA frame has (probably) finished playing.
 * Retire it from the queue, keep the DMA fed via AtaPlay(), and wake up
 * writers blocked on a full queue or processes draining output.
 */
static irqreturn_t AtaInterrupt(int irq, void *dummy)
{
#if 0
	/* ++TeSche: if you should want to test this... */
	static int cnt;
	if (write_sq.active == 2)
		if (++cnt == 10) {
			/* simulate losing an interrupt */
			cnt = 0;
			return IRQ_HANDLED;
		}
#endif
	spin_lock(&dmasound.lock);
	if (write_sq_ignore_int && is_falcon) {
		/* ++TeSche: Falcon only: ignore first irq because it comes
		 * immediately after starting a frame. after that, irqs come
		 * (almost) like on the TT.
		 */
		write_sq_ignore_int = 0;
		goto out;
	}

	if (!write_sq.active) {
		/* playing was interrupted and sq_reset() has already cleared
		 * the sq variables, so better don't do anything here.
		 */
		WAKE_UP(write_sq.sync_queue);
		goto out;
	}

	/* Probably ;) one frame is finished. Well, in fact it may be that a
	 * pre-programmed one is also finished because there has been a long
	 * delay in interrupt delivery and we've completely lost one, but
	 * there's no way to detect such a situation. In such a case the last
	 * frame will be played more than once and the situation will recover
	 * as soon as the irq gets through.
	 */
	write_sq.count--;
	write_sq.active--;

	if (!write_sq.active) {
		/* DMA ran dry: stop it and re-arm the Falcon first-irq skip */
		tt_dmasnd.ctrl = DMASND_CTRL_OFF;
		write_sq_ignore_int = 1;
	}

	WAKE_UP(write_sq.action_queue);
	/* At least one block of the queue is free now
	   so wake up a writing process blocked because
	   of a full queue. */

	if ((write_sq.active != 1) || (write_sq.count != 1))
		/* We must be a bit carefully here: write_sq.count indicates the
		 * number of buffers used and not the number of frames to be
		 * played. If write_sq.count==1 and write_sq.active==1 that
		 * means the only remaining frame was already programmed
		 * earlier (and is currently running) so we mustn't call
		 * AtaPlay() here, otherwise we'll play one frame too much.
		 */
		AtaPlay();

	if (!write_sq.active) WAKE_UP(write_sq.sync_queue);
	/* We are not playing after AtaPlay(), so there
	   is nothing to play any more. Wake up a process
	   waiting for audio output to drain. */
 out:
	spin_unlock(&dmasound.lock);
	return IRQ_HANDLED;
}
/*** Mid level stuff *********************************************************/
/*
* /dev/mixer abstraction
*/
/*
 * Map an OSS record level in 0..100 onto the Falcon's 4-bit input gain
 * steps 0..15 (clamping out-of-range input), and back again.
 */
#define RECLEVEL_VOXWARE_TO_GAIN(v) \
	((v) < 0 ? 0 : (v) > 100 ? 15 : (v) * 3 / 20)
#define RECLEVEL_GAIN_TO_VOXWARE(v) (((v) * 20 + 2) / 3)
/*
 * Reset the TT's LM1992 mixer to a sane startup state: maximum master
 * volume and channel balance (0 dB attenuation), flat tone controls.
 * Mirrors the 0 dB settings into the driver's volume bookkeeping.
 */
static void __init TTMixerInit(void)
{
	atari_microwire_cmd(MW_LM1992_VOLUME(0));
	dmasound.volume_left = 0;
	atari_microwire_cmd(MW_LM1992_BALLEFT(0));
	dmasound.volume_right = 0;
	atari_microwire_cmd(MW_LM1992_BALRIGHT(0));
	atari_microwire_cmd(MW_LM1992_TREBLE(0));
	atari_microwire_cmd(MW_LM1992_BASS(0));
}
/*
 * Seed the driver's volume bookkeeping from the Falcon's current output
 * attenuation register (left in bits 11:8, right in bits 7:4) instead
 * of overwriting the hardware state.
 */
static void __init FalconMixerInit(void)
{
	dmasound.volume_left = (tt_dmasnd.output_atten & 0xf00) >> 8;
	dmasound.volume_right = (tt_dmasnd.output_atten & 0xf0) >> 4;
}
/*
 * Mixer ioctls common to TT and Falcon: master volume, plus the
 * internal speaker switch which lives on YM2149 I/O port A bit 6
 * (bit set = speaker off).  Returns -EINVAL for unknown commands.
 */
static int AtaMixerIoctl(u_int cmd, u_long arg)
{
	int data;
	unsigned long flags;
	switch (cmd) {
	case SOUND_MIXER_READ_SPEAKER:
		if (is_falcon || MACH_IS_TT) {
			int porta;
			spin_lock_irqsave(&dmasound.lock, flags);
			/* select YM register 14 (port A), then read it back */
			sound_ym.rd_data_reg_sel = 14;
			porta = sound_ym.rd_data_reg_sel;
			spin_unlock_irqrestore(&dmasound.lock, flags);
			return IOCTL_OUT(arg, porta & 0x40 ? 0 : 100);
		}
		break;
	case SOUND_MIXER_WRITE_VOLUME:
		IOCTL_IN(arg, data);
		return IOCTL_OUT(arg, dmasound_set_volume(data));
	case SOUND_MIXER_WRITE_SPEAKER:
		if (is_falcon || MACH_IS_TT) {
			int porta;
			IOCTL_IN(arg, data);
			spin_lock_irqsave(&dmasound.lock, flags);
			/* read-modify-write port A, toggling only bit 6;
			 * levels below 50 switch the speaker off */
			sound_ym.rd_data_reg_sel = 14;
			porta = (sound_ym.rd_data_reg_sel & ~0x40) |
				(data < 50 ? 0x40 : 0);
			sound_ym.wd_data = porta;
			spin_unlock_irqrestore(&dmasound.lock, flags);
			return IOCTL_OUT(arg, porta & 0x40 ? 0 : 100);
		}
	}
	return -EINVAL;
}
/*
 * TT-specific mixer ioctls (volume, bass, treble, output gain via the
 * LM1992); everything else is delegated to AtaMixerIoctl().
 */
static int TTMixerIoctl(u_int cmd, u_long arg)
{
	int data;
	switch (cmd) {
	case SOUND_MIXER_READ_RECMASK:
		/* no recording sources on the TT */
		return IOCTL_OUT(arg, 0);
	case SOUND_MIXER_READ_DEVMASK:
		return IOCTL_OUT(arg,
				 SOUND_MASK_VOLUME | SOUND_MASK_TREBLE | SOUND_MASK_BASS |
				 (MACH_IS_TT ? SOUND_MASK_SPEAKER : 0));
	case SOUND_MIXER_READ_STEREODEVS:
		return IOCTL_OUT(arg, SOUND_MASK_VOLUME);
	case SOUND_MIXER_READ_VOLUME:
		return IOCTL_OUT(arg,
				 VOLUME_DB_TO_VOXWARE(dmasound.volume_left) |
				 (VOLUME_DB_TO_VOXWARE(dmasound.volume_right) << 8));
	case SOUND_MIXER_READ_BASS:
		return IOCTL_OUT(arg, TONE_DB_TO_VOXWARE(dmasound.bass));
	case SOUND_MIXER_READ_TREBLE:
		return IOCTL_OUT(arg, TONE_DB_TO_VOXWARE(dmasound.treble));
	case SOUND_MIXER_READ_OGAIN:
		return IOCTL_OUT(arg, GAIN_DB_TO_VOXWARE(dmasound.gain));
	case SOUND_MIXER_WRITE_BASS:
		IOCTL_IN(arg, data);
		return IOCTL_OUT(arg, dmasound_set_bass(data));
	case SOUND_MIXER_WRITE_TREBLE:
		IOCTL_IN(arg, data);
		return IOCTL_OUT(arg, dmasound_set_treble(data));
	case SOUND_MIXER_WRITE_OGAIN:
		IOCTL_IN(arg, data);
		return IOCTL_OUT(arg, dmasound_set_gain(data));
	}
	return AtaMixerIoctl(cmd, arg);
}
/*
 * Falcon-specific mixer ioctls (volume attenuation, microphone record
 * level); everything else is delegated to AtaMixerIoctl().
 */
static int FalconMixerIoctl(u_int cmd, u_long arg)
{
	int data;
	switch (cmd) {
	case SOUND_MIXER_READ_RECMASK:
		/* microphone is the only record source */
		return IOCTL_OUT(arg, SOUND_MASK_MIC);
	case SOUND_MIXER_READ_DEVMASK:
		return IOCTL_OUT(arg, SOUND_MASK_VOLUME | SOUND_MASK_MIC | SOUND_MASK_SPEAKER);
	case SOUND_MIXER_READ_STEREODEVS:
		return IOCTL_OUT(arg, SOUND_MASK_VOLUME | SOUND_MASK_MIC);
	case SOUND_MIXER_READ_VOLUME:
		return IOCTL_OUT(arg,
				 VOLUME_ATT_TO_VOXWARE(dmasound.volume_left) |
				 VOLUME_ATT_TO_VOXWARE(dmasound.volume_right) << 8);
	case SOUND_MIXER_READ_CAPS:
		return IOCTL_OUT(arg, SOUND_CAP_EXCL_INPUT);
	case SOUND_MIXER_WRITE_MIC:
		IOCTL_IN(arg, data);
		/* left gain in the high nibble, right gain in the low one */
		tt_dmasnd.input_gain =
			RECLEVEL_VOXWARE_TO_GAIN(data & 0xff) << 4 |
			RECLEVEL_VOXWARE_TO_GAIN(data >> 8 & 0xff);
		/* fall thru, return set value */
	case SOUND_MIXER_READ_MIC:
		return IOCTL_OUT(arg,
				 RECLEVEL_GAIN_TO_VOXWARE(tt_dmasnd.input_gain >> 4 & 0xf) |
				 RECLEVEL_GAIN_TO_VOXWARE(tt_dmasnd.input_gain & 0xf) << 8);
	}
	return AtaMixerIoctl(cmd, arg);
}
/* Write-queue (re)setup: process frame-done interrupts normally. */
static int AtaWriteSqSetup(void)
{
	write_sq_ignore_int = 0;
	return 0 ;
}

/* Device open: skip the first interrupt after starting playback
 * (needed on the Falcon, see AtaInterrupt()). */
static int AtaSqOpen(fmode_t mode)
{
	write_sq_ignore_int = 1;
	return 0 ;
}
/* Append one formatted line to `buffer', never writing past
 * buffer[space-1]; returns the (possibly larger-than-space) running
 * length so the caller can detect truncation. */
static int tt_state_line(char *buffer, size_t space, int len,
			 const char *fmt, int val)
{
	size_t used = ((size_t)len < space) ? (size_t)len : space;

	return len + snprintf(buffer + used, space - used, fmt, val);
}

/*
 * Dump the TT mixer state (volume, bass, treble) into `buffer' for the
 * /proc state file.  Returns the number of bytes used, clamped to
 * `space'.
 *
 * Uses bounded snprintf(): the old sprintf() version could only detect
 * an overflow after it had already written past the buffer.
 */
static int TTStateInfo(char *buffer, size_t space)
{
	int len = 0;

	len = tt_state_line(buffer, space, len,
			    "\tvol left %ddB [-40... 0]\n", dmasound.volume_left);
	len = tt_state_line(buffer, space, len,
			    "\tvol right %ddB [-40... 0]\n", dmasound.volume_right);
	len = tt_state_line(buffer, space, len,
			    "\tbass %ddB [-12...+12]\n", dmasound.bass);
	len = tt_state_line(buffer, space, len,
			    "\ttreble %ddB [-12...+12]\n", dmasound.treble);
	if (len >= space) {
		printk(KERN_ERR "dmasound_atari: overflowed state buffer alloc.\n") ;
		len = space ;
	}
	return len;
}
/*
 * Dump the Falcon mixer state (left/right attenuation) into `buffer'
 * for the /proc state file.  Returns the number of bytes used, clamped
 * to `space'.
 *
 * Uses bounded snprintf(): the old sprintf() version could only detect
 * an overflow after it had already written past the buffer.
 */
static int FalconStateInfo(char *buffer, size_t space)
{
	int len = 0;
	size_t used;

	used = ((size_t)len < space) ? (size_t)len : space;
	len += snprintf(buffer + used, space - used,
			"\tvol left %ddB [-22.5 ... 0]\n", dmasound.volume_left);
	used = ((size_t)len < space) ? (size_t)len : space;
	len += snprintf(buffer + used, space - used,
			"\tvol right %ddB [-22.5 ... 0]\n", dmasound.volume_right);
	if (len >= space) {
		printk(KERN_ERR "dmasound_atari: overflowed state buffer alloc.\n") ;
		len = space ;
	}
	return len;
}
/*** Machine definitions *****************************************************/
/* Default hardware settings: lowest native Falcon replay rate. */
static SETTINGS def_hard_falcon = {
	.format	= AFMT_S8,
	.stereo	= 0,
	.size	= 8,
	.speed	= 8195
} ;

/* Default hardware settings for the TT (lowest TT DMA rate is 6258 Hz;
 * 12517 is the next step up). */
static SETTINGS def_hard_tt = {
	.format	= AFMT_S8,
	.stereo	= 0,
	.size	= 8,
	.speed	= 12517
} ;

/* Default user-visible (soft) settings: classic 8 kHz unsigned 8-bit mono. */
static SETTINGS def_soft = {
	.format	= AFMT_U8,
	.stereo	= 0,
	.size	= 8,
	.speed	= 8000
} ;
/* Machine description for the Atari TT: wires the dmasound core's
 * callbacks to the TT/Ata implementations above. */
static __initdata MACHINE machTT = {
	.name		= "Atari",
	.name2		= "TT",
	.owner		= THIS_MODULE,
	.dma_alloc	= AtaAlloc,
	.dma_free	= AtaFree,
	.irqinit	= AtaIrqInit,
#ifdef MODULE
	.irqcleanup	= AtaIrqCleanUp,
#endif /* MODULE */
	.init		= TTInit,
	.silence	= TTSilence,
	.setFormat	= TTSetFormat,
	.setVolume	= TTSetVolume,
	.setBass	= AtaSetBass,
	.setTreble	= AtaSetTreble,
	.setGain	= TTSetGain,
	.play		= AtaPlay,
	.mixer_init	= TTMixerInit,
	.mixer_ioctl	= TTMixerIoctl,
	.write_sq_setup	= AtaWriteSqSetup,
	.sq_open	= AtaSqOpen,
	.state_info	= TTStateInfo,
	.min_dsp_speed	= 6258,
	.version	= ((DMASOUND_ATARI_REVISION<<8) | DMASOUND_ATARI_EDITION),
	.hardware_afmts	= AFMT_S8,	/* h'ware-supported formats *only* here */
	.capabilities	= DSP_CAP_BATCH	/* As per SNDCTL_DSP_GETCAPS */
};
/* Machine description for the Atari Falcon: wires the dmasound core's
 * callbacks to the Falcon/Ata implementations above.  Unlike the TT
 * the Falcon has no setGain control. */
static __initdata MACHINE machFalcon = {
	.name		= "Atari",
	.name2		= "FALCON",
	.owner		= THIS_MODULE,	/* was missing: without it the module
					 * could be unloaded while in use */
	.dma_alloc	= AtaAlloc,
	.dma_free	= AtaFree,
	.irqinit	= AtaIrqInit,
#ifdef MODULE
	.irqcleanup	= AtaIrqCleanUp,
#endif /* MODULE */
	.init		= FalconInit,
	.silence	= FalconSilence,
	.setFormat	= FalconSetFormat,
	.setVolume	= FalconSetVolume,
	.setBass	= AtaSetBass,
	.setTreble	= AtaSetTreble,
	.play		= AtaPlay,
	.mixer_init	= FalconMixerInit,
	.mixer_ioctl	= FalconMixerIoctl,
	.write_sq_setup	= AtaWriteSqSetup,
	.sq_open	= AtaSqOpen,
	.state_info	= FalconStateInfo,
	.min_dsp_speed	= 8195,
	.version	= ((DMASOUND_ATARI_REVISION<<8) | DMASOUND_ATARI_EDITION),
	.hardware_afmts	= (AFMT_S8 | AFMT_S16_BE), /* h'ware-supported formats *only* here */
	.capabilities	= DSP_CAP_BATCH	/* As per SNDCTL_DSP_GETCAPS */
};
/*** Config & Setup **********************************************************/
/*
 * Module init: detect the machine flavour (CODEC => Falcon,
 * MICROWIRE => TT), install the matching MACHINE description and
 * default settings, and hand over to the dmasound core — but only if
 * MFP Timer A is still free, since playback relies on its interrupt.
 */
static int __init dmasound_atari_init(void)
{
	if (MACH_IS_ATARI && ATARIHW_PRESENT(PCM_8BIT)) {
		if (ATARIHW_PRESENT(CODEC)) {
			dmasound.mach = machFalcon;
			dmasound.mach.default_soft = def_soft ;
			dmasound.mach.default_hard = def_hard_falcon ;
			is_falcon = 1;
		} else if (ATARIHW_PRESENT(MICROWIRE)) {
			dmasound.mach = machTT;
			dmasound.mach.default_soft = def_soft ;
			dmasound.mach.default_hard = def_hard_tt ;
			is_falcon = 0;
		} else
			return -ENODEV;
		/* Timer A must be unused (neither enabled nor masked in) */
		if ((st_mfp.int_en_a & st_mfp.int_mk_a & 0x20) == 0)
			return dmasound_init();
		else {
			printk("DMA sound driver: Timer A interrupt already in use\n");
			return -EBUSY;
		}
	}
	return -ENODEV;
}
/* Module exit: undo everything dmasound_init() set up. */
static void __exit dmasound_atari_cleanup(void)
{
	dmasound_deinit();
}
module_init(dmasound_atari_init);
module_exit(dmasound_atari_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ernestj/pitft | drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 219 | 2928 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <engine/software.h>
#include <engine/disp.h>
#include <core/class.h>
#include "nv50.h"
/* Display channel classes exposed to clients: NVF0-numbered classes
 * backed by the NVD0-generation channel implementations. */
static struct nouveau_oclass
nvf0_disp_sclass[] = {
	{ NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
	{ NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
	{ NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
	{ NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
	{ NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
	{}
};
/* Top-level display object class, reusing NVD0 functions and NVA3
 * object methods. */
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
	{ NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
	{}
};
/*
 * Constructor for the NVF0 display engine: reads the head count from
 * the hardware, creates the base display object, and fills in the
 * NVF0-specific class tables, interrupt handlers, and per-output
 * (DAC/SOR) method pointers — mostly reusing NVD0-generation code.
 */
static int
nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	       struct nouveau_oclass *oclass, void *data, u32 size,
	       struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int heads = nv_rd32(parent, 0x022448);
	int ret;

	/* register 0x022500 bit 0 set appears to mean the display engine
	 * is absent/disabled on this board — NOTE(review): semantics
	 * inferred from the early -ENODEV, confirm against HW docs */
	if (nv_rd32(parent, 0x022500) & 0x00000001)
		return -ENODEV;

	ret = nouveau_disp_create(parent, engine, oclass, heads,
				  "PDISP", "display", &priv);
	/* publish the (possibly partial) object before the error check so
	 * the caller can tear it down */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = nvf0_disp_base_oclass;
	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = nvd0_disp_intr;
	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
	priv->sclass = nvf0_disp_sclass;
	priv->head.nr = heads;
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = nvd0_hda_eld;
	priv->sor.hdmi = nvd0_hdmi_ctrl;
	priv->sor.dp = &nvd0_sor_dp_func;
	return 0;
}
/* Engine class descriptor registered with the nouveau core; only the
 * constructor is NVF0-specific. */
struct nouveau_oclass
nvf0_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x92),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvf0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
| gpl-2.0 |
gdyuldin/huawei_u8850_kernel_ics | drivers/net/bcm63xx_enet.c | 731 | 49301 | /*
* Driver for BCM963xx builtin Ethernet mac
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"
/* driver identification strings */
static char bcm_enet_driver_name[] = "bcm63xx_enet";
static char bcm_enet_driver_version[] = "1.0";

/* rx packets shorter than this are copied into a fresh small skb so the
 * full-size rx buffer can be recycled in place */
static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io memory shared between all devices */
static void __iomem *bcm_enet_shared_base;
/*
 * io helpers to access mac registers (per-device register window)
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access shared registers (DMA engine block common to
 * all MACs)
 */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base + off);
}
/*
* write given data into mii register and wait for transfer to end
* with timeout (average measured transfer time is 25us)
*/
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
int limit;
/* make sure mii interrupt status is cleared */
enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
enet_writel(priv, data, ENET_MIIDATA_REG);
wmb();
/* busy wait on mii interrupt bit, with timeout */
limit = 1000;
do {
if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
break;
udelay(1);
} while (limit-- > 0);
return (limit < 0) ? 1 : 0;
}
/*
 * MII internal read callback
 *
 * Returns the 16-bit register value, or -1 if the MDIO transaction
 * timed out.
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	/* build the MDIO frame: register, turnaround, phy id, read op */
	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}
/*
 * MII internal write callback
 *
 * Always returns 0; a transaction timeout is silently ignored.
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	/* build the MDIO frame: data, turnaround, register, phy id, write op */
	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}
/*
 * MII read callback from phylib (bus->priv is the driver private data)
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core (used when no PHY is attached and the
 * legacy mii ioctl interface drives the bus)
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
/*
* refill rx queue
*/
static int bcm_enet_refill_rx(struct net_device *dev)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
struct sk_buff *skb;
dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
if (!priv->rx_skb[desc_idx]) {
skb = netdev_alloc_skb(dev, priv->rx_skb_size);
if (!skb)
break;
priv->rx_skb[desc_idx] = skb;
p = dma_map_single(&priv->pdev->dev, skb->data,
priv->rx_skb_size,
DMA_FROM_DEVICE);
desc->address = p;
}
len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= DMADESC_WRAP_MASK;
priv->rx_dirty_desc = 0;
} else {
priv->rx_dirty_desc++;
}
wmb();
desc->len_stat = len_stat;
priv->rx_desc_count++;
/* tell dma engine we allocated one buffer */
enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
}
/* If rx ring is still empty, set a timer to try allocating
* again at a later time. */
if (priv->rx_desc_count == 0 && netif_running(dev)) {
dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
priv->rx_timeout.expires = jiffies + HZ;
add_timer(&priv->rx_timeout);
}
return 0;
}
/*
 * timer callback to defer refill rx queue in case we're OOM
 *
 * `data' is the struct net_device pointer stashed at timer setup time.
 */
static void bcm_enet_refill_rx_timer(unsigned long data)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = (struct net_device *)data;
	priv = netdev_priv(dev);

	spin_lock(&priv->rx_lock);
	/* use the already-cast dev instead of re-casting data */
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}
/*
 * extract packet from rx queue
 *
 * Walks up to `budget' completed rx descriptors, hands good frames to
 * the network stack (copying small ones per `copybreak'), counts bad
 * ones, then refills the ring and kicks the rx DMA.  Caller holds
 * priv->rx_lock.  Returns the number of descriptors processed.
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptor */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			/* small frame: copy into a tight skb and keep the
			 * big rx buffer mapped in the ring */
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb_ip_align(dev, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			/* large frame: pass the rx buffer up and let the
			 * refill path allocate a replacement */
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
/*
 * try to or force reclaim of transmitted buffers
 *
 * Frees skbs whose descriptors the hardware has released (or all of
 * them when `force' is set, e.g. on device stop), wakes the tx queue
 * when room was made, and returns the number of buffers released.
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other field of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			priv->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
/*
 * poll func, called by network core
 *
 * NAPI handler: acks the DMA interrupts, reclaims finished tx buffers,
 * receives up to `budget' rx packets, and re-enables interrupts only
 * once both directions are idle.
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int tx_work_done, rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* reclaim sent skb */
	tx_work_done = bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget || tx_work_done > 0) {
		/* rx/tx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete(napi);

	/* restore rx/tx interrupt */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	return rx_work_done;
}
/*
 * mac interrupt handler
 *
 * Only the MIB-counters interrupt is handled here: it is acked and
 * masked, and the (sleepable) register readout is deferred to a
 * workqueue.
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}
/*
 * rx/tx dma interrupt handler
 *
 * Masks both DMA channels and hands further processing to NAPI;
 * bcm_enet_poll() unmasks them when the work is done.
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}
/*
 * tx request callback
 *
 * Maps the skb, fills the next tx descriptor, flips ownership to the
 * hardware and kicks the tx DMA.  Stops the queue when the ring is
 * full.  Serialized against the reclaim path by priv->tx_lock.
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	priv->stats.tx_bytes += skb->len;
	priv->stats.tx_packets++;
	ret = NETDEV_TX_OK;

 out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
/*
 * Change the interface's mac address.
 *
 * Stores the new address in the netdev and mirrors it into the MAC's
 * perfect-match filter slot 0 (low 4 bytes in PML, high 2 bytes plus
 * valid bit in PMH).
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}
/*
* Change rx mode (promiscous/allmulti) and update multicast list
*/
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	/* start from the current rx config and rebuild the filter bits */
	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	/* program up to 3 multicast addresses into perfect match
	 * registers 1..3 (register 0 holds the unicast address) */
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* invalidate any perfect match slots we did not use this time */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}
/*
* set mac duplex parameters
*/
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 txctl;

	/* duplex is selected by the FD bit of the tx control register */
	txctl = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		txctl |= ENET_TXCTL_FD_MASK;
	else
		txctl &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, txctl, ENET_TXCTL_REG);
}
/*
* set mac flow control parameters
*/
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 rxcfg, dmacfg;

	/* rx flow control (pause frame handling) */
	rxcfg = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		rxcfg |= ENET_RXCFG_ENFLOW_MASK;
	else
		rxcfg &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, rxcfg, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation), keyed on this
	 * mac's rx dma channel */
	dmacfg = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		dmacfg |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		dmacfg &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, dmacfg, ENETDMA_CFG_REG);
}
/*
* link changed callback (from phylib)
*/
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = priv->phydev;
	status_changed = 0;

	/* compare against cached state so we only reprogram the mac
	 * (and log) when something actually changed */
	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertise it (trust phylib to
	 * check that duplex is full */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overrided by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
		status_changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		/* pr_cont() extends the line started by pr_info() */
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");
		pr_cont("\n");
	}
}
/*
* link changed callback (if phylib is not used)
*/
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);

	/* no phylib: apply the forced link parameters directly and
	 * report the carrier as permanently up */
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
/*
 * open callback: attach the PHY (when configured), grab the mac/dma
 * irqs, allocate and program the rx/tx dma rings, then enable the
 * mac and kick the rx dma channel.
 *
 * On error every resource acquired so far is released in reverse
 * order through the chained error labels at the bottom.
 *
 * Fix vs. original: out_phy_disconnect used to call phy_disconnect()
 * unconditionally; in forced-link mode (!priv->has_phy) priv->phydev
 * is NULL, so a request_irq() failure would oops.
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		/* reset cached link state so adjust_phy_link reprograms
		 * everything on the first callback */
		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	/* only disconnect the PHY we actually attached above:
	 * priv->phydev is NULL when running in forced-link mode */
	if (priv->has_phy)
		phy_disconnect(priv->phydev);

	return ret;
}
/*
* disable mac
*/
/*
 * Request the MAC to stop and busy-wait (bounded, ~1ms) for the
 * hardware to acknowledge by clearing the disable bit.
 *
 * Fix vs. original: the loop body declared its own "u32 val",
 * shadowing the outer variable (-Wshadow); behavior is unchanged,
 * the redundant declaration is simply removed.
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* hardware clears the disable bit once the mac has actually
	 * stopped; poll with a bounded timeout so a wedged mac cannot
	 * hang us forever */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}
/*
* disable dma in given channel
*/
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit = 1000;

	/* clear the channel enable bit, then poll (bounded) until the
	 * engine reports it has stopped */
	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
	do {
		u32 cfg;

		cfg = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
		if (!(cfg & ENETDMA_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}
/*
* stop callback
*/
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* stop feeding packets / polling before touching the hardware */
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring; dma is stopped, so it is safe to unmap
	 * each buffer before freeing it */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
/*
* core request to return device rx/tx stats
*/
static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
{
	/* software counters live inside the private area */
	struct bcm_enet_priv *priv = netdev_priv(dev);

	return &priv->stats;
}
/*
* ethtool callbacks
*/
/* describes one exported statistic: its ethtool name, the size and
 * offset of its software mirror inside struct bcm_enet_priv, and the
 * hardware mib register it is fed from (-1 for pure software stats) */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

/* expands to the sizeof/offsetof pair for member m of the priv struct */
#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
		     offsetof(struct bcm_enet_priv, m)

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	/* software (net_device_stats) counters, no mib backing */
	{ "rx_packets", GEN_STAT(stats.rx_packets), -1 },
	{ "tx_packets",	GEN_STAT(stats.tx_packets), -1 },
	{ "rx_bytes", GEN_STAT(stats.rx_bytes), -1 },
	{ "tx_bytes", GEN_STAT(stats.tx_bytes), -1 },
	{ "rx_errors", GEN_STAT(stats.rx_errors), -1 },
	{ "tx_errors", GEN_STAT(stats.tx_errors), -1 },
	{ "rx_dropped",	GEN_STAT(stats.rx_dropped), -1 },
	{ "tx_dropped",	GEN_STAT(stats.tx_dropped), -1 },
	/* hardware mib counters, accumulated by update_mib_counters() */
	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },

};

#define BCM_ENET_STATS_LEN	\
	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))

/* mib registers not exported to ethtool; still read (and thereby
 * cleared) so the "about to overflow" interrupt can be acked */
static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};
/*
 * ethtool: report driver identification strings.
 *
 * Fix vs. original: strncpy() does not NUL-terminate when the source
 * fills the destination; snprintf() always terminates (the ethtool
 * string fields are fixed 32-byte arrays).
 */
static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	snprintf(drvinfo->driver, sizeof(drvinfo->driver), "%s",
		 bcm_enet_driver_name);
	snprintf(drvinfo->version, sizeof(drvinfo->version), "%s",
		 bcm_enet_driver_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "N/A");
	snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), "bcm63xx");
	drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	/* only the statistics string set is supported */
	if (string_set == ETH_SS_STATS)
		return BCM_ENET_STATS_LEN;

	return -EINVAL;
}
static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* export one fixed-size name per statistic, in table order */
	for (i = 0; i < BCM_ENET_STATS_LEN; i++)
		memcpy(data + i * ETH_GSTRING_LEN,
		       bcm_enet_gstrings_stats[i].stat_string,
		       ETH_GSTRING_LEN);
}
static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	/* fold every hardware mib counter into its software mirror;
	 * the counters clear themselves on read (see hw_preinit) */
	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s = &bcm_enet_gstrings_stats[i];
		char *mirror;
		u32 val;

		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		mirror = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)mirror += val;
		else
			*(u32 *)mirror += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv =
		container_of(t, struct bcm_enet_priv, mib_update_task);

	/* serialize with ethtool stat readers */
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}
static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv = netdev_priv(netdev);
	int i;

	/* refresh the software mirrors from hardware, then snapshot
	 * them; the mutex serializes against the deferred mib work */
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s = &bcm_enet_gstrings_stats[i];
		const char *mirror = (const char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			data[i] = *(const u64 *)mirror;
		else
			data[i] = *(const u32 *)mirror;
	}
	mutex_unlock(&priv->mib_update_lock);
}
static int bcm_enet_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
cmd->maxrxpkt = 0;
cmd->maxtxpkt = 0;
if (priv->has_phy) {
if (!priv->phydev)
return -ENODEV;
return phy_ethtool_gset(priv->phydev, cmd);
} else {
cmd->autoneg = 0;
cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
cmd->duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
cmd->advertising = 0;
cmd->port = PORT_MII;
cmd->transceiver = XCVR_EXTERNAL;
}
return 0;
}
static int bcm_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);

	/* with a phy attached, let phylib handle the request */
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_ethtool_sset(priv->phydev, cmd);
	}

	/* forced link mode: only 10/100 over mii, no autoneg */
	if (cmd->autoneg ||
	    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
	    cmd->port != PORT_MII)
		return -EINVAL;

	priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
	priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;

	/* apply immediately when the interface is up */
	if (netif_running(dev))
		bcm_enet_adjust_link(dev);

	return 0;
}
static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;

	/* no mini/jumbo ring support */
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}
static int bcm_enet_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct bcm_enet_priv *priv;
int was_running;
priv = netdev_priv(dev);
was_running = 0;
if (netif_running(dev)) {
bcm_enet_stop(dev);
was_running = 1;
}
priv->rx_ring_size = ering->rx_pending;
priv->tx_ring_size = ering->tx_pending;
if (was_running) {
int err;
err = bcm_enet_open(dev);
if (err)
dev_close(dev);
else
bcm_enet_set_multicast_list(dev);
}
return 0;
}
static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	/* simply report the cached pause configuration */
	struct bcm_enet_priv *priv = netdev_priv(dev);

	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}
static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);

	if (ecmd->autoneg) {
		/* no pause autoneg on direct mii connection */
		if (!priv->has_phy)
			return -EINVAL;

		/* asymetric pause mode not supported,
		 * actually possible but integrated PHY has RO
		 * asym_pause bit */
		if (ecmd->rx_pause != ecmd->tx_pause)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}
static struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.get_settings = bcm_enet_get_settings,
.set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
};
/*
 * mii ioctl handler: delegated to phylib when a PHY is attached,
 * otherwise served through a temporary mii_if_info using the raw
 * mdio accessors.
 *
 * Fix vs. original: the on-stack mii_if_info was only partially
 * initialized; fields we do not set explicitly (full_duplex,
 * supports_gmii, ...) may be consulted by the generic mii helpers
 * and must not contain stack garbage, so zero the whole struct first.
 */
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!priv->phydev)
			return -ENODEV;
		return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
	} else {
		struct mii_if_info mii;

		memset(&mii, 0, sizeof(mii));
		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}
/*
* calculate actual hardware mtu
*/
static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
{
	/* hardware frame size is the mtu plus the ethernet header and
	 * a possible vlan tag */
	int frame_size = mtu + VLAN_ETH_HLEN;

	if (frame_size < 64 || frame_size > BCMENET_MAX_MTU)
		return -EINVAL;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = frame_size;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(frame_size + ETH_FCS_LEN,
				  BCMENET_DMA_MAXBURST * 4);
	return 0;
}
/*
* adjust mtu, can't be called while device is running
*/
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;

	/* refuse while up: ring buffer sizes depend on the mtu */
	if (netif_running(dev))
		return -EBUSY;

	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
	if (!ret)
		dev->mtu = new_mtu;

	return ret;
}
/*
* preinit hardware to allow mii operation while device is down
*/
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	/* hardware clears the reset bit when done; poll for it with a
	 * bounded (~1ms) timeout */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}
/* netdevice method table, installed at probe time */
static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_get_stats		= bcm_enet_get_stats,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_multicast_list	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = bcm_enet_netpoll,
#endif
};
/*
* allocate netdevice, request register memory and register device.
*/
/*
 * allocate netdevice, request register memory and register device.
 *
 * Fixes vs. original:
 *  - the non-PHY branch dereferenced pd->mii_config without checking
 *    pd for NULL, oopsing when no platform data was supplied;
 *  - the mii_config() failure path jumped to out_uninit_hw while ret
 *    was still 0, so probe reported success after freeing the netdev.
 */
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	const char *clk_name;
	unsigned int iomem_size;
	int i, ret;

	/* stop if shared driver failed, assume driver->probe will be
	 * called in the same order we register devices (correct ?) */
	if (!bcm_enet_shared_base)
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	iomem_size = res_mem->end - res_mem->start + 1;
	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
		ret = -EBUSY;
		goto out;
	}

	priv->base = ioremap(res_mem->start, iomem_size);
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out_release_mem;
	}
	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;
	priv->mac_id = pdev->id;

	/* get rx & tx dma channel id for this mac */
	if (priv->mac_id == 0) {
		priv->rx_chan = 0;
		priv->tx_chan = 1;
		clk_name = "enet0";
	} else {
		priv->rx_chan = 2;
		priv->tx_chan = 3;
		clk_name = "enet1";
	}

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_enable(priv->phy_clk);
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%d", priv->mac_id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation return 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus) {
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
	}

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, iomem_size);
out:
	free_netdev(dev);
	return ret;
}
/*
* exit func, stops hardware and unregisters netdevice
*/
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		/* tear down the mii bus we registered in probe */
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = pdev->dev.platform_data;
		/* give platform code a chance to undo its mii setup */
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}
/* per-mac platform driver; one instance is probed for each ethernet
 * mac declared by the board code */
struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= __devexit_p(bcm_enet_remove),
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner  = THIS_MODULE,
	},
};
/*
* reserve & remap memory space shared between all macs
*/
static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int iomem_size;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	iomem_size = res->end - res->start + 1;
	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
		return -EBUSY;

	base = ioremap(res->start, iomem_size);
	if (!base) {
		release_mem_region(res->start, iomem_size);
		return -ENOMEM;
	}

	/* publish the mapping; bcm_enet_probe() checks this pointer to
	 * know whether the shared dma block is available */
	bcm_enet_shared_base = base;
	return 0;
}
/* undo bcm_enet_shared_probe(): unmap and release the shared DMA window */
static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
{
	struct resource *res;

	iounmap(bcm_enet_shared_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* resource_size() replaces the open-coded "end - start + 1" */
	release_mem_region(res->start, resource_size(res));
	return 0;
}
/*
 * this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= __devexit_p(bcm_enet_shared_remove),
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner  = THIS_MODULE,
	},
};
/*
 * module entry point: register the shared-DMA driver first (both MACs
 * depend on it), then the per-MAC driver; unwind on failure.
 */
static int __init bcm_enet_init(void)
{
	int err = platform_driver_register(&bcm63xx_enet_shared_driver);

	if (err)
		return err;

	err = platform_driver_register(&bcm63xx_enet_driver);
	if (err)
		platform_driver_unregister(&bcm63xx_enet_shared_driver);

	return err;
}
/* module exit: unregister in reverse order of registration */
static void __exit bcm_enet_exit(void)
{
	platform_driver_unregister(&bcm63xx_enet_driver);
	platform_driver_unregister(&bcm63xx_enet_shared_driver);
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jamieg71/msm-shamu-3.10 | drivers/media/usb/gspca/stv0680.c | 2267 | 10114 | /*
* STV0680 USB Camera Driver
*
* Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the in kernel v4l1 stv680 driver:
*
* STV0680 USB Camera Driver, by Kevin Sisson (kjsisson@bellsouth.net)
*
* Thanks to STMicroelectronics for information on the usb commands, and
* to Steve Miller at STM for his help and encouragement while I was
* writing this driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "stv0680"
#include "gspca.h"
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("STV0680 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct v4l2_pix_format mode;	/* the single mode exposed to v4l2 */
	u8 orig_mode;			/* camera mode found at probe time */
	u8 video_mode;			/* mode we stream in (CIF or QVGA) */
	u8 current_mode;		/* mode last programmed to the camera */
};
/*
 * Send a vendor/standard control request to the camera.
 * @set selects request type + direction (see cases below); data is
 * exchanged through gspca_dev->usb_buf.  Returns the usb_control_msg()
 * result (bytes transferred or negative errno).
 */
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
		       int size)
{
	int ret = -1;
	u8 req_type = 0;
	unsigned int pipe = 0;

	switch (set) {
	case 0: /* 0xc1 */
		req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 1: /* 0x41 */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	case 2: /* 0x80 */
		req_type = USB_DIR_IN | USB_RECIP_DEVICE;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 3: /* 0x40 */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	}

	ret = usb_control_msg(gspca_dev->dev, pipe,
			      req, req_type,
			      val, 0, gspca_dev->usb_buf, size, 500);

	/* request 0x0a failures are expected and deliberately not logged */
	if ((ret < 0) && (req != 0x0a))
		pr_err("usb_control_msg error %i, request = 0x%x, error = %i\n",
		       set, req, ret);
	return ret;
}
/*
 * Query and log the camera's "last error" status, then pass @ret back
 * to the caller unchanged so it can be used as "return stv0680_handle_error(...)".
 */
static int stv0680_handle_error(struct gspca_dev *gspca_dev, int ret)
{
	stv_sndctrl(gspca_dev, 0, 0x80, 0, 0x02); /* Get Last Error */
	PERR("last error: %i,  command = 0x%x",
	     gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	return ret;
}
/*
 * Read the camera's current video mode.
 * Returns the mode byte (01 = VGA, 03 = QVGA, 00 = CIF) or a negative
 * errno on failure.
 */
static int stv0680_get_video_mode(struct gspca_dev *gspca_dev)
{
	/* Note not sure if this init of usb_buf is really necessary */
	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = 0x0f;

	if (stv_sndctrl(gspca_dev, 0, 0x87, 0, 0x08) != 0x08) {
		PERR("Get_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return gspca_dev->usb_buf[0]; /* 01 = VGA, 03 = QVGA, 00 = CIF */
}
/*
 * Program a video mode into the camera and verify it took effect.
 * No-op if @mode is already the cached current mode.
 * Returns 0 on success or a negative errno.
 */
static int stv0680_set_video_mode(struct gspca_dev *gspca_dev, u8 mode)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->current_mode == mode)
		return 0;

	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = mode;

	if (stv_sndctrl(gspca_dev, 3, 0x07, 0x0100, 0x08) != 0x08) {
		PERR("Set_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	/* Verify we got what we've asked for */
	if (stv0680_get_video_mode(gspca_dev) != mode) {
		PERR("Error setting camera video mode!");
		return -EIO;
	}

	sd->current_mode = mode;
	return 0;
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;

	/* Give the camera some time to settle, otherwise initalization will
	   fail on hotplug, and yes it really needs a full second. */
	msleep(1000);

	/* ping camera to be sure STV0680 is present */
	if (stv_sndctrl(gspca_dev, 0, 0x88, 0x5678, 0x02) != 0x02 ||
	    gspca_dev->usb_buf[0] != 0x56 || gspca_dev->usb_buf[1] != 0x78) {
		PERR("STV(e): camera ping failed!!");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}

	/* get camera descriptor */
	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x09) != 0x09)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x22) != 0x22 ||
	    gspca_dev->usb_buf[7] != 0xa0 || gspca_dev->usb_buf[8] != 0x23) {
		PERR("Could not get descriptor 0200.");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}
	if (stv_sndctrl(gspca_dev, 0, 0x8a, 0, 0x02) != 0x02)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x8b, 0, 0x24) != 0x24)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -ENODEV);

	/* usb_buf[7] is a supported-modes bitmask:
	   0x01 CIF, 0x02 VGA, 0x04 QCIF, 0x08 QVGA (see PDEBUGs below) */
	if (!(gspca_dev->usb_buf[7] & 0x09)) {
		PERR("Camera supports neither CIF nor QVGA mode");
		return -ENODEV;
	}
	if (gspca_dev->usb_buf[7] & 0x01)
		PDEBUG(D_PROBE, "Camera supports CIF mode");
	if (gspca_dev->usb_buf[7] & 0x02)
		PDEBUG(D_PROBE, "Camera supports VGA mode");
	if (gspca_dev->usb_buf[7] & 0x04)
		PDEBUG(D_PROBE, "Camera supports QCIF mode");
	if (gspca_dev->usb_buf[7] & 0x08)
		PDEBUG(D_PROBE, "Camera supports QVGA mode");

	/* prefer CIF when available, otherwise fall back to QVGA */
	if (gspca_dev->usb_buf[7] & 0x01)
		sd->video_mode = 0x00; /* CIF */
	else
		sd->video_mode = 0x03; /* QVGA */

	/* FW rev, ASIC rev, sensor ID  */
	PDEBUG(D_PROBE, "Firmware rev is %i.%i",
	       gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	PDEBUG(D_PROBE, "ASIC rev is %i.%i",
	       gspca_dev->usb_buf[2], gspca_dev->usb_buf[3]);
	PDEBUG(D_PROBE, "Sensor ID is %i",
	       (gspca_dev->usb_buf[4]*16) + (gspca_dev->usb_buf[5]>>4));

	/* remember the mode the camera was in so stop0 can restore it */
	ret = stv0680_get_video_mode(gspca_dev);
	if (ret < 0)
		return ret;
	sd->current_mode = sd->orig_mode = ret;

	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	/* Get mode details */
	if (stv_sndctrl(gspca_dev, 0, 0x8f, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	cam->bulk = 1;
	cam->bulk_nurbs = 1; /* The cam cannot handle more */
	/* big-endian 32-bit frame size followed by 16-bit width/height */
	cam->bulk_size = (gspca_dev->usb_buf[0] << 24) |
			 (gspca_dev->usb_buf[1] << 16) |
			 (gspca_dev->usb_buf[2] << 8) |
			 (gspca_dev->usb_buf[3]);
	sd->mode.width = (gspca_dev->usb_buf[4] << 8) |
			 (gspca_dev->usb_buf[5]);  /* 322, 356, 644 */
	sd->mode.height = (gspca_dev->usb_buf[6] << 8) |
			  (gspca_dev->usb_buf[7]); /* 242, 292, 484 */
	sd->mode.pixelformat = V4L2_PIX_FMT_STV0680;
	sd->mode.field = V4L2_FIELD_NONE;
	sd->mode.bytesperline = sd->mode.width;
	sd->mode.sizeimage = cam->bulk_size;
	sd->mode.colorspace = V4L2_COLORSPACE_SRGB;

	/* origGain = gspca_dev->usb_buf[12]; */

	cam->cam_mode = &sd->mode;
	cam->nmodes = 1;

	/* leave the camera in the mode it was found in until streaming */
	ret = stv0680_set_video_mode(gspca_dev, sd->orig_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
	    gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
		pr_err("Could not get descriptor 0100\n");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return 0;
}
/* this function is called at probe and resume time */
/* nothing to do: all initialization happens in sd_config()/sd_start() */
static int sd_init(struct gspca_dev *gspca_dev)
{
	return 0;
}
/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;

	/* switch the camera into the streaming resolution chosen at probe */
	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	/* Start stream at:
	   0x0000 = CIF (352x288)
	   0x0100 = VGA (640x480)
	   0x0300 = QVGA (320x240) */
	if (stv_sndctrl(gspca_dev, 1, 0x09, sd->video_mode << 8, 0x0) != 0x0)
		return stv0680_handle_error(gspca_dev, -EIO);

	return 0;
}
/* stop streaming: issue the camera's "stop" command */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* This is a high priority command; it stops all lower order cmds */
	if (stv_sndctrl(gspca_dev, 1, 0x04, 0x0000, 0x0) != 0x0)
		stv0680_handle_error(gspca_dev, -EIO);
}
/* restore the mode the camera was in before we started streaming;
   skipped if the device has already been unplugged */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (!sd->gspca_dev.present)
		return;
	stv0680_set_video_mode(gspca_dev, sd->orig_mode);
}
/* assemble bulk packets into frames; each full-size packet is one frame */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,
			int len)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* Every now and then the camera sends a 16 byte packet, no idea
	   what it contains, but it is not image data, when this
	   happens the frame received before this packet is corrupt,
	   so discard it. */
	if (len != sd->mode.sizeimage) {
		gspca_dev->last_packet_type = DISCARD_PACKET;
		return;
	}

	/* Finish the previous frame, we do this upon reception of the next
	   packet, even though it is already complete so that the strange 16
	   byte packets send after a corrupt frame can discard it. */
	gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);

	/* Store the just received frame */
	gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.stop0 = sd_stop0,
	.pkt_scan = sd_pkt_scan,
};

/* -- module initialisation -- */
/* known STV0680-based devices */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0553, 0x0202)},
	{USB_DEVICE(0x041e, 0x4007)},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	/* gspca core does all the work; we only supply our descriptor */
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
| gpl-2.0 |
javilonas/Lonas_KL-GT-I9300-Sammy | drivers/leds/leds-88pm860x.c | 2779 | 5996 | /*
* LED driver for Marvell 88PM860x
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mfd/88pm860x.h>
/* PM8606 RGB register field layout: bits [4:0] hold the PWM duty,
 * bits [7:5] the current setting; blink fields live in the 'A' regs.
 * LED_BLINK_ON()/LED_TO_ON() convert between the 3-bit on-time code and
 * its linear value (presumably milliseconds -- TODO confirm vs datasheet). */
#define LED_PWM_SHIFT		(3)
#define LED_PWM_MASK		(0x1F)
#define LED_CURRENT_MASK	(0x07 << 5)

#define LED_BLINK_ON_MASK	(0x07)
#define LED_BLINK_MASK		(0x7F)

#define LED_BLINK_ON(x)		((x & 0x7) * 66 + 66)
#define LED_BLINK_ON_MIN	LED_BLINK_ON(0)
#define LED_BLINK_ON_MAX	LED_BLINK_ON(0x7)
#define LED_ON_CONTINUOUS	(0x0F << 3)
#define LED_TO_ON(x)		((x - 66) / 66)

/* per-LED-group blink enable bits in PM8606_WLED3B */
#define LED1_BLINK_EN		(1 << 1)
#define LED2_BLINK_EN		(1 << 2)
/* driver state for one LED output of the 88PM860x */
struct pm860x_led {
	struct led_classdev cdev;	/* LED class device we register */
	struct i2c_client *i2c;		/* chip (or companion) i2c client */
	struct work_struct work;	/* defers i2c writes out of set() */
	struct pm860x_chip *chip;
	struct mutex lock;		/* serializes register updates */
	char name[MFD_NAME_SIZE];

	int port;			/* PM8606_LEDx_{RED,GREEN,BLUE} id */
	int iset;			/* current setting for LED_CURRENT_MASK */
	unsigned char brightness;	/* requested 5-bit PWM value */
	unsigned char current_brightness; /* value last written to hw */

	int blink_data;
	int blink_time;
	int blink_on;
	int blink_off;
};
/* return offset of color register */
static inline int __led_off(int port)
{
	switch (port) {
	case PM8606_LED1_RED:
	case PM8606_LED1_GREEN:
	case PM8606_LED1_BLUE:
		/* LED1 color registers follow PM8606_RGB1B contiguously */
		return port - PM8606_LED1_RED + PM8606_RGB1B;
	case PM8606_LED2_RED:
	case PM8606_LED2_GREEN:
	case PM8606_LED2_BLUE:
		return port - PM8606_LED2_RED + PM8606_RGB2B;
	default:
		return -EINVAL;
	}
}
/* return offset of blink register */
static inline int __blink_off(int port)
{
	switch (port) {
	case PM8606_LED1_RED:
	case PM8606_LED1_GREEN:
	case PM8606_LED1_BLUE:
		/* one shared blink register per RGB group */
		return PM8606_RGB1A;
	case PM8606_LED2_RED:
	case PM8606_LED2_GREEN:
	case PM8606_LED2_BLUE:
		return PM8606_RGB2A;
	default:
		return -EINVAL;
	}
}
/* return the PM8606_WLED3B blink-enable bit for this port's LED group */
static inline int __blink_ctl_mask(int port)
{
	switch (port) {
	case PM8606_LED1_RED:
	case PM8606_LED1_GREEN:
	case PM8606_LED1_BLUE:
		return LED1_BLINK_EN;
	case PM8606_LED2_RED:
	case PM8606_LED2_GREEN:
	case PM8606_LED2_BLUE:
		return LED2_BLINK_EN;
	default:
		return -EINVAL;
	}
}
/* deferred worker: push the cached brightness to the hardware over i2c */
static void pm860x_led_work(struct work_struct *work)
{
	struct pm860x_led *led;
	struct pm860x_chip *chip;
	unsigned char buf[3];
	int mask, ret;

	led = container_of(work, struct pm860x_led, work);
	chip = led->chip;
	mutex_lock(&led->lock);
	if ((led->current_brightness == 0) && led->brightness) {
		/* turning on from off: program current, continuous mode,
		 * and the group's blink-enable bit */
		if (led->iset) {
			pm860x_set_bits(led->i2c, __led_off(led->port),
					LED_CURRENT_MASK, led->iset);
		}
		pm860x_set_bits(led->i2c, __blink_off(led->port),
				LED_BLINK_MASK, LED_ON_CONTINUOUS);
		mask = __blink_ctl_mask(led->port);
		pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
	}
	pm860x_set_bits(led->i2c, __led_off(led->port), LED_PWM_MASK,
			led->brightness);

	if (led->brightness == 0) {
		/* check the PWM fields of all three colors in the group */
		pm860x_bulk_read(led->i2c, __led_off(led->port), 3, buf);
		ret = buf[0] & LED_PWM_MASK;
		ret |= buf[1] & LED_PWM_MASK;
		ret |= buf[2] & LED_PWM_MASK;
		if (ret == 0) {
			/* unset current since no led is lighting */
			pm860x_set_bits(led->i2c, __led_off(led->port),
					LED_CURRENT_MASK, 0);
			mask = __blink_ctl_mask(led->port);
			pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, 0);
		}
	}
	led->current_brightness = led->brightness;
	dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
		__led_off(led->port), led->brightness);
	mutex_unlock(&led->lock);
}
/* LED classdev callback; may be called in atomic context, so only cache
 * the value and defer the i2c traffic to the workqueue */
static void pm860x_led_set(struct led_classdev *cdev,
			   enum led_brightness value)
{
	struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);

	/* scale the 0-255 classdev range down to the 5-bit PWM field */
	data->brightness = value >> 3;
	schedule_work(&data->work);
}
/*
 * Bind one LED sub-device of the 88PM860x MFD.
 * The LED's name comes from the IO resource; the port and current
 * setting come from platform data.
 */
static int pm860x_led_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_led_pdata *pdata;
	struct pm860x_led *data;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "No I/O resource!\n");
		return -EINVAL;
	}

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "No platform data!\n");
		return -EINVAL;
	}

	data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;
	/* kzalloc guarantees NUL termination after the bounded copy */
	strncpy(data->name, res->name, MFD_NAME_SIZE - 1);
	dev_set_drvdata(&pdev->dev, data);
	data->chip = chip;
	/* LED registers live on the PM8606 side of the two-chip MFD */
	data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
	data->iset = pdata->iset;
	data->port = pdata->flags;
	if (data->port < 0) {
		dev_err(&pdev->dev, "check device failed\n");
		kfree(data);
		return -EINVAL;
	}

	data->current_brightness = 0;
	data->cdev.name = data->name;
	data->cdev.brightness_set = pm860x_led_set;
	mutex_init(&data->lock);
	INIT_WORK(&data->work, pm860x_led_work);

	ret = led_classdev_register(chip->dev, &data->cdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
		goto out;
	}
	/* start with the LED off */
	pm860x_led_set(&data->cdev, 0);
	return 0;
out:
	kfree(data);
	return ret;
}
/* unbind: unregister the classdev and free the per-LED state */
static int pm860x_led_remove(struct platform_device *pdev)
{
	struct pm860x_led *led = platform_get_drvdata(pdev);

	led_classdev_unregister(&led->cdev);
	kfree(led);

	return 0;
}
/* platform driver matched against the "88pm860x-led" MFD cell */
static struct platform_driver pm860x_led_driver = {
	.driver	= {
		.name	= "88pm860x-led",
		.owner	= THIS_MODULE,
	},
	.probe	= pm860x_led_probe,
	.remove	= pm860x_led_remove,
};
/*
 * Module entry point.  A module_init function belongs in the __init
 * section; __devinit is only for probe-time code, so using it here was
 * incorrect (wrong section annotation).
 */
static int __init pm860x_led_init(void)
{
	return platform_driver_register(&pm860x_led_driver);
}
module_init(pm860x_led_init);
/*
 * Module exit point.  Must be annotated __exit, not __devexit:
 * __devexit is for remove-time code only (wrong section annotation).
 */
static void __exit pm860x_led_exit(void)
{
	platform_driver_unregister(&pm860x_led_driver);
}
module_exit(pm860x_led_exit);

MODULE_DESCRIPTION("LED driver for Marvell PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:88pm860x-led");
| gpl-2.0 |
Snuzzo/funky_jewel | fs/ext3/acl.c | 4827 | 9913 | /*
* linux/fs/ext3/acl.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
#include "ext3.h"
#include "xattr.h"
#include "acl.h"
/*
 * Convert from filesystem to in-memory representation.
 *
 * @value/@size is the raw xattr payload: an ext3_acl_header followed by
 * a packed sequence of entries (short form for OBJ/MASK/OTHER tags,
 * long form with e_id for USER/GROUP).  Returns a refcounted posix_acl,
 * NULL for an empty/absent ACL, or ERR_PTR(-EINVAL) on malformed data.
 */
static struct posix_acl *
ext3_acl_from_disk(const void *value, size_t size)
{
	const char *end = (char *)value + size;
	int n, count;
	struct posix_acl *acl;

	if (!value)
		return NULL;
	if (size < sizeof(ext3_acl_header))
		 return ERR_PTR(-EINVAL);
	if (((ext3_acl_header *)value)->a_version !=
	    cpu_to_le32(EXT3_ACL_VERSION))
		return ERR_PTR(-EINVAL);
	value = (char *)value + sizeof(ext3_acl_header);
	count = ext3_acl_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;
	acl = posix_acl_alloc(count, GFP_NOFS);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	for (n=0; n < count; n++) {
		ext3_acl_entry *entry =
			(ext3_acl_entry *)value;
		/* bounds-check the short part before reading tag/perm */
		if ((char *)value + sizeof(ext3_acl_entry_short) > end)
			goto fail;
		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
		switch(acl->a_entries[n].e_tag) {
			case ACL_USER_OBJ:
			case ACL_GROUP_OBJ:
			case ACL_MASK:
			case ACL_OTHER:
				value = (char *)value +
					sizeof(ext3_acl_entry_short);
				acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
				break;

			case ACL_USER:
			case ACL_GROUP:
				value = (char *)value + sizeof(ext3_acl_entry);
				if ((char *)value > end)
					goto fail;
				acl->a_entries[n].e_id =
					le32_to_cpu(entry->e_id);
				break;

			default:
				goto fail;
		}
	}
	/* trailing bytes mean the size didn't match the entries: reject */
	if (value != end)
		goto fail;
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
/*
 * Convert from in-memory to filesystem representation.
 *
 * Allocates and returns the packed on-disk buffer (caller frees), and
 * stores the used byte count in *size.  Returns ERR_PTR(-EINVAL) for an
 * unknown tag, ERR_PTR(-ENOMEM) on allocation failure.
 */
static void *
ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
{
	ext3_acl_header *ext_acl;
	char *e;
	size_t n;

	*size = ext3_acl_size(acl->a_count);
	/* worst-case allocation: every entry in long form */
	ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
			sizeof(ext3_acl_entry), GFP_NOFS);
	if (!ext_acl)
		return ERR_PTR(-ENOMEM);
	ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
	e = (char *)ext_acl + sizeof(ext3_acl_header);
	for (n=0; n < acl->a_count; n++) {
		ext3_acl_entry *entry = (ext3_acl_entry *)e;
		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
		switch(acl->a_entries[n].e_tag) {
			case ACL_USER:
			case ACL_GROUP:
				entry->e_id =
					cpu_to_le32(acl->a_entries[n].e_id);
				e += sizeof(ext3_acl_entry);
				break;

			case ACL_USER_OBJ:
			case ACL_GROUP_OBJ:
			case ACL_MASK:
			case ACL_OTHER:
				e += sizeof(ext3_acl_entry_short);
				break;

			default:
				goto fail;
		}
	}
	return (char *)ext_acl;

fail:
	kfree(ext_acl);
	return ERR_PTR(-EINVAL);
}
/*
 * Inode operation get_posix_acl().
 *
 * inode->i_mutex: don't care
 *
 * Reads the ACL from the inode's xattr (with a two-pass size probe),
 * caches the result, and returns a refcounted posix_acl, NULL if none,
 * or an ERR_PTR.
 */
struct posix_acl *
ext3_get_acl(struct inode *inode, int type)
{
	int name_index;
	char *value = NULL;
	struct posix_acl *acl;
	int retval;

	if (!test_opt(inode->i_sb, POSIX_ACL))
		return NULL;

	acl = get_cached_acl(inode, type);
	if (acl != ACL_NOT_CACHED)
		return acl;

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		BUG();
	}

	/* first call probes the xattr size, second fetches the payload */
	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
	if (retval > 0) {
		value = kmalloc(retval, GFP_NOFS);
		if (!value)
			return ERR_PTR(-ENOMEM);
		retval = ext3_xattr_get(inode, name_index, "", value, retval);
	}
	if (retval > 0)
		acl = ext3_acl_from_disk(value, retval);
	else if (retval == -ENODATA || retval == -ENOSYS)
		acl = NULL;
	else
		acl = ERR_PTR(retval);
	kfree(value);

	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);

	return acl;
}
/*
 * Set the access or default ACL of an inode.
 *
 * inode->i_mutex: down unless called from ext3_new_inode
 *
 * An access ACL that is equivalent to the file mode is folded into
 * i_mode and stored as no xattr at all.  A NULL @acl removes the xattr.
 * Updates the ACL cache on success.
 */
static int
ext3_set_acl(handle_t *handle, struct inode *inode, int type,
	     struct posix_acl *acl)
{
	int name_index;
	void *value = NULL;
	size_t size = 0;
	int error;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;

	switch(type) {
		case ACL_TYPE_ACCESS:
			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
			if (acl) {
				/* posix_acl_equiv_mode() returns 0 when the
				 * ACL carries no extra info beyond the mode */
				error = posix_acl_equiv_mode(acl, &inode->i_mode);
				if (error < 0)
					return error;
				else {
					inode->i_ctime = CURRENT_TIME_SEC;
					ext3_mark_inode_dirty(handle, inode);
					if (error == 0)
						acl = NULL;
				}
			}
			break;

		case ACL_TYPE_DEFAULT:
			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
			/* default ACLs only make sense on directories */
			if (!S_ISDIR(inode->i_mode))
				return acl ? -EACCES : 0;
			break;

		default:
			return -EINVAL;
	}
	if (acl) {
		value = ext3_acl_to_disk(acl, &size);
		if (IS_ERR(value))
			return (int)PTR_ERR(value);
	}

	error = ext3_xattr_set_handle(handle, inode, name_index, "",
				      value, size, 0);

	kfree(value);

	if (!error)
		set_cached_acl(inode, type, acl);

	return error;
}
/*
 * Initialize the ACLs of a new inode. Called from ext3_new_inode.
 *
 * dir->i_mutex: down
 * inode->i_mutex: up (access to inode is still exclusive)
 *
 * Inherits the parent directory's default ACL; when there is none, the
 * umask is applied to i_mode instead.
 */
int
ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
	struct posix_acl *acl = NULL;
	int error = 0;

	if (!S_ISLNK(inode->i_mode)) {
		if (test_opt(dir->i_sb, POSIX_ACL)) {
			acl = ext3_get_acl(dir, ACL_TYPE_DEFAULT);
			if (IS_ERR(acl))
				return PTR_ERR(acl);
		}
		if (!acl)
			inode->i_mode &= ~current_umask();
	}
	if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
		if (S_ISDIR(inode->i_mode)) {
			error = ext3_set_acl(handle, inode,
					     ACL_TYPE_DEFAULT, acl);
			if (error)
				goto cleanup;
		}
		/* posix_acl_create() consumes/replaces @acl in place;
		 * on failure it has already dropped the reference, so a
		 * plain return here does not leak */
		error = posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
		if (error < 0)
			return error;

		if (error > 0) {
			/* This is an extended ACL */
			error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
		}
	}
cleanup:
	posix_acl_release(acl);
	return error;
}
/*
 * Does chmod for an inode that may have an Access Control List. The
 * inode->i_mode field must be updated to the desired value by the caller
 * before calling this function.
 * Returns 0 on success, or a negative error number.
 *
 * We change the ACL rather than storing some ACL entries in the file
 * mode permission bits (which would be more efficient), because that
 * would break once additional permissions (like ACL_APPEND, ACL_DELETE
 * for directories) are added. There are no more bits available in the
 * file mode.
 *
 * inode->i_mutex: down
 */
int
ext3_acl_chmod(struct inode *inode)
{
	struct posix_acl *acl;
	handle_t *handle;
	int retries = 0;
	int error;

	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	if (!test_opt(inode->i_sb, POSIX_ACL))
		return 0;
	acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
	/* PTR_ERR(NULL) is 0, so a missing ACL returns success here */
	if (IS_ERR(acl) || !acl)
		return PTR_ERR(acl);
	/* posix_acl_chmod() replaces @acl with the updated copy and has
	 * already dropped the old reference on failure */
	error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
	if (error)
		return error;
retry:
	handle = ext3_journal_start(inode,
			EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		ext3_std_error(inode->i_sb, error);
		goto out;
	}
	error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, acl);
	ext3_journal_stop(handle);
	if (error == -ENOSPC &&
	    ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	posix_acl_release(acl);
	return error;
}
/*
* Extended attribute handlers
*/
/* list callback: report the access-ACL xattr name if ACLs are enabled */
static size_t
ext3_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len,
			   const char *name, size_t name_len, int type)
{
	size_t needed = sizeof(POSIX_ACL_XATTR_ACCESS);

	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return 0;

	/* copy the name only when the caller's buffer has room */
	if (list != NULL && needed <= list_len)
		memcpy(list, POSIX_ACL_XATTR_ACCESS, needed);

	return needed;
}
/* list callback: report the default-ACL xattr name if ACLs are enabled */
static size_t
ext3_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len,
			    const char *name, size_t name_len, int type)
{
	size_t needed = sizeof(POSIX_ACL_XATTR_DEFAULT);

	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return 0;

	/* copy the name only when the caller's buffer has room */
	if (list != NULL && needed <= list_len)
		memcpy(list, POSIX_ACL_XATTR_DEFAULT, needed);

	return needed;
}
/* xattr handler .get: serialize the cached/stored ACL into @buffer in
 * the POSIX ACL xattr wire format */
static int
ext3_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
		   size_t size, int type)
{
	struct posix_acl *acl;
	int error;

	/* the ACL handlers match by prefix; the remaining name must be empty */
	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(dentry->d_sb, POSIX_ACL))
		return -EOPNOTSUPP;

	acl = ext3_get_acl(dentry->d_inode, type);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;
	error = posix_acl_to_xattr(acl, buffer, size);
	posix_acl_release(acl);

	return error;
}
/* xattr handler .set: parse the POSIX ACL wire format from @value and
 * store it on the inode inside a journal transaction.  A NULL @value
 * removes the ACL.  Retries the transaction on ENOSPC. */
static int
ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
		   size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;
	handle_t *handle;
	struct posix_acl *acl;
	int error, retries = 0;

	/* the ACL handlers match by prefix; the remaining name must be empty */
	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!test_opt(inode->i_sb, POSIX_ACL))
		return -EOPNOTSUPP;
	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (value) {
		acl = posix_acl_from_xattr(value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		else if (acl) {
			error = posix_acl_valid(acl);
			if (error)
				goto release_and_out;
		}
	} else
		acl = NULL;

retry:
	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		/* BUGFIX: releasing the acl here (instead of returning
		 * directly) avoids leaking the posix_acl reference when
		 * starting the transaction fails -- same fix as the
		 * equivalent ext4_xattr_set_acl() error path. */
		error = PTR_ERR(handle);
		goto release_and_out;
	}
	error = ext3_set_acl(handle, inode, type, acl);
	ext3_journal_stop(handle);
	if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

release_and_out:
	posix_acl_release(acl);
	return error;
}
/* xattr handlers for "system.posix_acl_access" / "system.posix_acl_default";
 * the ACL type is carried in .flags so both share the same callbacks */
const struct xattr_handler ext3_xattr_acl_access_handler = {
	.prefix	= POSIX_ACL_XATTR_ACCESS,
	.flags	= ACL_TYPE_ACCESS,
	.list	= ext3_xattr_list_acl_access,
	.get	= ext3_xattr_get_acl,
	.set	= ext3_xattr_set_acl,
};

const struct xattr_handler ext3_xattr_acl_default_handler = {
	.prefix	= POSIX_ACL_XATTR_DEFAULT,
	.flags	= ACL_TYPE_DEFAULT,
	.list	= ext3_xattr_list_acl_default,
	.get	= ext3_xattr_get_acl,
	.set	= ext3_xattr_set_acl,
};
| gpl-2.0 |
nitrogen-devs/android_NitrogenEx_kernel | arch/arm/mach-davinci/board-da830-evm.c | 4827 | 17359 | /*
* TI DA830/OMAP L137 EVM board
*
* Author: Mark A. Greer <mgreer@mvista.com>
* Derived from: arch/arm/mach-davinci/board-dm644x-evm.c
*
* 2007, 2009 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>
#include <linux/i2c/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/cp_intc.h>
#include <mach/mux.h>
#include <mach/nand.h>
#include <mach/da8xx.h>
#include <mach/usb.h>
#include <mach/aemif.h>
#include <mach/spi.h>
/* empty string: match the first PHY found on the mdio bus */
#define DA830_EVM_PHY_ID		""
/*
 * USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
 */
#define ON_BD_USB_DRV	GPIO_TO_PIN(1, 15)
#define ON_BD_USB_OVC	GPIO_TO_PIN(2, 4)

static const short da830_evm_usb11_pins[] = {
	DA830_GPIO1_15, DA830_GPIO2_4,
	-1
};

/* callback registered by the OHCI driver via ocic_notify() */
static da8xx_ocic_handler_t da830_evm_usb_ocic_handler;
/* drive the USB 1.1 port power switch; the port number is ignored
 * because the board has a single on-board switch */
static int da830_evm_usb_set_power(unsigned port, int on)
{
	gpio_set_value(ON_BD_USB_DRV, on);
	return 0;
}

/* report the current state of the port power GPIO */
static int da830_evm_usb_get_power(unsigned port)
{
	return gpio_get_value(ON_BD_USB_DRV);
}

/* over-current indicator is active low, hence the negation */
static int da830_evm_usb_get_oci(unsigned port)
{
	return !gpio_get_value(ON_BD_USB_OVC);
}
static irqreturn_t da830_evm_usb_ocic_irq(int, void *);

/*
 * Install (non-NULL @handler) or remove (NULL) the over-current
 * notification callback: the OVC GPIO's edge IRQ is requested or freed
 * accordingly.
 */
static int da830_evm_usb_ocic_notify(da8xx_ocic_handler_t handler)
{
	int irq 	= gpio_to_irq(ON_BD_USB_OVC);
	int error	= 0;

	if (handler != NULL) {
		da830_evm_usb_ocic_handler = handler;

		error = request_irq(irq, da830_evm_usb_ocic_irq, IRQF_DISABLED |
				    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				    "OHCI over-current indicator", NULL);
		if (error)
			printk(KERN_ERR "%s: could not request IRQ to watch "
			       "over-current indicator changes\n", __func__);
	} else
		free_irq(irq, NULL);

	return error;
}
/* root-hub callbacks handed to the DA8xx OHCI driver */
static struct da8xx_ohci_root_hub da830_evm_usb11_pdata = {
	.set_power	= da830_evm_usb_set_power,
	.get_power	= da830_evm_usb_get_power,
	.get_oci	= da830_evm_usb_get_oci,
	.ocic_notify	= da830_evm_usb_ocic_notify,

	/* TPS2065 switch @ 5V */
	.potpgt		= (3 + 1) / 2,	/* 3 ms max */
};

/* edge IRQ on the OVC GPIO: forward the event to the OHCI driver */
static irqreturn_t da830_evm_usb_ocic_irq(int irq, void *dev_id)
{
	da830_evm_usb_ocic_handler(&da830_evm_usb11_pdata, 1);
	return IRQ_HANDLED;
}
/*
 * Board init for both USB controllers: configure CFGCHIP2 clocking/mode,
 * set up pinmux, claim the VBUS-drive and over-current GPIOs, and
 * register the USB 2.0 (MUSB) and USB 1.1 (OHCI) devices.
 * Errors are logged but non-fatal (the rest of board init continues).
 */
static __init void da830_evm_usb_init(void)
{
	u32 cfgchip2;
	int ret;

	/*
	 * Set up USB clock/mode in the CFGCHIP2 register.
	 * FYI: CFGCHIP2 is 0x0000ef00 initially.
	 */
	cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));

	/* USB2.0 PHY reference clock is 24 MHz */
	cfgchip2 &= ~CFGCHIP2_REFFREQ;
	cfgchip2 |=  CFGCHIP2_REFFREQ_24MHZ;

	/*
	 * Select internal reference clock for USB 2.0 PHY
	 * and use it as a clock source for USB 1.1 PHY
	 * (this is the default setting anyway).
	 */
	cfgchip2 &= ~CFGCHIP2_USB1PHYCLKMUX;
	cfgchip2 |=  CFGCHIP2_USB2PHYCLKMUX;

	/*
	 * We have to override VBUS/ID signals when MUSB is configured into the
	 * host-only mode -- ID pin will float if no cable is connected, so the
	 * controller won't be able to drive VBUS thinking that it's a B-device.
	 * Otherwise, we want to use the OTG mode and enable VBUS comparators.
	 */
	cfgchip2 &= ~CFGCHIP2_OTGMODE;
#ifdef	CONFIG_USB_MUSB_HOST
	cfgchip2 |=  CFGCHIP2_FORCE_HOST;
#else
	cfgchip2 |=  CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN;
#endif

	__raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));

	/* USB_REFCLKIN is not used. */
	ret = davinci_cfg_reg(DA830_USB0_DRVVBUS);
	if (ret)
		pr_warning("%s: USB 2.0 PinMux setup failed: %d\n",
			   __func__, ret);
	else {
		/*
		 * TPS2065 switch @ 5V supplies 1 A (sustains 1.5 A),
		 * with the power on to power good time of 3 ms.
		 */
		ret = da8xx_register_usb20(1000, 3);
		if (ret)
			pr_warning("%s: USB 2.0 registration failed: %d\n",
				   __func__, ret);
	}

	ret = davinci_cfg_reg_list(da830_evm_usb11_pins);
	if (ret) {
		pr_warning("%s: USB 1.1 PinMux setup failed: %d\n",
			   __func__, ret);
		return;
	}

	ret = gpio_request(ON_BD_USB_DRV, "ON_BD_USB_DRV");
	if (ret) {
		printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
		       "power control: %d\n", __func__, ret);
		return;
	}
	/* start with port power off */
	gpio_direction_output(ON_BD_USB_DRV, 0);

	ret = gpio_request(ON_BD_USB_OVC, "ON_BD_USB_OVC");
	if (ret) {
		printk(KERN_ERR "%s: failed to request GPIO for USB 1.1 port "
		       "over-current indicator: %d\n", __func__, ret);
		return;
	}
	gpio_direction_input(ON_BD_USB_OVC);

	ret = da8xx_register_usb11(&da830_evm_usb11_pdata);
	if (ret)
		pr_warning("%s: USB 1.1 registration failed: %d\n",
			   __func__, ret);
}
/* enable UART0..UART2 (bitmask) */
static struct davinci_uart_config da830_evm_uart_config __initdata = {
	.enabled_uarts = 0x7,
};

/* pinmux list for McASP1 used by the audio codec */
static const short da830_evm_mcasp1_pins[] = {
	DA830_AHCLKX1, DA830_ACLKX1, DA830_AFSX1, DA830_AHCLKR1, DA830_AFSR1,
	DA830_AMUTE1, DA830_AXR1_0, DA830_AXR1_1, DA830_AXR1_2, DA830_AXR1_5,
	DA830_ACLKR1, DA830_AXR1_6, DA830_AXR1_7, DA830_AXR1_8, DA830_AXR1_10,
	DA830_AXR1_11,
	-1
};

/* serializer 0 receives, serializer 5 transmits; the rest are unused */
static u8 da830_iis_serializer_direction[] = {
	RX_MODE,	INACTIVE_MODE,	INACTIVE_MODE,	INACTIVE_MODE,
	INACTIVE_MODE,	TX_MODE,	INACTIVE_MODE,	INACTIVE_MODE,
	INACTIVE_MODE,	INACTIVE_MODE,	INACTIVE_MODE,	INACTIVE_MODE,
};

/* McASP configuration for I2S audio */
static struct snd_platform_data da830_evm_snd_data = {
	.tx_dma_offset  = 0x2000,
	.rx_dma_offset  = 0x2000,
	.op_mode        = DAVINCI_MCASP_IIS_MODE,
	.num_serializer = ARRAY_SIZE(da830_iis_serializer_direction),
	.tdm_slots      = 2,
	.serial_dir     = da830_iis_serializer_direction,
	.asp_chan_q     = EVENTQ_0,
	.version        = MCASP_VERSION_2,
	.txnumevt       = 1,
	.rxnumevt       = 1,
};
/*
* GPIO2[1] is used as MMC_SD_WP and GPIO2[2] as MMC_SD_INS.
*/
static const short da830_evm_mmc_sd_pins[] = {
DA830_MMCSD_DAT_0, DA830_MMCSD_DAT_1, DA830_MMCSD_DAT_2,
DA830_MMCSD_DAT_3, DA830_MMCSD_DAT_4, DA830_MMCSD_DAT_5,
DA830_MMCSD_DAT_6, DA830_MMCSD_DAT_7, DA830_MMCSD_CLK,
DA830_MMCSD_CMD, DA830_GPIO2_1, DA830_GPIO2_2,
-1
};
/* Write-protect sense on GPIO2[1], card-detect on GPIO2[2]. */
#define DA830_MMCSD_WP_PIN GPIO_TO_PIN(2, 1)
#define DA830_MMCSD_CD_PIN GPIO_TO_PIN(2, 2)
/* Report the raw write-protect switch level to the MMC core. */
static int da830_evm_mmc_get_ro(int index)
{
return gpio_get_value(DA830_MMCSD_WP_PIN);
}
/* Card-detect line is inverted here, i.e. it reads low with a card present. */
static int da830_evm_mmc_get_cd(int index)
{
return !gpio_get_value(DA830_MMCSD_CD_PIN);
}
/* MMC controller config: 8-bit bus, 50 MHz max, high-speed, v2 controller. */
static struct davinci_mmc_config da830_evm_mmc_config = {
.get_ro = da830_evm_mmc_get_ro,
.get_cd = da830_evm_mmc_get_cd,
.wires = 8,
.max_freq = 50000000,
.caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
.version = MMC_CTLR_VERSION_2,
};
/*
 * Set up the MMC/SD pinmux, claim the write-protect and card-detect
 * GPIOs, and register MMC/SD controller 0.  Failures are logged with
 * pr_warning() and abort the setup; GPIOs claimed earlier on the same
 * path are released again so a later retry does not hit -EBUSY.
 */
static inline void da830_evm_init_mmc(void)
{
	int ret;

	ret = davinci_cfg_reg_list(da830_evm_mmc_sd_pins);
	if (ret) {
		pr_warning("da830_evm_init: mmc/sd mux setup failed: %d\n",
				ret);
		return;
	}

	ret = gpio_request(DA830_MMCSD_WP_PIN, "MMC WP");
	if (ret) {
		pr_warning("da830_evm_init: can not open GPIO %d\n",
			   DA830_MMCSD_WP_PIN);
		return;
	}
	gpio_direction_input(DA830_MMCSD_WP_PIN);

	/* Label was "MMC CD\n" -- the stray newline garbled gpio listings */
	ret = gpio_request(DA830_MMCSD_CD_PIN, "MMC CD");
	if (ret) {
		pr_warning("da830_evm_init: can not open GPIO %d\n",
			   DA830_MMCSD_CD_PIN);
		gpio_free(DA830_MMCSD_WP_PIN);	/* was leaked on this path */
		return;
	}
	gpio_direction_input(DA830_MMCSD_CD_PIN);

	ret = da8xx_register_mmcsd0(&da830_evm_mmc_config);
	if (ret) {
		pr_warning("da830_evm_init: mmc/sd registration failed: %d\n",
				ret);
		gpio_free(DA830_MMCSD_WP_PIN);
		gpio_free(DA830_MMCSD_CD_PIN);	/* was leaked on this path */
	}
}
/*
 * UI board NAND/NOR flashes only use 8-bit data bus.
 */
static const short da830_evm_emif25_pins[] = {
DA830_EMA_D_0, DA830_EMA_D_1, DA830_EMA_D_2, DA830_EMA_D_3,
DA830_EMA_D_4, DA830_EMA_D_5, DA830_EMA_D_6, DA830_EMA_D_7,
DA830_EMA_A_0, DA830_EMA_A_1, DA830_EMA_A_2, DA830_EMA_A_3,
DA830_EMA_A_4, DA830_EMA_A_5, DA830_EMA_A_6, DA830_EMA_A_7,
DA830_EMA_A_8, DA830_EMA_A_9, DA830_EMA_A_10, DA830_EMA_A_11,
DA830_EMA_A_12, DA830_EMA_BA_0, DA830_EMA_BA_1, DA830_NEMA_WE,
DA830_NEMA_CS_2, DA830_NEMA_CS_3, DA830_NEMA_OE, DA830_EMA_WAIT_0,
-1
};
/* True when the DaVinci MMC driver is built in or as a module. */
#if defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
#define HAS_MMC 1
#else
#define HAS_MMC 0
#endif
#ifdef CONFIG_DA830_UI_NAND
/* MTD partition layout for the UI card NAND flash. */
static struct mtd_partition da830_evm_nand_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
[0] = {
.name = "bootloader",
.offset = 0,
.size = SZ_128K,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
/* bootloader params in the next sector */
[1] = {
.name = "params",
.offset = MTDPART_OFS_APPEND,
.size = SZ_128K,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
/* kernel */
[2] = {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_2M,
.mask_flags = 0,
},
/* file system */
[3] = {
.name = "filesystem",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
}
};
/* flash bbt descriptors */
static uint8_t da830_evm_nand_bbt_pattern[] = { 'B', 'b', 't', '0' };
static uint8_t da830_evm_nand_mirror_pattern[] = { '1', 't', 'b', 'B' };
/* Primary bad-block table: stored near the last blocks, 2-bit entries. */
static struct nand_bbt_descr da830_evm_nand_bbt_main_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
NAND_BBT_WRITE | NAND_BBT_2BIT |
NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 2,
.len = 4,
.veroffs = 16,
.maxblocks = 4,
.pattern = da830_evm_nand_bbt_pattern
};
/* Mirror BBT uses the reversed signature so the two copies differ. */
static struct nand_bbt_descr da830_evm_nand_bbt_mirror_descr = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
NAND_BBT_WRITE | NAND_BBT_2BIT |
NAND_BBT_VERSION | NAND_BBT_PERCHIP,
.offs = 2,
.len = 4,
.veroffs = 16,
.maxblocks = 4,
.pattern = da830_evm_nand_mirror_pattern
};
/* AEMIF read/write cycle timings for the NAND (presumably ns -- confirm
 * against struct davinci_aemif_timing). */
static struct davinci_aemif_timing da830_evm_nandflash_timing = {
.wsetup = 24,
.wstrobe = 21,
.whold = 14,
.rsetup = 19,
.rstrobe = 50,
.rhold = 0,
.ta = 20,
};
/* 4-bit hardware ECC; BBT kept in flash using the descriptors above. */
static struct davinci_nand_pdata da830_evm_nand_pdata = {
.parts = da830_evm_nand_partitions,
.nr_parts = ARRAY_SIZE(da830_evm_nand_partitions),
.ecc_mode = NAND_ECC_HW,
.ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
.bbt_td = &da830_evm_nand_bbt_main_descr,
.bbt_md = &da830_evm_nand_bbt_mirror_descr,
.timing = &da830_evm_nandflash_timing,
};
static struct resource da830_evm_nand_resources[] = {
[0] = { /* First memory resource is NAND I/O window */
.start = DA8XX_AEMIF_CS3_BASE,
.end = DA8XX_AEMIF_CS3_BASE + PAGE_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = { /* Second memory resource is AEMIF control registers */
.start = DA8XX_AEMIF_CTL_BASE,
.end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
};
/* Platform device consumed by the "davinci_nand" driver. */
static struct platform_device da830_evm_nand_device = {
.name = "davinci_nand",
.id = 1,
.dev = {
.platform_data = &da830_evm_nand_pdata,
},
.num_resources = ARRAY_SIZE(da830_evm_nand_resources),
.resource = da830_evm_nand_resources,
};
/*
 * Register the UI card NAND device.  NAND and MMC/SD share AEMIF pins,
 * so NAND is refused outright when the MMC driver is configured in.
 * @mux_mode: UI expander GPIO that selects the AEMIF routing; driven
 * high here to enable NAND (the LCD path drives it low).
 */
static inline void da830_evm_init_nand(int mux_mode)
{
int ret;
if (HAS_MMC) {
pr_warning("WARNING: both MMC/SD and NAND are "
"enabled, but they share AEMIF pins.\n"
"\tDisable MMC/SD for NAND support.\n");
return;
}
/* Mux failures are logged but not fatal: keep going like the rest of
 * the board init does. */
ret = davinci_cfg_reg_list(da830_evm_emif25_pins);
if (ret)
pr_warning("da830_evm_init: emif25 mux setup failed: %d\n",
ret);
ret = platform_device_register(&da830_evm_nand_device);
if (ret)
pr_warning("da830_evm_init: NAND device not registered.\n");
gpio_direction_output(mux_mode, 1);
}
#else
static inline void da830_evm_init_nand(int mux_mode) { }
#endif
#ifdef CONFIG_DA830_UI_LCD
/*
 * Register the Sharp LQ035Q3DG01 LCD controller.
 * @mux_mode: UI expander GPIO selecting the pin routing; driven low
 * here for LCD (the NAND path drives it high).
 */
static inline void da830_evm_init_lcdc(int mux_mode)
{
int ret;
ret = davinci_cfg_reg_list(da830_lcdcntl_pins);
if (ret)
pr_warning("da830_evm_init: lcdcntl mux setup failed: %d\n",
ret);
ret = da8xx_register_lcdc(&sharp_lcd035q3dg01_pdata);
if (ret)
pr_warning("da830_evm_init: lcd setup failed: %d\n", ret);
gpio_direction_output(mux_mode, 0);
}
#else
static inline void da830_evm_init_lcdc(int mux_mode) { }
#endif
/*
 * 256-kbit (32 KiB) I2C EEPROM with 16-bit addressing.  The setup hook
 * extracts the board MAC address; 0x7f00 is the offset handed to it
 * (presumably where the MAC is stored -- confirm with davinci_get_mac_addr).
 */
static struct at24_platform_data da830_evm_i2c_eeprom_info = {
.byte_len = SZ_256K / 8,
.page_size = 64,
.flags = AT24_FLAG_ADDR16,
.setup = davinci_get_mac_addr,
.context = (void *)0x7f00,
};
/*
 * Called once the PCF8574 UI expander is probed.  Expander pin 6 is the
 * LCD/NAND mux-mode line; claim it before the init helpers drive it.
 * The original code ignored the gpio_request() result and then drove an
 * unrequested GPIO on failure -- now the error is propagated instead.
 */
static int __init da830_evm_ui_expander_setup(struct i2c_client *client,
		int gpio, unsigned ngpio, void *context)
{
	int ret;

	ret = gpio_request(gpio + 6, "UI MUX_MODE");
	if (ret) {
		pr_warning("Cannot open UI expander pin %d\n", gpio + 6);
		return ret;
	}
	/* Drive mux mode low to match the default without UI card */
	gpio_direction_output(gpio + 6, 0);
	da830_evm_init_lcdc(gpio + 6);
	da830_evm_init_nand(gpio + 6);
	return 0;
}
/* Release the mux-mode GPIO claimed in da830_evm_ui_expander_setup(). */
static int da830_evm_ui_expander_teardown(struct i2c_client *client, int gpio,
unsigned ngpio, void *context)
{
gpio_free(gpio + 6);
return 0;
}
/* UI expander GPIOs are numbered directly after the SoC's own GPIOs. */
static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = {
.gpio_base = DAVINCI_N_GPIO,
.setup = da830_evm_ui_expander_setup,
.teardown = da830_evm_ui_expander_teardown,
};
/* I2C bus 1 devices: EEPROM, audio codec, UI expander. */
static struct i2c_board_info __initdata da830_evm_i2c_devices[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
.platform_data = &da830_evm_i2c_eeprom_info,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
},
{
I2C_BOARD_INFO("pcf8574", 0x3f),
.platform_data = &da830_evm_ui_expander_info,
},
};
static struct davinci_i2c_platform_data da830_evm_i2c_0_pdata = {
.bus_freq = 100, /* kHz */
.bus_delay = 0, /* usec */
};
/*
 * The following EDMA channels/slots are not being used by drivers (for
 * example: Timer, GPIO, UART events etc) on da830/omap-l137 EVM, hence
 * they are being reserved for codecs on the DSP side.
 */
static const s16 da830_dma_rsv_chans[][2] = {
/* (offset, number) */
{ 8, 2},
{12, 2},
{24, 4},
{30, 2},
{-1, -1}
};
static const s16 da830_dma_rsv_slots[][2] = {
/* (offset, number) */
{ 8, 2},
{12, 2},
{24, 4},
{30, 26},
{-1, -1}
};
static struct edma_rsv_info da830_edma_rsv[] = {
{
.rsv_chans = da830_dma_rsv_chans,
.rsv_slots = da830_dma_rsv_slots,
},
};
/* Partition map for the Winbond W25X32 SPI boot flash. */
static struct mtd_partition da830evm_spiflash_part[] = {
[0] = {
.name = "DSP-UBL",
.offset = 0,
.size = SZ_8K,
.mask_flags = MTD_WRITEABLE,
},
[1] = {
.name = "ARM-UBL",
.offset = MTDPART_OFS_APPEND,
.size = SZ_16K + SZ_8K,
.mask_flags = MTD_WRITEABLE,
},
[2] = {
.name = "U-Boot",
.offset = MTDPART_OFS_APPEND,
.size = SZ_256K - SZ_32K,
.mask_flags = MTD_WRITEABLE,
},
[3] = {
.name = "U-Boot-Environment",
.offset = MTDPART_OFS_APPEND,
.size = SZ_16K,
.mask_flags = 0,
},
[4] = {
.name = "Kernel",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
},
};
/* Bound to the m25p80 serial-flash driver. */
static struct flash_platform_data da830evm_spiflash_data = {
.name = "m25p80",
.parts = da830evm_spiflash_part,
.nr_parts = ARRAY_SIZE(da830evm_spiflash_part),
.type = "w25x32",
};
/* DMA transfers with 8-cycle chip-select setup/hold delays. */
static struct davinci_spi_config da830evm_spiflash_cfg = {
.io_type = SPI_IO_TYPE_DMA,
.c2tdelay = 8,
.t2cdelay = 8,
};
/* SPI bus 0, chip-select 0: the boot flash, clocked at up to 30 MHz. */
static struct spi_board_info da830evm_spi_info[] = {
{
.modalias = "m25p80",
.platform_data = &da830evm_spiflash_data,
.controller_data = &da830evm_spiflash_cfg,
.mode = SPI_MODE_0,
.max_speed_hz = 30000000,
.bus_num = 0,
.chip_select = 0,
},
};
/*
 * Board init: registers EDMA (with DSP reservations), I2C, USB, EMAC
 * (RMII mode), watchdog, UARTs, audio, MMC/SD, RTC and SPI flash.
 * Each step logs a warning on failure and continues, so one broken
 * subsystem does not take down the rest of the board.
 */
static __init void da830_evm_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
int ret;
ret = da830_register_edma(da830_edma_rsv);
if (ret)
pr_warning("da830_evm_init: edma registration failed: %d\n",
ret);
ret = davinci_cfg_reg_list(da830_i2c0_pins);
if (ret)
pr_warning("da830_evm_init: i2c0 mux setup failed: %d\n",
ret);
ret = da8xx_register_i2c(0, &da830_evm_i2c_0_pdata);
if (ret)
pr_warning("da830_evm_init: i2c0 registration failed: %d\n",
ret);
da830_evm_usb_init();
/* The EVM PHY is wired for RMII. */
soc_info->emac_pdata->rmii_en = 1;
soc_info->emac_pdata->phy_id = DA830_EVM_PHY_ID;
ret = davinci_cfg_reg_list(da830_cpgmac_pins);
if (ret)
pr_warning("da830_evm_init: cpgmac mux setup failed: %d\n",
ret);
ret = da8xx_register_emac();
if (ret)
pr_warning("da830_evm_init: emac registration failed: %d\n",
ret);
ret = da8xx_register_watchdog();
if (ret)
pr_warning("da830_evm_init: watchdog registration failed: %d\n",
ret);
davinci_serial_init(&da830_evm_uart_config);
/* The I2C devices live on bus 1 (codec, EEPROM, UI expander). */
i2c_register_board_info(1, da830_evm_i2c_devices,
ARRAY_SIZE(da830_evm_i2c_devices));
ret = davinci_cfg_reg_list(da830_evm_mcasp1_pins);
if (ret)
pr_warning("da830_evm_init: mcasp1 mux setup failed: %d\n",
ret);
da8xx_register_mcasp(1, &da830_evm_snd_data);
da830_evm_init_mmc();
ret = da8xx_register_rtc();
if (ret)
pr_warning("da830_evm_init: rtc setup failed: %d\n", ret);
ret = da8xx_register_spi(0, da830evm_spi_info,
ARRAY_SIZE(da830evm_spi_info));
if (ret)
pr_warning("da830_evm_init: spi 0 registration failed: %d\n",
ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
 * Make ttyS2 @ 115200 the preferred console, but only when this kernel
 * is actually booting on a DA830 EVM.
 */
static int __init da830_evm_console_init(void)
{
	if (machine_is_davinci_da830_evm())
		return add_preferred_console("ttyS", 2, "115200");
	return 0;
}
#endif
/* Early SoC setup: static I/O mappings and clock init for the DA830. */
static void __init da830_evm_map_io(void)
{
da830_init();
}
/* Machine descriptor tying the callbacks above to this board type. */
MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.atag_offset = 0x100,
.map_io = da830_evm_map_io,
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da830_evm_init,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
| gpl-2.0 |
TeamWin/kernel_samsung_lt02ltetmo | drivers/net/ethernet/8390/es3210.c | 5083 | 12788 | /*
es3210.c
Linux driver for Racal-Interlan ES3210 EISA Network Adapter
Copyright (C) 1996, Paul Gortmaker.
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
Information and Code Sources:
1) The existing myriad of Linux 8390 drivers written by Donald Becker.
2) Once again Russ Nelson's asm packet driver provided additional info.
3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
Too bad it doesn't work -- see below.
The ES3210 is an EISA shared memory NS8390 implementation. Note
that all memory copies to/from the board must be 32bit transfers.
Which rules out using eth_io_copy_and_sum() in this driver.
Apparently there are two slightly different revisions of the
card, since there are two distinct EISA cfg files (!rii0101.cfg
and !rii0102.cfg) One has media select in the cfg file and the
other doesn't. Hopefully this will work with either.
That is about all I can tell you about it, having never actually
even seen one of these cards. :) Try http://www.interlan.com
if you want more info.
Thanks go to Mark Salazar for testing v0.02 of this driver.
Bugs, to-fix, etc:
1) The EISA cfg ports that are *supposed* to have the IRQ and shared
mem values just read 0xff all the time. Hrrmpf. Apparently the
same happens with the packet driver as the code for reading
these registers is disabled there. In the meantime, boot with:
ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
shared memory detection. (The i/o port detection is okay.)
2) Module support currently untested. Probably works though.
*/
static const char version[] =
"es3210.c: Driver revision v0.03, 14/09/96\n";
#include <linux/module.h>
#include <linux/eisa.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/io.h>
#include "8390.h"
static int es_probe1(struct net_device *dev, int ioaddr);
static void es_reset_8390(struct net_device *dev);
static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
#define ES_START_PG 0x00 /* First page of TX buffer */
#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
#define ES_RESET_PORT 0xc84 /* From the packet driver source */
#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
#define ES_ADDR1 0x07
#define ES_ADDR2 0x01
/*
 * Two card revisions. EISA IDs are always rev. minor, rev. major, and
* then the three vendor letters stored in 5 bits each, with an "a" = 1.
* For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
* config utility determines automagically what config file(s) to use.
*/
#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
#define ES_CFG2 0xcc1
#define ES_CFG3 0xcc2
#define ES_CFG4 0xcc3
#define ES_CFG5 0xcc4
#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
/*
* You can OR any of the following bits together and assign it
* to ES_DEBUG to get verbose driver info during operation.
* Some of these don't do anything yet.
*/
#define ES_D_PROBE 0x01
#define ES_D_RX_PKT 0x02
#define ES_D_TX_PKT 0x04
#define ED_D_IRQ 0x08
#define ES_DEBUG 0
static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
/*
* Probe for the card. The best way is to read the EISA ID if it
* is known. Then we check the prefix of the station address
* PROM for a match against the Racal-Interlan assigned value.
*/
/*
 * Top-level probe.  An explicit base address > 0x1ff probes only that
 * slot; any other non-zero address disables probing entirely.  With no
 * address given, EISA slot I/O bases 0x1000-0x8000 are scanned, restoring
 * the caller-supplied irq/mem_start between attempts since es_probe1()
 * may have overwritten them on a failed try.
 */
static int __init do_es_probe(struct net_device *dev)
{
unsigned short ioaddr = dev->base_addr;
int irq = dev->irq;
int mem_start = dev->mem_start;
if (ioaddr > 0x1ff) /* Check a single specified location. */
return es_probe1(dev, ioaddr);
else if (ioaddr > 0) /* Don't probe at all. */
return -ENXIO;
if (!EISA_bus) {
#if ES_DEBUG & ES_D_PROBE
printk("es3210.c: Not EISA bus. Not probing high ports.\n");
#endif
return -ENXIO;
}
/* EISA spec allows for up to 16 slots, but 8 is typical. */
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
if (es_probe1(dev, ioaddr) == 0)
return 0;
dev->irq = irq;
dev->mem_start = mem_start;
}
return -ENODEV;
}
#ifndef MODULE
/*
 * Non-modular entry point: allocate an 8390 netdev for unit "eth<unit>",
 * apply any boot-time overrides and run the probe.  Returns the live
 * device on success or an ERR_PTR on failure.
 */
struct net_device * __init es_probe(int unit)
{
	int err;
	struct net_device *dev = alloc_ei_netdev();

	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_es_probe(dev);
	if (!err)
		return dev;

	free_netdev(dev);
	return ERR_PTR(err);
}
#endif
/*
 * Probe one candidate slot I/O base.  Verifies the EISA ID and the
 * Racal vendor prefix of the station address, then determines IRQ and
 * shared memory (from config registers unless overridden), maps the
 * shared memory and registers the netdev.  On failure everything
 * acquired so far is unwound via the out labels.
 */
static int __init es_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
unsigned long eisa_id;
if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
return -ENODEV;
#if ES_DEBUG & ES_D_PROBE
printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
#endif
/* Check the EISA ID of the card. */
eisa_id = inl(ioaddr + ES_ID_PORT);
if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
retval = -ENODEV;
goto out;
}
for (i = 0; i < ETH_ALEN ; i++)
dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
/* Check the Racal vendor ID as well. */
if (dev->dev_addr[0] != ES_ADDR0 ||
dev->dev_addr[1] != ES_ADDR1 ||
dev->dev_addr[2] != ES_ADDR2) {
printk("es3210.c: card not found %pM (invalid_prefix).\n",
dev->dev_addr);
retval = -ENODEV;
goto out;
}
printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
eisa_id>>24, ioaddr, dev->dev_addr);
/* Snarf the interrupt now. */
if (dev->irq == 0) {
/* No override: decode the IRQ from the two config registers.
 * The "hi" register selects IRQs >= 11, the "lo" register is a
 * one-hot bitmap of the lower IRQs. */
unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
if (hi_irq != 0) {
dev->irq = hi_irq_map[hi_irq - 1];
} else {
int i = 0;
while (lo_irq > (1<<i)) i++;
dev->irq = lo_irq_map[i];
}
printk(" using IRQ %d", dev->irq);
#if ES_DEBUG & ES_D_PROBE
printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
hi_irq, lo_irq, dev->irq);
#endif
} else {
if (dev->irq == 2)
dev->irq = 9; /* Doh! */
printk(" assigning IRQ %d", dev->irq);
}
if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
printk (" unable to get IRQ %d.\n", dev->irq);
retval = -EAGAIN;
goto out;
}
if (dev->mem_start == 0) {
/* No override: read shared-memory enable and base from config. */
unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
if (mem_enabled != 0x80) {
printk(" shared mem disabled - giving up\n");
retval = -ENXIO;
goto out1;
}
dev->mem_start = 0xC0000 + mem_bits*0x4000;
printk(" using ");
} else {
printk(" assigning ");
}
ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
if (!ei_status.mem) {
printk("ioremap failed - giving up\n");
retval = -ENXIO;
goto out1;
}
dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
#if ES_DEBUG & ES_D_PROBE
if (inb(ioaddr + ES_CFG5))
printk("es3210: Warning - DMA channel enabled, but not used here.\n");
#endif
/* Note, point at the 8390, and not the card... */
dev->base_addr = ioaddr + ES_NIC_OFFSET;
ei_status.name = "ES3210";
ei_status.tx_start_page = ES_START_PG;
ei_status.rx_start_page = ES_START_PG + TX_PAGES;
ei_status.stop_page = ES_STOP_PG;
ei_status.word16 = 1;
if (ei_debug > 0)
printk(version);
ei_status.reset_8390 = &es_reset_8390;
ei_status.block_input = &es_block_input;
ei_status.block_output = &es_block_output;
ei_status.get_8390_hdr = &es_get_8390_hdr;
dev->netdev_ops = &ei_netdev_ops;
NS8390_init(dev, 0);
retval = register_netdev(dev);
if (retval)
goto out1;
return 0;
out1:
free_irq(dev->irq, dev);
out:
release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
return retval;
}
/*
* Reset as per the packet driver method. Judging by the EISA cfg
* file, this just toggles the "Board Enable" bits (bit 2 and 0).
*/
/*
 * Reset as per the packet driver method. Judging by the EISA cfg
 * file, this just toggles the "Board Enable" bits (bit 2 and 0).
 */
static void es_reset_8390(struct net_device *dev)
{
unsigned short ioaddr = dev->base_addr;
unsigned long end;
outb(0x04, ioaddr + ES_RESET_PORT);
if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
/* Busy-wait roughly 2 jiffy-hundredths (~20ms at HZ=100) for the
 * board to settle before re-enabling it. */
end = jiffies + 2*HZ/100;
while ((signed)(end - jiffies) > 0) continue;
ei_status.txing = 0;
outb(0x01, ioaddr + ES_RESET_PORT);
if (ei_debug > 1) printk("reset done\n");
}
/*
* Note: In the following three functions is the implicit assumption
* that the associated memcpy will only use "rep; movsl" as long as
* we keep the counts as some multiple of doublewords. This is a
* requirement of the hardware, and also prevents us from using
* eth_io_copy_and_sum() since we can't guarantee it will limit
* itself to doubleword access.
*/
/*
* Grab the 8390 specific header. Similar to the block_input routine, but
* we don't need to be concerned with ring wrap as the header will be at
* the start of a page, so we optimize accordingly. (A single doubleword.)
*/
/*
 * Copy the 4-byte 8390 packet header from the start of the given ring
 * page, then round the reported packet length up to a multiple of 4 so
 * later shared-memory copies stay doubleword-sized (a hardware
 * requirement, see the comment block above).
 */
static void
es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
}
/*
* Block input and output are easy on shared memory ethercards, the only
* complication is when the ring buffer wraps. The count will already
* be rounded up to a doubleword value via es_get_8390_hdr() above.
*/
/*
 * Copy a received packet out of shared memory into the skb.  The count
 * was already rounded to a doubleword multiple by es_get_8390_hdr(),
 * so the only complication is a packet wrapping past the ring's end.
 */
static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
int ring_offset)
{
void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
if (ring_offset + count > ES_STOP_PG*256) {
/* Packet wraps over end of ring buffer. */
int semi_count = ES_STOP_PG*256 - ring_offset;
memcpy_fromio(skb->data, xfer_start, semi_count);
count -= semi_count;
memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
} else {
/* Packet is in one chunk. */
memcpy_fromio(skb->data, xfer_start, count);
}
}
static void es_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page)
{
void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
count = (count + 3) & ~3; /* Round up to doubleword */
memcpy_toio(shmem, buf, count);
}
#ifdef MODULE
#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
#define NAMELEN 8 /* # of chars for storing dev->name */
/* Per-card state and module parameters (one entry per possible card). */
static struct net_device *dev_es3210[MAX_ES_CARDS];
static int io[MAX_ES_CARDS];
static int irq[MAX_ES_CARDS];
static int mem[MAX_ES_CARDS];
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
MODULE_LICENSE("GPL");
/*
 * Module entry: probe one card per io[] parameter.  Only the first card
 * may omit io= (which triggers autoprobe); a zero io for a later slot
 * ends the loop.  Succeeds when at least one card was found.
 */
int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
if (io[this_dev] == 0 && this_dev != 0)
break;
dev = alloc_ei_netdev();
if (!dev)
break;
dev->irq = irq[this_dev];
dev->base_addr = io[this_dev];
dev->mem_start = mem[this_dev];
if (do_es_probe(dev) == 0) {
dev_es3210[found++] = dev;
continue;
}
/* Probe failed: drop this device and stop scanning further slots. */
free_netdev(dev);
printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
break;
}
if (found)
return 0;
return -ENXIO;
}
static void cleanup_card(struct net_device *dev)
{
free_irq(dev->irq, dev);
release_region(dev->base_addr, ES_IO_EXTENT);
iounmap(ei_status.mem);
}
/* Module exit: unregister and free every card found by init_module(). */
void __exit
cleanup_module(void)
{
int this_dev;
for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
struct net_device *dev = dev_es3210[this_dev];
if (dev) {
unregister_netdev(dev);
cleanup_card(dev);
free_netdev(dev);
}
}
}
#endif /* MODULE */
| gpl-2.0 |
TeamWin/android_kernel_samsung_j2lte | drivers/rtc/rtc-pl030.c | 7387 | 3980 | /*
* linux/drivers/rtc/rtc-pl030.c
*
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/slab.h>
/* PL030 register offsets.  Note STAT and EOI share offset 8: reads give
 * interrupt status, writes acknowledge the interrupt. */
#define RTC_DR (0)
#define RTC_MR (4)
#define RTC_STAT (8)
#define RTC_EOI (8)
#define RTC_LR (12)
#define RTC_CR (16)
#define RTC_CR_MIE (1 << 0)
/* Per-device state: the registered RTC class device and mapped registers. */
struct pl030_rtc {
struct rtc_device *rtc;
void __iomem *base;
};
/*
 * Alarm interrupt handler: writing RTC_EOI acknowledges the interrupt.
 * Nothing else to do -- the RTC core polls state via the ops below.
 */
static irqreturn_t pl030_interrupt(int irq, void *dev_id)
{
	struct pl030_rtc *pl030 = dev_id;

	writel(0, pl030->base + RTC_EOI);
	return IRQ_HANDLED;
}
/* Read the alarm match register and convert it to broken-down time. */
static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
return 0;
}
/*
 * Program the alarm match register.
 *
 * At the moment, we can only deal with non-wildcarded alarm times:
 * the broken-down time must validate and convert cleanly to a
 * seconds counter before it is written to RTC_MR.
 */
static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pl030_rtc *rtc = dev_get_drvdata(dev);
	unsigned long secs;
	int ret;

	ret = rtc_valid_tm(&alrm->time);
	if (ret)
		return ret;

	ret = rtc_tm_to_time(&alrm->time, &secs);
	if (ret)
		return ret;

	writel(secs, rtc->base + RTC_MR);
	return 0;
}
/* Read the free-running seconds counter and convert to broken-down time. */
static int pl030_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
rtc_time_to_tm(readl(rtc->base + RTC_DR), tm);
return 0;
}
/*
* Set the RTC time. Unfortunately, we can't accurately set
* the point at which the counter updates.
*
* Also, since RTC_LR is transferred to RTC_CR on next rising
* edge of the 1Hz clock, we must write the time one second
* in advance.
*/
static int pl030_set_time(struct device *dev, struct rtc_time *tm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
unsigned long time;
int ret;
ret = rtc_tm_to_time(tm, &time);
if (ret == 0)
/* +1 compensates for RTC_LR latching on the next 1Hz edge,
 * per the comment block above. */
writel(time + 1, rtc->base + RTC_LR);
return ret;
}
/* RTC class operations wired up at registration time in pl030_probe(). */
static const struct rtc_class_ops pl030_ops = {
.read_time = pl030_read_time,
.set_time = pl030_set_time,
.read_alarm = pl030_read_alarm,
.set_alarm = pl030_set_alarm,
};
/*
 * AMBA probe: claim the bus region, map the registers, quiesce the
 * device (clear control register and pending interrupt), hook the IRQ
 * and register with the RTC class.  Failures unwind in strict reverse
 * order through the goto ladder.
 */
static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
ret = amba_request_regions(dev, NULL);
if (ret)
goto err_req;
rtc = kmalloc(sizeof(*rtc), GFP_KERNEL);
if (!rtc) {
ret = -ENOMEM;
goto err_rtc;
}
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
goto err_map;
}
/* Disable the match interrupt and ack anything pending before
 * the IRQ handler is installed. */
__raw_writel(0, rtc->base + RTC_CR);
__raw_writel(0, rtc->base + RTC_EOI);
amba_set_drvdata(dev, rtc);
ret = request_irq(dev->irq[0], pl030_interrupt, 0,
"rtc-pl030", rtc);
if (ret)
goto err_irq;
rtc->rtc = rtc_device_register("pl030", &dev->dev, &pl030_ops,
THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto err_reg;
}
return 0;
err_reg:
free_irq(dev->irq[0], rtc);
err_irq:
iounmap(rtc->base);
err_map:
kfree(rtc);
err_rtc:
amba_release_regions(dev);
err_req:
return ret;
}
/* Tear down in reverse probe order after disabling the control register. */
static int pl030_remove(struct amba_device *dev)
{
struct pl030_rtc *rtc = amba_get_drvdata(dev);
amba_set_drvdata(dev, NULL);
writel(0, rtc->base + RTC_CR);
free_irq(dev->irq[0], rtc);
rtc_device_unregister(rtc->rtc);
iounmap(rtc->base);
kfree(rtc);
amba_release_regions(dev);
return 0;
}
/* AMBA peripheral IDs matched by this driver (PL030 primecell). */
static struct amba_id pl030_ids[] = {
{
.id = 0x00041030,
.mask = 0x000fffff,
},
{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, pl030_ids);
static struct amba_driver pl030_driver = {
.drv = {
.name = "rtc-pl030",
},
.probe = pl030_probe,
.remove = pl030_remove,
.id_table = pl030_ids,
};
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("ARM AMBA PL030 RTC Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_amazon_hdx-common | drivers/scsi/bfa/bfa_ioc_cb.c | 7899 | 9084 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CB);
/*
* forward declarations
*/
static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc);
static struct bfa_ioc_hwif_s hwif_cb;
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
/*
 * Called from bfa_ioc_attach() to map asic specific calls:
 * populate the Crossbow (CB) hardware-interface vtable and attach
 * it to the given IOC.
 */
void
bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
{
hwif_cb.ioc_pll_init = bfa_ioc_cb_pll_init;
hwif_cb.ioc_firmware_lock = bfa_ioc_cb_firmware_lock;
hwif_cb.ioc_firmware_unlock = bfa_ioc_cb_firmware_unlock;
hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init;
hwif_cb.ioc_map_port = bfa_ioc_cb_map_port;
hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set;
hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail;
hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset;
hwif_cb.ioc_sync_start = bfa_ioc_cb_sync_start;
hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join;
hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave;
hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack;
hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete;
ioc->ioc_hwif = &hwif_cb;
}
/*
* Return true if firmware of current driver matches the running firmware.
*/
/* On CB hardware there is no firmware lock: always report a match. */
static bfa_boolean_t
bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
{
return BFA_TRUE;
}
/* No-op counterpart of the lock above. */
static void
bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
{
}
/*
* Notify other functions on HB failure.
*/
/*
 * Notify other functions on HB failure: set all error bits and read
 * the register back to flush the posted write.
 */
static void
bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
{
writel(~0U, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
}
/*
 * Host to LPU mailbox message addresses, indexed by PCI function (0/1).
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
};
/*
 * Host <-> LPU mailbox command/status registers, same indexing.
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
};
/*
 * Resolve every IOC register pointer relative to BAR0 for this PCI
 * function.  Heartbeat/fwstate registers are chosen by port: each port
 * also keeps a pointer to the *other* port's fwstate (alt_ioc_fwstate)
 * for the failure-sync logic below.
 */
static void
bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
int pcifn = bfa_ioc_pcifn(ioc);
rb = bfa_ioc_bar0(ioc);
ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
if (ioc->port_id == 0) {
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
} else {
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG);
}
/*
 * Host <-> LPU mailbox command/status registers
 */
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
/*
 * PSS control registers
 */
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
/*
 * IOC semaphore registers and serialization
 */
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
/*
 * sram memory access
 */
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
/*
 * err set reg : for notification of hb failure
 */
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
* Initialize IOC to port mapping.
*/
/*
 * Initialize IOC to port mapping.
 */
static void
bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
{
/*
 * For crossbow, port id is same as pci function.
 */
ioc->port_id = bfa_ioc_pcifn(ioc);
bfa_trc(ioc, ioc->port_id);
}
/*
* Set interrupt mode for a function: INTX or MSIX
*/
/* INTX/MSIX selection is a no-op on CB hardware. */
static void
bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
}
/*
* Synchronized IOC failure processing routines
*/
static bfa_boolean_t
bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc)
{
/* CB keeps no extra sync state; start readiness is the same check
 * as failure-sync completion. */
return bfa_ioc_cb_sync_complete(ioc);
}
/*
* Cleanup hw semaphore and usecnt registers
*/
/* Release the hardware semaphore owned by this function. */
static void
bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
{
/*
 * Read the hw sem reg to make sure that it is locked
 * before we clear it. If it is not locked, writing 1
 * will lock it instead of clearing it.
 */
readl(ioc->ioc_regs.ioc_sem_reg);
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/*
* Synchronized IOC failure processing routines
*/
static void
bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc)
{
/* No-op on crossbow: there is no shared sync register to join. */
}
static void
bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc)
{
/* No-op on crossbow: there is no shared sync register to leave. */
}
static void
bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc)
{
/* Acknowledge a failure by marking this IOC's fw state as failed. */
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
}
static bfa_boolean_t
bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t cur_state;

	/*
	 * The caller holds the hw semaphore (start/fwcheck path, the
	 * disable/enable path, or while checking whether the other IOC
	 * acknowledged a failure).  Normally this IOC is in UNINIT,
	 * INITING, DISABLED, FAIL or MEMTEST here; OP can still be seen
	 * at OS-driver startup if the OptROM left the IOC running.
	 *
	 * If our fwstate was marked BFI_IOC_FAIL and it no longer reads
	 * as FAIL, the other PCI function reinitialized the ASIC or this
	 * IOC got disabled — treat the sync as complete.
	 */
	cur_state = readl(ioc->ioc_regs.ioc_fwstate);
	switch (cur_state) {
	case BFI_IOC_UNINIT:
	case BFI_IOC_INITING:
	case BFI_IOC_DISABLED:
	case BFI_IOC_MEMTEST:
	case BFI_IOC_OP:
		return BFA_TRUE;
	default:
		break;
	}

	/*
	 * Our fwstate is FAIL: sync is complete only if the peer IOC is
	 * in a quiescent or failed state as well.
	 */
	switch (readl(ioc->ioc_regs.alt_ioc_fwstate)) {
	case BFI_IOC_FAIL:
	case BFI_IOC_DISABLED:
	case BFI_IOC_UNINIT:
	case BFI_IOC_INITING:
	case BFI_IOC_MEMTEST:
		return BFA_TRUE;
	default:
		return BFA_FALSE;
	}
}
/*
 * Bring up the crossbow application PLLs (SCLK and LCLK).
 * The exact write sequence (reset, bypass, un-bypass, program, settle)
 * follows the ASIC bring-up procedure; do not reorder.
 * NOTE(review): the fcmode parameter is unused here — confirm intentional.
 */
bfa_status_t
bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
{
u32 pll_sclk, pll_fclk;
pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
__APP_PLL_SCLK_P0_1(3U) |
__APP_PLL_SCLK_JITLMT0_1(3U) |
__APP_PLL_SCLK_CNTLMT0_1(3U);
pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
__APP_PLL_LCLK_JITLMT0_1(3U) |
__APP_PLL_LCLK_CNTLMT0_1(3U);
/* Reset both IOC fw states and mask/clear host interrupts first. */
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
/* Mask written again after clearing status — presumably required by
 * the hw sequence; TODO confirm against the ASIC programming guide. */
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
/* Hold both PLLs in logic soft reset, then enable bypass. */
writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_SCLK_CTL_REG);
writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_LCLK_CTL_REG);
udelay(2);
/* Drop bypass, program the PLL configuration while still in reset. */
writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_SCLK_CTL_REG);
writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_LCLK_CTL_REG);
/* Allow the PLLs to lock, then release reset and clear stale ints. */
udelay(2000);
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
return BFA_STATUS_OK;
}
| gpl-2.0 |
MyAOSP/kernel_htc_msm8960 | drivers/infiniband/hw/qib/qib_intr.c | 7899 | 7632 | /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/delay.h>
#include "qib.h"
#include "qib_common.h"
/**
* qib_format_hwmsg - format a single hwerror message
* @msg message buffer
* @msgl length of message buffer
* @hwmsg message to add to message buffer
*/
static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
{
	/* Append "[hwmsg]" to the bounded message buffer. */
	const char *parts[] = { "[", hwmsg, "]" };
	size_t i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++)
		strlcat(msg, parts[i], msgl);
}
/**
* qib_format_hwerrors - format hardware error messages for display
* @hwerrs hardware errors bit vector
* @hwerrmsgs hardware error descriptions
* @nhwerrmsgs number of hwerrmsgs
* @msg message buffer
* @msgl message buffer length
*/
void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
			 size_t nhwerrmsgs, char *msg, size_t msgl)
{
	/*
	 * size_t loop index matches nhwerrmsgs; the original 'int i'
	 * triggered a signed/unsigned comparison (and could not cover
	 * the full size_t range).
	 */
	size_t i;

	/* Append one bracketed description per error bit that is set. */
	for (i = 0; i < nhwerrmsgs; i++)
		if (hwerrs & hwerrmsgs[i].mask)
			qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
}
/* Dispatch an IB event (e.g. PORT_ERR/PORT_ACTIVE) for the given port. */
static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
{
struct ib_event event;
struct qib_devdata *dd = ppd->dd;
event.device = &dd->verbs_dev.ibdev;
event.element.port_num = ppd->port;
event.event = ev;
ib_dispatch_event(&event);
}
/*
 * Handle an IB link state change interrupt: derive the new logical and
 * physical link state from @ibcs, update ppd->lflags under lflags_lock,
 * and emit an IB event (PORT_ERR / PORT_ACTIVE) when appropriate.
 */
void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
{
struct qib_devdata *dd = ppd->dd;
unsigned long flags;
u32 lstate;
u8 ltstate;
/* NOTE(review): 0 is used as a "no event" sentinel; it is not a real
 * ib_event_type value. */
enum ib_event_type ev = 0;
lstate = dd->f_iblink_state(ibcs); /* linkstate */
ltstate = dd->f_ibphys_portstate(ibcs);
/*
 * If linkstate transitions into INIT from any of the various down
 * states, or if it transitions from any of the up (INIT or better)
 * states into any of the down states (except link recovery), then
 * call the chip-specific code to take appropriate actions.
 *
 * ppd->lflags could be 0 if this is the first time the interrupt
 * handlers has been called but the link is already up.
 */
if (lstate >= IB_PORT_INIT &&
(!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
ltstate == IB_PHYSPORTSTATE_LINKUP) {
/* transitioned to UP */
if (dd->f_ib_updown(ppd, 1, ibcs))
goto skip_ibchange; /* chip-code handled */
} else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
dd->f_ib_updown(ppd, 0, ibcs))
goto skip_ibchange; /* chip-code handled */
qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
}
if (lstate != IB_PORT_DOWN) {
/* lstate is INIT, ARMED, or ACTIVE */
if (lstate != IB_PORT_ACTIVE) {
/* INIT or ARMED: not ready for traffic yet */
*ppd->statusp &= ~QIB_STATUS_IB_READY;
if (ppd->lflags & QIBL_LINKACTIVE)
ev = IB_EVENT_PORT_ERR;
spin_lock_irqsave(&ppd->lflags_lock, flags);
if (lstate == IB_PORT_ARMED) {
ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKINIT |
QIBL_LINKDOWN | QIBL_LINKACTIVE);
} else {
ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKARMED |
QIBL_LINKDOWN | QIBL_LINKACTIVE);
}
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
/* start a 75msec timer to clear symbol errors */
mod_timer(&ppd->symerr_clear_timer,
msecs_to_jiffies(75));
} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
!(ppd->lflags & QIBL_LINKACTIVE)) {
/* active, but not active defered */
qib_hol_up(ppd); /* useful only for 6120 now */
*ppd->statusp |=
QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
qib_clear_symerror_on_linkup((unsigned long)ppd);
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKINIT |
QIBL_LINKDOWN | QIBL_LINKARMED);
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
if (dd->flags & QIB_HAS_SEND_DMA)
qib_sdma_process_event(ppd,
qib_sdma_event_e30_go_running);
ev = IB_EVENT_PORT_ACTIVE;
dd->f_setextled(ppd, 1);
}
} else { /* down */
if (ppd->lflags & QIBL_LINKACTIVE)
ev = IB_EVENT_PORT_ERR;
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKINIT |
QIBL_LINKACTIVE | QIBL_LINKARMED);
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
*ppd->statusp &= ~QIB_STATUS_IB_READY;
}
skip_ibchange:
/* Remember the raw status word; the IB event (if any) is sent after
 * all flag updates so observers see a consistent state. */
ppd->lastibcstat = ibcs;
if (ev)
signal_ib_event(ppd, ev);
return;
}
/*
 * Timer callback (also called directly on link-up): snapshot the symbol
 * error counter as the new zero baseline, unless the link already went
 * ACTIVE (in which case the errors are real and must not be hidden).
 * @opaque is the struct qib_pportdata pointer cast for the timer API.
 */
void qib_clear_symerror_on_linkup(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
if (ppd->lflags & QIBL_LINKACTIVE)
return;
ppd->ibport_data.z_symbol_error_counter =
ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
}
/*
* Handle receive interrupts for user ctxts; this means a user
* process was waiting for a packet to arrive, and didn't want
* to poll.
*/
/*
 * Wake user contexts whose bit is set in @ctxtr: contexts blocked on
 * packet arrival get their receive-available interrupt disabled (they
 * will re-enable it when they sleep again); contexts waiting for urgent
 * packets just get woken with the urgent count bumped.
 * Runs with uctxt_lock held across the whole scan.
 */
void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
{
struct qib_ctxtdata *rcd;
unsigned long flags;
int i;
spin_lock_irqsave(&dd->uctxt_lock, flags);
for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
if (!(ctxtr & (1ULL << i)))
continue;
rcd = dd->rcd[i];
/* skip unallocated contexts or ones with no waiters */
if (!rcd || !rcd->cnt)
continue;
if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
wake_up_interruptible(&rcd->wait);
dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
rcd->ctxt);
} else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
&rcd->flag)) {
rcd->urgent++;
wake_up_interruptible(&rcd->wait);
}
}
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
}
/*
 * Called when reading the chip interrupt status register failed
 * (typically the device fell off the bus).  Escalates on repeated
 * calls: 2nd call disables interrupt delivery, 3rd unregisters the
 * IRQ and marks the device bad.
 * NOTE(review): 'allbits' is a static counter shared across all devices
 * using this driver, and the error string reads "failed disabling" —
 * a comma/newline appears to be missing between the two clauses.
 */
void qib_bad_intrstatus(struct qib_devdata *dd)
{
static int allbits;
/* separate routine, for better optimization of qib_intr() */
/*
 * We print the message and disable interrupts, in hope of
 * having a better chance of debugging the problem.
 */
qib_dev_err(dd, "Read of chip interrupt status failed"
" disabling interrupts\n");
if (allbits++) {
/* disable interrupt delivery, something is very wrong */
if (allbits == 2)
dd->f_set_intr_state(dd, 0);
if (allbits == 3) {
qib_dev_err(dd, "2nd bad interrupt status, "
"unregistering interrupts\n");
dd->flags |= QIB_BADINTR;
dd->flags &= ~QIB_INITTED;
dd->f_free_irq(dd);
}
}
}
| gpl-2.0 |
voltagex/msm | sound/pci/ca0106/ca0106_proc.c | 8155 | 14349 | /*
* Copyright (c) 2004 James Courtier-Dutton <James@superbug.demon.co.uk>
* Driver CA0106 chips. e.g. Sound Blaster Audigy LS and Live 24bit
* Version: 0.0.18
*
* FEATURES currently supported:
* See ca0106_main.c for features.
*
* Changelog:
* Support interrupts per period.
* Removed noise from Center/LFE channel when in Analog mode.
* Rename and remove mixer controls.
* 0.0.6
* Use separate card based DMA buffer for periods table list.
* 0.0.7
* Change remove and rename ctrls into lists.
* 0.0.8
* Try to fix capture sources.
* 0.0.9
* Fix AC3 output.
* Enable S32_LE format support.
* 0.0.10
* Enable playback 48000 and 96000 rates. (Rates other that these do not work, even with "plug:front".)
* 0.0.11
* Add Model name recognition.
* 0.0.12
* Correct interrupt timing. interrupt at end of period, instead of in the middle of a playback period.
* Remove redundent "voice" handling.
* 0.0.13
* Single trigger call for multi channels.
* 0.0.14
* Set limits based on what the sound card hardware can do.
* playback periods_min=2, periods_max=8
* capture hw constraints require period_size = n * 64 bytes.
* playback hw constraints require period_size = n * 64 bytes.
* 0.0.15
* Separate ca0106.c into separate functional .c files.
* 0.0.16
* Modified Copyright message.
* 0.0.17
* Add iec958 file in proc file system to show status of SPDIF in.
* 0.0.18
* Implement support for Line-in capture on SB Live 24bit.
*
* This code was initially based on code from ALSA's emu10k1x.c which is:
* Copyright (c) by Francisco Moraes <fmoraes@nc.rr.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/asoundef.h>
#include <asm/io.h>
#include "ca0106.h"
#ifdef CONFIG_PROC_FS
/* Maps an IEC958 consumer category code to a human-readable name. */
struct snd_ca0106_category_str {
int val;	/* IEC958_AES1_CON_* category value */
const char *name;	/* display string for the proc file */
};
/* IEC958 consumer-mode category code -> display-name table. */
static struct snd_ca0106_category_str snd_ca0106_con_category[] = {
{ IEC958_AES1_CON_DAT, "DAT" },
{ IEC958_AES1_CON_VCR, "VCR" },
{ IEC958_AES1_CON_MICROPHONE, "microphone" },
{ IEC958_AES1_CON_SYNTHESIZER, "synthesizer" },
{ IEC958_AES1_CON_RATE_CONVERTER, "rate converter" },
{ IEC958_AES1_CON_MIXER, "mixer" },
{ IEC958_AES1_CON_SAMPLER, "sampler" },
{ IEC958_AES1_CON_PCM_CODER, "PCM coder" },
{ IEC958_AES1_CON_IEC908_CD, "CD" },
{ IEC958_AES1_CON_NON_IEC908_CD, "non-IEC908 CD" },
{ IEC958_AES1_CON_GENERAL, "general" },
};
/*
 * Decode a 32-bit IEC958 (S/PDIF) channel-status word into human-readable
 * lines on the proc buffer.  The word is split into its four status bytes
 * and interpreted per the consumer or professional layout depending on
 * the PROFESSIONAL bit.
 */
static void snd_ca0106_proc_dump_iec958( struct snd_info_buffer *buffer, u32 value)
{
int i;
u32 status[4];
/* status[n] is channel-status byte n (little-endian packing) */
status[0] = value & 0xff;
status[1] = (value >> 8) & 0xff;
status[2] = (value >> 16) & 0xff;
status[3] = (value >> 24) & 0xff;
if (! (status[0] & IEC958_AES0_PROFESSIONAL)) {
/* consumer */
snd_iprintf(buffer, "Mode: consumer\n");
snd_iprintf(buffer, "Data: ");
if (!(status[0] & IEC958_AES0_NONAUDIO)) {
snd_iprintf(buffer, "audio\n");
} else {
snd_iprintf(buffer, "non-audio\n");
}
snd_iprintf(buffer, "Rate: ");
switch (status[3] & IEC958_AES3_CON_FS) {
case IEC958_AES3_CON_FS_44100:
snd_iprintf(buffer, "44100 Hz\n");
break;
case IEC958_AES3_CON_FS_48000:
snd_iprintf(buffer, "48000 Hz\n");
break;
case IEC958_AES3_CON_FS_32000:
snd_iprintf(buffer, "32000 Hz\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
snd_iprintf(buffer, "Copyright: ");
if (status[0] & IEC958_AES0_CON_NOT_COPYRIGHT) {
snd_iprintf(buffer, "permitted\n");
} else {
snd_iprintf(buffer, "protected\n");
}
snd_iprintf(buffer, "Emphasis: ");
if ((status[0] & IEC958_AES0_CON_EMPHASIS) != IEC958_AES0_CON_EMPHASIS_5015) {
snd_iprintf(buffer, "none\n");
} else {
snd_iprintf(buffer, "50/15us\n");
}
/* Category: look up byte 1's category bits in the name table. */
snd_iprintf(buffer, "Category: ");
for (i = 0; i < ARRAY_SIZE(snd_ca0106_con_category); i++) {
if ((status[1] & IEC958_AES1_CON_CATEGORY) == snd_ca0106_con_category[i].val) {
snd_iprintf(buffer, "%s\n", snd_ca0106_con_category[i].name);
break;
}
}
if (i >= ARRAY_SIZE(snd_ca0106_con_category)) {
snd_iprintf(buffer, "unknown 0x%x\n", status[1] & IEC958_AES1_CON_CATEGORY);
}
snd_iprintf(buffer, "Original: ");
if (status[1] & IEC958_AES1_CON_ORIGINAL) {
snd_iprintf(buffer, "original\n");
} else {
snd_iprintf(buffer, "1st generation\n");
}
snd_iprintf(buffer, "Clock: ");
switch (status[3] & IEC958_AES3_CON_CLOCK) {
case IEC958_AES3_CON_CLOCK_1000PPM:
snd_iprintf(buffer, "1000 ppm\n");
break;
case IEC958_AES3_CON_CLOCK_50PPM:
snd_iprintf(buffer, "50 ppm\n");
break;
case IEC958_AES3_CON_CLOCK_VARIABLE:
snd_iprintf(buffer, "variable pitch\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
} else {
/* professional layout: rate/emphasis live in byte 0 instead */
snd_iprintf(buffer, "Mode: professional\n");
snd_iprintf(buffer, "Data: ");
if (!(status[0] & IEC958_AES0_NONAUDIO)) {
snd_iprintf(buffer, "audio\n");
} else {
snd_iprintf(buffer, "non-audio\n");
}
snd_iprintf(buffer, "Rate: ");
switch (status[0] & IEC958_AES0_PRO_FS) {
case IEC958_AES0_PRO_FS_44100:
snd_iprintf(buffer, "44100 Hz\n");
break;
case IEC958_AES0_PRO_FS_48000:
snd_iprintf(buffer, "48000 Hz\n");
break;
case IEC958_AES0_PRO_FS_32000:
snd_iprintf(buffer, "32000 Hz\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
snd_iprintf(buffer, "Rate Locked: ");
if (status[0] & IEC958_AES0_PRO_FREQ_UNLOCKED)
snd_iprintf(buffer, "no\n");
else
snd_iprintf(buffer, "yes\n");
snd_iprintf(buffer, "Emphasis: ");
switch (status[0] & IEC958_AES0_PRO_EMPHASIS) {
case IEC958_AES0_PRO_EMPHASIS_CCITT:
snd_iprintf(buffer, "CCITT J.17\n");
break;
case IEC958_AES0_PRO_EMPHASIS_NONE:
snd_iprintf(buffer, "none\n");
break;
case IEC958_AES0_PRO_EMPHASIS_5015:
snd_iprintf(buffer, "50/15us\n");
break;
case IEC958_AES0_PRO_EMPHASIS_NOTID:
default:
snd_iprintf(buffer, "unknown\n");
break;
}
snd_iprintf(buffer, "Stereophonic: ");
if ((status[1] & IEC958_AES1_PRO_MODE) == IEC958_AES1_PRO_MODE_STEREOPHONIC) {
snd_iprintf(buffer, "stereo\n");
} else {
snd_iprintf(buffer, "not indicated\n");
}
snd_iprintf(buffer, "Userbits: ");
switch (status[1] & IEC958_AES1_PRO_USERBITS) {
case IEC958_AES1_PRO_USERBITS_192:
snd_iprintf(buffer, "192bit\n");
break;
case IEC958_AES1_PRO_USERBITS_UDEF:
snd_iprintf(buffer, "user-defined\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
snd_iprintf(buffer, "Sample Bits: ");
switch (status[2] & IEC958_AES2_PRO_SBITS) {
case IEC958_AES2_PRO_SBITS_20:
snd_iprintf(buffer, "20 bit\n");
break;
case IEC958_AES2_PRO_SBITS_24:
snd_iprintf(buffer, "24 bit\n");
break;
case IEC958_AES2_PRO_SBITS_UDEF:
snd_iprintf(buffer, "user defined\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
snd_iprintf(buffer, "Word Length: ");
switch (status[2] & IEC958_AES2_PRO_WORDLEN) {
case IEC958_AES2_PRO_WORDLEN_22_18:
snd_iprintf(buffer, "22 bit or 18 bit\n");
break;
case IEC958_AES2_PRO_WORDLEN_23_19:
snd_iprintf(buffer, "23 bit or 19 bit\n");
break;
case IEC958_AES2_PRO_WORDLEN_24_20:
snd_iprintf(buffer, "24 bit or 20 bit\n");
break;
case IEC958_AES2_PRO_WORDLEN_20_16:
snd_iprintf(buffer, "20 bit or 16 bit\n");
break;
default:
snd_iprintf(buffer, "unknown\n");
break;
}
}
}
/*
 * proc read handler for "iec958": report the sample-rate tracker status
 * and, when an S/PDIF input is locked, decode its channel-status word.
 */
static void snd_ca0106_proc_iec958(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ca0106 *emu = entry->private_data;
u32 value;
value = snd_ca0106_ptr_read(emu, SAMPLE_RATE_TRACKER_STATUS, 0);
/* bits 20-22 are lock/valid flags; bits 0-19 encode the rate */
snd_iprintf(buffer, "Status: %s, %s, %s\n",
(value & 0x100000) ? "Rate Locked" : "Not Rate Locked",
(value & 0x200000) ? "SPDIF Locked" : "No SPDIF Lock",
(value & 0x400000) ? "Audio Valid" : "No valid audio" );
/* rate field is a 48 kHz-relative fraction scaled by 0x8000 */
snd_iprintf(buffer, "Estimated sample rate: %u\n",
((value & 0xfffff) * 48000) / 0x8000 );
if (value & 0x200000) {
snd_iprintf(buffer, "IEC958/SPDIF input status:\n");
value = snd_ca0106_ptr_read(emu, SPDIF_INPUT_STATUS, 0);
snd_ca0106_proc_dump_iec958(buffer, value);
}
snd_iprintf(buffer, "\n");
}
/*
 * proc write handler for "ca0106_reg32": each input line is
 * "<reg> <val>" in hex; writes val to the 32-bit-aligned I/O port
 * register under the emu lock.
 */
static void snd_ca0106_proc_reg_write32(struct snd_info_entry *entry,
					struct snd_info_buffer *buffer)
{
	struct snd_ca0106 *emu = entry->private_data;
	unsigned long flags;
	char line[64];
	u32 reg, val;

	while (!snd_info_get_line(buffer, line, sizeof(line))) {
		if (sscanf(line, "%x %x", &reg, &val) != 2)
			continue;
		/*
		 * 'val <= 0xffffffff' was a tautology for a u32 and has
		 * been dropped; only the register offset needs checking.
		 */
		if (reg < 0x40) {
			spin_lock_irqsave(&emu->emu_lock, flags);
			outl(val, emu->port + (reg & 0xfffffffc));
			spin_unlock_irqrestore(&emu->emu_lock, flags);
		}
	}
}
/*
 * proc read handler for "ca0106_reg32": dump the first 0x20 bytes of
 * the I/O port space as 32-bit registers, each read under the emu lock.
 */
static void snd_ca0106_proc_reg_read32(struct snd_info_entry *entry,
				       struct snd_info_buffer *buffer)
{
	struct snd_ca0106 *emu = entry->private_data;
	unsigned long flags;
	int reg;

	snd_iprintf(buffer, "Registers:\n\n");
	for (reg = 0; reg < 0x20; reg += 4) {
		unsigned long val;

		spin_lock_irqsave(&emu->emu_lock, flags);
		val = inl(emu->port + reg);
		spin_unlock_irqrestore(&emu->emu_lock, flags);
		snd_iprintf(buffer, "Register %02X: %08lX\n", reg, val);
	}
}
/*
 * proc read handler for "ca0106_reg16": dump the first 0x20 bytes of the
 * I/O port space as 16-bit registers, each read under the emu lock.
 */
static void snd_ca0106_proc_reg_read16(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ca0106 *emu = entry->private_data;
unsigned int value;
unsigned long flags;
int i;
snd_iprintf(buffer, "Registers:\n\n");
for(i = 0; i < 0x20; i+=2) {
spin_lock_irqsave(&emu->emu_lock, flags);
value = inw(emu->port + i);
spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "Register %02X: %04X\n", i, value);
}
}
/*
 * proc read handler for "ca0106_reg8": dump the first 0x20 bytes of the
 * I/O port space as 8-bit registers, each read under the emu lock.
 */
static void snd_ca0106_proc_reg_read8(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ca0106 *emu = entry->private_data;
unsigned int value;
unsigned long flags;
int i;
snd_iprintf(buffer, "Registers:\n\n");
for(i = 0; i < 0x20; i+=1) {
spin_lock_irqsave(&emu->emu_lock, flags);
value = inb(emu->port + i);
spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "Register %02X: %02X\n", i, value);
}
}
/*
 * proc read handler for "ca0106_regs1": dump indirect (pointer-accessed)
 * registers 0x00-0x3f for all four channels.
 */
static void snd_ca0106_proc_reg_read1(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ca0106 *emu = entry->private_data;
unsigned long value;
int i,j;
snd_iprintf(buffer, "Registers\n");
for(i = 0; i < 0x40; i++) {
snd_iprintf(buffer, "%02X: ",i);
for (j = 0; j < 4; j++) {
value = snd_ca0106_ptr_read(emu, i, j);
snd_iprintf(buffer, "%08lX ", value);
}
snd_iprintf(buffer, "\n");
}
}
/*
 * proc read handler for "ca0106_regs2": dump indirect (pointer-accessed)
 * registers 0x40-0x7f for all four channels.
 */
static void snd_ca0106_proc_reg_read2(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ca0106 *emu = entry->private_data;
unsigned long value;
int i,j;
snd_iprintf(buffer, "Registers\n");
for(i = 0x40; i < 0x80; i++) {
snd_iprintf(buffer, "%02X: ",i);
for (j = 0; j < 4; j++) {
value = snd_ca0106_ptr_read(emu, i, j);
snd_iprintf(buffer, "%08lX ", value);
}
snd_iprintf(buffer, "\n");
}
}
/*
 * proc write handler for "ca0106_regs1": each input line is
 * "<reg> <channel> <val>" in hex; writes to the indirect register of the
 * given channel (0-3).
 */
static void snd_ca0106_proc_reg_write(struct snd_info_entry *entry,
				      struct snd_info_buffer *buffer)
{
	struct snd_ca0106 *emu = entry->private_data;
	char line[64];
	unsigned int reg, channel_id , val;

	while (!snd_info_get_line(buffer, line, sizeof(line))) {
		if (sscanf(line, "%x %x %x", &reg, &channel_id, &val) != 3)
			continue;
		/*
		 * 'val <= 0xffffffff' was a tautology for a 32-bit value
		 * and has been dropped; reg and channel still need range
		 * checks.
		 */
		if (reg < 0x80 && channel_id <= 3)
			snd_ca0106_ptr_write(emu, reg, channel_id, val);
	}
}
/*
 * proc write handler for "ca0106_i2c": each input line is "<reg> <val>"
 * in hex; writes a 9-bit value to a 7-bit I2C register on the codec.
 */
static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry,
				      struct snd_info_buffer *buffer)
{
	struct snd_ca0106 *emu = entry->private_data;
	char line[64];
	unsigned int reg, val;

	while (!snd_info_get_line(buffer, line, sizeof(line))) {
		if (sscanf(line, "%x %x", &reg, &val) != 2)
			continue;
		/*
		 * Both ranges must hold before touching the codec.  The
		 * original used '||', which performed the write whenever
		 * either the register OR the value was in range, letting
		 * out-of-range registers/values through.
		 */
		if (reg <= 0x7f && val <= 0x1ff)
			snd_ca0106_i2c_write(emu, reg, val);
	}
}
/*
 * Create the ca0106 proc entries: read-only register dumps, plus
 * writable entries (S_IWUSR) for reg32/regs1/i2c poking.
 * Returns 0; entry-creation failures are silently skipped.
 */
int __devinit snd_ca0106_proc_init(struct snd_ca0106 * emu)
{
struct snd_info_entry *entry;
if(! snd_card_proc_new(emu->card, "iec958", &entry))
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_iec958);
if(! snd_card_proc_new(emu->card, "ca0106_reg32", &entry)) {
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read32);
entry->c.text.write = snd_ca0106_proc_reg_write32;
entry->mode |= S_IWUSR;
}
if(! snd_card_proc_new(emu->card, "ca0106_reg16", &entry))
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read16);
if(! snd_card_proc_new(emu->card, "ca0106_reg8", &entry))
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read8);
if(! snd_card_proc_new(emu->card, "ca0106_regs1", &entry)) {
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read1);
entry->c.text.write = snd_ca0106_proc_reg_write;
entry->mode |= S_IWUSR;
}
/* write-only entry: no text ops, so set private_data by hand */
if(! snd_card_proc_new(emu->card, "ca0106_i2c", &entry)) {
entry->c.text.write = snd_ca0106_proc_i2c_write;
entry->private_data = emu;
entry->mode |= S_IWUSR;
}
if(! snd_card_proc_new(emu->card, "ca0106_regs2", &entry))
snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read2);
return 0;
}
#endif /* CONFIG_PROC_FS */
| gpl-2.0 |
tyler6389/count_kernel_grand | sound/pci/ice1712/pontis.c | 8155 | 22572 | /*
* ALSA driver for ICEnsemble VT1724 (Envy24HT)
*
* Lowlevel functions for Pontis MS300
*
* Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/tlv.h>
#include "ice1712.h"
#include "envy24ht.h"
#include "pontis.h"
/* I2C addresses */
#define WM_DEV 0x34 /* WM8776 codec */
#define CS_DEV 0x20 /* CS8416 S/PDIF receiver */
/* WM8776 registers */
#define WM_HP_ATTEN_L 0x00 /* headphone left attenuation */
#define WM_HP_ATTEN_R 0x01 /* headphone right attenuation */
#define WM_HP_MASTER 0x02 /* headphone master (both channels) */
/* override LLR */
#define WM_DAC_ATTEN_L 0x03 /* digital left attenuation */
#define WM_DAC_ATTEN_R 0x04 /* digital right attenuation */
#define WM_DAC_MASTER 0x05 /* digital master (both channels) */
#define WM_PHASE_SWAP 0x06 /* DAC phase swap */
#define WM_DAC_CTRL1 0x07
#define WM_DAC_MUTE 0x08
#define WM_DAC_CTRL2 0x09
#define WM_DAC_INT 0x0a
#define WM_ADC_INT 0x0b
#define WM_MASTER_CTRL 0x0c
#define WM_POWERDOWN 0x0d
#define WM_ADC_ATTEN_L 0x0e
#define WM_ADC_ATTEN_R 0x0f
#define WM_ALC_CTRL1 0x10
#define WM_ALC_CTRL2 0x11
#define WM_ALC_CTRL3 0x12
#define WM_NOISE_GATE 0x13
#define WM_LIMITER 0x14
#define WM_ADC_MUX 0x15
#define WM_OUT_MUX 0x16
#define WM_RESET 0x17
/*
 * GPIO pins used to bit-bang the CS8416 SPI-style interface
 */
#define PONTIS_CS_CS (1<<4) /* CS */
#define PONTIS_CS_CLK (1<<5) /* CLK */
#define PONTIS_CS_RDATA (1<<6) /* CS8416 -> VT1720 */
#define PONTIS_CS_WDATA (1<<7) /* VT1720 -> CS8416 */
/*
* get the current register value of WM codec
*/
/*
 * Return the cached value of a WM8776 register from the akm image
 * (two bytes per register, high byte first).
 */
static unsigned short wm_get(struct snd_ice1712 *ice, int reg)
{
	int idx = reg * 2;
	unsigned short hi = ice->akm[0].images[idx];
	unsigned short lo = ice->akm[0].images[idx + 1];

	return (unsigned short)((hi << 8) | lo);
}
/*
* set the register value of WM codec and remember it
*/
/*
 * Write a WM8776 register over I2C without updating the cached image.
 * The 16-bit I2C payload packs a 7-bit register and 9-bit value.
 */
static void wm_put_nocache(struct snd_ice1712 *ice, int reg, unsigned short val)
{
unsigned short cval;
cval = (reg << 9) | val;
snd_vt1724_write_i2c(ice, WM_DEV, cval >> 8, cval & 0xff);
}
/* Write a WM8776 register and mirror the new value into the cache. */
static void wm_put(struct snd_ice1712 *ice, int reg, unsigned short val)
{
wm_put_nocache(ice, reg, val);
reg <<= 1;
ice->akm[0].images[reg] = val >> 8;
ice->akm[0].images[reg + 1] = val;
}
/*
* DAC volume attenuation mixer control (-64dB to 0dB)
*/
#define DAC_0dB 0xff	/* register value for 0 dB */
#define DAC_RES 128	/* control range: 128 steps of 0.5 dB */
#define DAC_MIN (DAC_0dB - DAC_RES)	/* register value at the mute floor */
/* Stereo integer control: 0 = mute, DAC_RES = 0 dB, 0.5 dB per step. */
static int wm_dac_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->count = 2;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->value.integer.max = DAC_RES;	/* 0dB, 0.5dB step */
	uinfo->value.integer.min = 0;		/* mute */
	return 0;
}
/* Read both DAC attenuation registers from the cache and map the raw
 * value (DAC_MIN..DAC_0dB) to the control range (0..DAC_RES). */
static int wm_dac_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short val;
int i;
mutex_lock(&ice->gpio_mutex);
for (i = 0; i < 2; i++) {
val = wm_get(ice, WM_DAC_ATTEN_L + i) & 0xff;
val = val > DAC_MIN ? (val - DAC_MIN) : 0;
ucontrol->value.integer.value[i] = val;
}
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Update the DAC attenuation registers when the value changed; returns
 * 1 if anything was written, 0 otherwise (ALSA put convention). */
static int wm_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short oval, nval;
int i, idx, change = 0;
mutex_lock(&ice->gpio_mutex);
for (i = 0; i < 2; i++) {
nval = ucontrol->value.integer.value[i];
nval = (nval ? (nval + DAC_MIN) : 0) & 0xff;
idx = WM_DAC_ATTEN_L + i;
oval = wm_get(ice, idx) & 0xff;
if (oval != nval) {
wm_put(ice, idx, nval);
/* second, uncached write with bit 8 set — presumably the
 * WM8776 volume-update latch bit; confirm with datasheet */
wm_put_nocache(ice, idx, nval | 0x100);
change = 1;
}
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* ADC gain mixer control (-64dB to 0dB)
*/
#define ADC_0dB 0xcf	/* register value for 0 dB */
#define ADC_RES 128	/* control range: 128 steps of 0.5 dB */
#define ADC_MIN (ADC_0dB - ADC_RES)	/* register value at the mute floor */
/* Stereo integer control: 0 = mute (-64 dB), ADC_RES = 0 dB, 0.5 dB/step. */
static int wm_adc_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->count = 2;
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->value.integer.max = ADC_RES;	/* 0dB, 0.5dB step */
	uinfo->value.integer.min = 0;		/* mute (-64dB) */
	return 0;
}
/* Read both ADC gain registers from the cache and map the raw value
 * (ADC_MIN..ADC_0dB) to the control range (0..ADC_RES). */
static int wm_adc_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short val;
int i;
mutex_lock(&ice->gpio_mutex);
for (i = 0; i < 2; i++) {
val = wm_get(ice, WM_ADC_ATTEN_L + i) & 0xff;
val = val > ADC_MIN ? (val - ADC_MIN) : 0;
ucontrol->value.integer.value[i] = val;
}
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Update the ADC gain registers when the value changed; returns 1 if
 * anything was written, 0 otherwise (ALSA put convention). */
static int wm_adc_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short ovol, nvol;
int i, idx, change = 0;
mutex_lock(&ice->gpio_mutex);
for (i = 0; i < 2; i++) {
nvol = ucontrol->value.integer.value[i];
nvol = nvol ? (nvol + ADC_MIN) : 0;
idx = WM_ADC_ATTEN_L + i;
ovol = wm_get(ice, idx) & 0xff;
if (ovol != nvol) {
wm_put(ice, idx, nvol);
change = 1;
}
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* ADC input mux mixer control
*/
#define wm_adc_mux_info snd_ctl_boolean_mono_info /* single on/off switch */
/* Report whether the mux bit (from kcontrol->private_value) is set in
 * the cached WM_ADC_MUX register. */
static int wm_adc_mux_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
int bit = kcontrol->private_value;
mutex_lock(&ice->gpio_mutex);
ucontrol->value.integer.value[0] = (wm_get(ice, WM_ADC_MUX) & (1 << bit)) ? 1 : 0;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Set or clear the mux bit (from kcontrol->private_value) in
 * WM_ADC_MUX; returns 1 when the register actually changed. */
static int wm_adc_mux_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
int bit = kcontrol->private_value;
unsigned short oval, nval;
int change;
mutex_lock(&ice->gpio_mutex);
nval = oval = wm_get(ice, WM_ADC_MUX);
if (ucontrol->value.integer.value[0])
nval |= (1 << bit);
else
nval &= ~(1 << bit);
change = nval != oval;
if (change) {
wm_put(ice, WM_ADC_MUX, nval);
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* Analog bypass (In -> Out)
*/
#define wm_bypass_info snd_ctl_boolean_mono_info /* single on/off switch */
/* Report the analog-bypass bit (0x04) of the cached WM_OUT_MUX register. */
static int wm_bypass_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
ucontrol->value.integer.value[0] = (wm_get(ice, WM_OUT_MUX) & 0x04) ? 1 : 0;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Set or clear the analog-bypass bit (0x04) in WM_OUT_MUX; returns 1
 * when the register actually changed. */
static int wm_bypass_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short val, oval;
int change = 0;
mutex_lock(&ice->gpio_mutex);
val = oval = wm_get(ice, WM_OUT_MUX);
if (ucontrol->value.integer.value[0])
val |= 0x04;
else
val &= ~0x04;
if (val != oval) {
wm_put(ice, WM_OUT_MUX, val);
change = 1;
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* Left/Right swap
*/
#define wm_chswap_info snd_ctl_boolean_mono_info /* single on/off switch */
/* Report channel swap: upper nibble of WM_DAC_CTRL1 is 0x90 in the
 * normal (unswapped) configuration, anything else counts as swapped. */
static int wm_chswap_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
ucontrol->value.integer.value[0] = (wm_get(ice, WM_DAC_CTRL1) & 0xf0) != 0x90;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Set the DAC channel routing nibble: 0x60 = swapped, 0x90 = normal.
 * The register is written both cached and uncached (wm_put +
 * wm_put_nocache) — mirrors the dac_vol_put double-write pattern. */
static int wm_chswap_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned short val, oval;
int change = 0;
mutex_lock(&ice->gpio_mutex);
oval = wm_get(ice, WM_DAC_CTRL1);
val = oval & 0x0f;
if (ucontrol->value.integer.value[0])
val |= 0x60;
else
val |= 0x90;
if (val != oval) {
wm_put(ice, WM_DAC_CTRL1, val);
wm_put_nocache(ice, WM_DAC_CTRL1, val);
change = 1;
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* write data in the SPI mode
*/
/* Read-modify-write a single GPIO line: set it when val is non-zero,
 * clear it otherwise.  'bit' is the line's mask, not its index. */
static void set_gpio_bit(struct snd_ice1712 *ice, unsigned int bit, int val)
{
	unsigned int cur = snd_ice1712_gpio_read(ice);

	if (val)
		cur |= bit;
	else
		cur &= ~bit;
	snd_ice1712_gpio_write(ice, cur);
}
/*
 * Bit-bang one byte out on the SPI data line, MSB first: the data bit is
 * presented while the clock is low and latched on the rising edge.
 */
static void spi_send_byte(struct snd_ice1712 *ice, unsigned char data)
{
int i;
for (i = 0; i < 8; i++) {
set_gpio_bit(ice, PONTIS_CS_CLK, 0);
udelay(1);
set_gpio_bit(ice, PONTIS_CS_WDATA, data & 0x80);
udelay(1);
set_gpio_bit(ice, PONTIS_CS_CLK, 1);
udelay(1);
data <<= 1;
}
}
/*
 * Bit-bang one byte in from the SPI data line, MSB first: the read-data
 * line is sampled while the clock is low, then the clock is raised for
 * the next bit.
 */
static unsigned int spi_read_byte(struct snd_ice1712 *ice)
{
int i;
unsigned int val = 0;
for (i = 0; i < 8; i++) {
val <<= 1;
set_gpio_bit(ice, PONTIS_CS_CLK, 0);
udelay(1);
if (snd_ice1712_gpio_read(ice) & PONTIS_CS_RDATA)
val |= 1;
udelay(1);
set_gpio_bit(ice, PONTIS_CS_CLK, 1);
udelay(1);
}
return val;
}
/*
 * Write one register of an SPI-attached device (the CS8416): claim the
 * CS/WDATA/CLK GPIO lines, send <device-address with R/W bit cleared>,
 * <register (MAP)>, <data>, raise CS to commit, then restore the saved
 * GPIO direction and write mask.
 */
static void spi_write(struct snd_ice1712 *ice, unsigned int dev, unsigned int reg, unsigned int data)
{
snd_ice1712_gpio_set_dir(ice, PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK);
snd_ice1712_gpio_set_mask(ice, ~(PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK));
set_gpio_bit(ice, PONTIS_CS_CS, 0);
spi_send_byte(ice, dev & ~1); /* WRITE */
spi_send_byte(ice, reg); /* MAP */
spi_send_byte(ice, data); /* DATA */
/* trigger */
set_gpio_bit(ice, PONTIS_CS_CS, 1);
udelay(1);
/* restore */
snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
}
/*
 * Read one register of an SPI-attached device: first a write transaction
 * that only sets the register address (MAP), then a second transaction
 * with the R/W bit set to clock the value back in.  GPIO direction and
 * mask are restored before returning.
 */
static unsigned int spi_read(struct snd_ice1712 *ice, unsigned int dev, unsigned int reg)
{
unsigned int val;
snd_ice1712_gpio_set_dir(ice, PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK);
snd_ice1712_gpio_set_mask(ice, ~(PONTIS_CS_CS|PONTIS_CS_WDATA|PONTIS_CS_CLK));
set_gpio_bit(ice, PONTIS_CS_CS, 0);
spi_send_byte(ice, dev & ~1); /* WRITE */
spi_send_byte(ice, reg); /* MAP */
/* trigger */
set_gpio_bit(ice, PONTIS_CS_CS, 1);
udelay(1);
set_gpio_bit(ice, PONTIS_CS_CS, 0);
spi_send_byte(ice, dev | 1); /* READ */
val = spi_read_byte(ice);
/* trigger */
set_gpio_bit(ice, PONTIS_CS_CS, 1);
udelay(1);
/* restore */
snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
return val;
}
/*
* SPDIF input source
*/
/* Enumerated-control info callback for the SPDIF input selector. */
static int cs_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	static const char * const texts[] = {
		"Coax",		/* RXP0 */
		"Optical",	/* RXP1 */
		"CD",		/* RXP2 */
	};
	unsigned int item;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = ARRAY_SIZE(texts);
	item = uinfo->value.enumerated.item;
	if (item >= ARRAY_SIZE(texts))
		item = ARRAY_SIZE(texts) - 1;
	uinfo->value.enumerated.item = item;
	strcpy(uinfo->value.enumerated.name, texts[item]);
	return 0;
}
/* Report the cached SPDIF input selection (kept in gpio.saved[0],
 * see the HACK note in pontis_init). */
static int cs_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
ucontrol->value.enumerated.item[0] = ice->gpio.saved[0];
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/*
 * Select the CS8416 SPDIF receiver input: write control register 0x04
 * with RUN (0x80) plus the RXP selector in bits 3-4, and remember the
 * choice in gpio.saved[0] for cs_source_get().
 */
static int cs_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned char val;
int change = 0;
mutex_lock(&ice->gpio_mutex);
if (ucontrol->value.enumerated.item[0] != ice->gpio.saved[0]) {
ice->gpio.saved[0] = ucontrol->value.enumerated.item[0] & 3;
val = 0x80 | (ice->gpio.saved[0] << 3);
spi_write(ice, CS_DEV, 0x04, val);
change = 1;
}
mutex_unlock(&ice->gpio_mutex);
return change;
}
/*
* GPIO controls
*/
/* Shared info callback for the GPIO Mask/Direction/Data controls:
 * a single 16-bit integer value. */
static int pontis_gpio_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 0xffff; /* 16bit */
return 0;
}
/* Report the GPIO write mask, inverted for the user's view and with the
 * reserved CS8416 bits 4-7 forced on. */
static int pontis_gpio_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
/* 4-7 reserved */
ucontrol->value.integer.value[0] = (~ice->gpio.write_mask & 0xffff) | 0x00f0;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Update the cached GPIO write mask (user value inverted; bits 4-7 kept
 * reserved).  Applied to hardware by the Data control callbacks. */
static int pontis_gpio_mask_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int val;
int changed;
mutex_lock(&ice->gpio_mutex);
/* 4-7 reserved */
val = (~ucontrol->value.integer.value[0] & 0xffff) | 0x00f0;
changed = val != ice->gpio.write_mask;
ice->gpio.write_mask = val;
mutex_unlock(&ice->gpio_mutex);
return changed;
}
/* Report the cached GPIO direction; bits 4-7 (reserved) masked out. */
static int pontis_gpio_dir_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
/* 4-7 reserved */
ucontrol->value.integer.value[0] = ice->gpio.direction & 0xff0f;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Update the cached GPIO direction (bits 4-7 reserved, always cleared).
 * Applied to hardware by the Data control callbacks. */
static int pontis_gpio_dir_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int val;
int changed;
mutex_lock(&ice->gpio_mutex);
/* 4-7 reserved */
val = ucontrol->value.integer.value[0] & 0xff0f;
changed = (val != ice->gpio.direction);
ice->gpio.direction = val;
mutex_unlock(&ice->gpio_mutex);
return changed;
}
/* Read the live GPIO pins, first applying the cached direction and
 * write mask to the controller. */
static int pontis_gpio_data_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
ucontrol->value.integer.value[0] = snd_ice1712_gpio_read(ice) & 0xffff;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
/* Write the GPIO pins after applying the cached direction/mask; only
 * touches the hardware when the new value differs from the current one. */
static int pontis_gpio_data_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
unsigned int val, nval;
int changed = 0;
mutex_lock(&ice->gpio_mutex);
snd_ice1712_gpio_set_dir(ice, ice->gpio.direction);
snd_ice1712_gpio_set_mask(ice, ice->gpio.write_mask);
val = snd_ice1712_gpio_read(ice) & 0xffff;
nval = ucontrol->value.integer.value[0] & 0xffff;
if (val != nval) {
snd_ice1712_gpio_write(ice, nval);
changed = 1;
}
mutex_unlock(&ice->gpio_mutex);
return changed;
}
static const DECLARE_TLV_DB_SCALE(db_scale_volume, -6400, 50, 1);
/*
* mixers
*/
/* Mixer controls for the Pontis MS300; the two volume controls export a
 * dB TLV scale (db_scale_volume) so userspace can display real dB. */
static struct snd_kcontrol_new pontis_controls[] __devinitdata = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "PCM Playback Volume",
.info = wm_dac_vol_info,
.get = wm_dac_vol_get,
.put = wm_dac_vol_put,
.tlv = { .p = db_scale_volume },
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
.name = "Capture Volume",
.info = wm_adc_vol_info,
.get = wm_adc_vol_get,
.put = wm_adc_vol_put,
.tlv = { .p = db_scale_volume },
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "CD Capture Switch",
.info = wm_adc_mux_info,
.get = wm_adc_mux_get,
.put = wm_adc_mux_put,
/* private_value selects the ADC-mux bit toggled by the callbacks */
.private_value = 0,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Capture Switch",
.info = wm_adc_mux_info,
.get = wm_adc_mux_get,
.put = wm_adc_mux_put,
.private_value = 1,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog Bypass Switch",
.info = wm_bypass_info,
.get = wm_bypass_get,
.put = wm_bypass_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Swap Output Channels",
.info = wm_chswap_info,
.get = wm_chswap_get,
.put = wm_chswap_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "IEC958 Input Source",
.info = cs_source_info,
.get = cs_source_get,
.put = cs_source_put,
},
/* FIXME: which interface? */
{
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "GPIO Mask",
.info = pontis_gpio_mask_info,
.get = pontis_gpio_mask_get,
.put = pontis_gpio_mask_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "GPIO Direction",
.info = pontis_gpio_mask_info,
.get = pontis_gpio_dir_get,
.put = pontis_gpio_dir_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "GPIO Data",
.info = pontis_gpio_mask_info,
.get = pontis_gpio_data_get,
.put = pontis_gpio_data_put,
},
};
/*
* WM codec registers
*/
/*
 * proc write handler for the "wm_codec" file.  Each input line is
 * "<reg> <val>" in hex; only valid WM8776 registers (0x00..0x17) with
 * 16-bit values are written, anything else is silently skipped.
 *
 * Fix: the second sscanf() argument had been garbled into the HTML
 * entity '®' (a mangled '&reg'), which does not compile; restore it.
 */
static void wm_proc_regs_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct snd_ice1712 *ice = entry->private_data;
	char line[64];
	unsigned int reg, val;

	mutex_lock(&ice->gpio_mutex);
	while (!snd_info_get_line(buffer, line, sizeof(line))) {
		if (sscanf(line, "%x %x", &reg, &val) != 2)
			continue;
		if (reg <= 0x17 && val <= 0xffff)
			wm_put(ice, reg, val);
	}
	mutex_unlock(&ice->gpio_mutex);
}
/* proc read handler: dump all WM8776 registers (0x00-0x17) as
 * "reg = value" hex lines. */
static void wm_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct snd_ice1712 *ice = entry->private_data;
	int idx;

	mutex_lock(&ice->gpio_mutex);
	for (idx = 0; idx <= 0x17; idx++)
		snd_iprintf(buffer, "%02x = %04x\n", idx, wm_get(ice, idx));
	mutex_unlock(&ice->gpio_mutex);
}
/* Register the read/write "wm_codec" proc file (write enabled for the
 * owner via S_IWUSR). */
static void wm_proc_init(struct snd_ice1712 *ice)
{
struct snd_info_entry *entry;
if (! snd_card_proc_new(ice->card, "wm_codec", &entry)) {
snd_info_set_text_ops(entry, ice, wm_proc_regs_read);
entry->mode |= S_IWUSR;
entry->c.text.write = wm_proc_regs_write;
}
}
/* proc read handler: dump CS8416 registers 0x00-0x26 plus 0x7f over SPI. */
static void cs_proc_regs_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
struct snd_ice1712 *ice = entry->private_data;
int reg, val;
mutex_lock(&ice->gpio_mutex);
for (reg = 0; reg <= 0x26; reg++) {
val = spi_read(ice, CS_DEV, reg);
snd_iprintf(buffer, "%02x = %02x\n", reg, val);
}
val = spi_read(ice, CS_DEV, 0x7f);
snd_iprintf(buffer, "%02x = %02x\n", 0x7f, val);
mutex_unlock(&ice->gpio_mutex);
}
/* Register the read-only "cs_codec" proc file. */
static void cs_proc_init(struct snd_ice1712 *ice)
{
struct snd_info_entry *entry;
if (! snd_card_proc_new(ice->card, "cs_codec", &entry))
snd_info_set_text_ops(entry, ice, cs_proc_regs_read);
}
/*
 * Register all mixer controls and the wm_codec/cs_codec proc files.
 * Returns 0 on success or the first snd_ctl_add() error.
 */
static int __devinit pontis_add_controls(struct snd_ice1712 *ice)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(pontis_controls); idx++) {
		int err = snd_ctl_add(ice->card,
				      snd_ctl_new1(&pontis_controls[idx], ice));
		if (err < 0)
			return err;
	}
	wm_proc_init(ice);
	cs_proc_init(ice);
	return 0;
}
/*
* initialize the chip
*/
/*
 * Chip init for the Pontis MS300 (VT1720, 2 DACs / 2 ADCs).
 * The WM8776 is programmed in two passes -- mute/power/reset first, then
 * the full register set after a 1-jiffy pause -- to reduce init pop
 * noise.  The CS8416 SPDIF receiver is then hard-reset via the PRST#
 * bit (MT05 bit 7) before its registers are loaded over SPI.
 * Returns 0 on success, -ENOMEM if the register cache cannot be allocated.
 */
static int __devinit pontis_init(struct snd_ice1712 *ice)
{
static const unsigned short wm_inits[] = {
/* These come first to reduce init pop noise */
WM_ADC_MUX, 0x00c0, /* ADC mute */
WM_DAC_MUTE, 0x0001, /* DAC softmute */
WM_DAC_CTRL1, 0x0000, /* DAC mute */
WM_POWERDOWN, 0x0008, /* All power-up except HP */
WM_RESET, 0x0000, /* reset */
};
static const unsigned short wm_inits2[] = {
WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */
WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */
WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */
WM_DAC_CTRL1, 0x0090, /* DAC L/R */
WM_OUT_MUX, 0x0001, /* OUT DAC */
WM_HP_ATTEN_L, 0x0179, /* HP 0dB */
WM_HP_ATTEN_R, 0x0179, /* HP 0dB */
WM_DAC_ATTEN_L, 0x0000, /* DAC 0dB */
WM_DAC_ATTEN_L, 0x0100, /* DAC 0dB */
WM_DAC_ATTEN_R, 0x0000, /* DAC 0dB */
WM_DAC_ATTEN_R, 0x0100, /* DAC 0dB */
/* WM_DAC_MASTER, 0x0100, */ /* DAC master muted */
WM_PHASE_SWAP, 0x0000, /* phase normal */
WM_DAC_CTRL2, 0x0000, /* no deemphasis, no ZFLG */
WM_ADC_ATTEN_L, 0x0000, /* ADC muted */
WM_ADC_ATTEN_R, 0x0000, /* ADC muted */
#if 0
WM_ALC_CTRL1, 0x007b, /* */
WM_ALC_CTRL2, 0x0000, /* */
WM_ALC_CTRL3, 0x0000, /* */
WM_NOISE_GATE, 0x0000, /* */
#endif
WM_DAC_MUTE, 0x0000, /* DAC unmute */
WM_ADC_MUX, 0x0003, /* ADC unmute, both CD/Line On */
};
static const unsigned char cs_inits[] = {
0x04, 0x80, /* RUN, RXP0 */
0x05, 0x05, /* slave, 24bit */
0x01, 0x00,
0x02, 0x00,
0x03, 0x00,
};
unsigned int i;
ice->vt1720 = 1;
ice->num_total_dacs = 2;
ice->num_total_adcs = 2;
/* to remember the register values */
ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
if (! ice->akm)
return -ENOMEM;
ice->akm_codecs = 1;
/* HACK - use this as the SPDIF source.
* don't call snd_ice1712_gpio_get/put(), otherwise it's overwritten
*/
ice->gpio.saved[0] = 0;
/* initialize WM8776 codec */
for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
wm_put(ice, wm_inits[i], wm_inits[i+1]);
schedule_timeout_uninterruptible(1);
for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2)
wm_put(ice, wm_inits2[i], wm_inits2[i+1]);
/* initialize CS8416 codec */
/* assert PRST#; MT05 bit 7 */
outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD));
mdelay(5);
/* deassert PRST# */
outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD));
for (i = 0; i < ARRAY_SIZE(cs_inits); i += 2)
spi_write(ice, CS_DEV, cs_inits[i], cs_inits[i+1]);
return 0;
}
/*
* Pontis boards don't provide the EEPROM data at all.
* hence the driver needs to sets up it properly.
*/
/* Synthetic EEPROM image (the board has none); indices are the standard
 * ICE_EEP2_* configuration offsets. */
static unsigned char pontis_eeprom[] __devinitdata = {
[ICE_EEP2_SYSCONF] = 0x08, /* clock 256, mpu401, spdif-in/ADC, 1DAC */
[ICE_EEP2_ACLINK] = 0x80, /* I2S */
[ICE_EEP2_I2S] = 0xf8, /* vol, 96k, 24bit, 192k */
[ICE_EEP2_SPDIF] = 0xc3, /* out-en, out-int, spdif-in */
[ICE_EEP2_GPIO_DIR] = 0x07,
[ICE_EEP2_GPIO_DIR1] = 0x00,
[ICE_EEP2_GPIO_DIR2] = 0x00, /* ignored */
[ICE_EEP2_GPIO_MASK] = 0x0f, /* 4-7 reserved for CS8416 */
[ICE_EEP2_GPIO_MASK1] = 0xff,
[ICE_EEP2_GPIO_MASK2] = 0x00, /* ignored */
[ICE_EEP2_GPIO_STATE] = 0x06, /* 0-low, 1-high, 2-high */
[ICE_EEP2_GPIO_STATE1] = 0x00,
[ICE_EEP2_GPIO_STATE2] = 0x00, /* ignored */
};
/* entry point */
/* Board table entry point referenced by the ice1724 core driver. */
struct snd_ice1712_card_info snd_vt1720_pontis_cards[] __devinitdata = {
{
.subvendor = VT1720_SUBDEVICE_PONTIS_MS300,
.name = "Pontis MS300",
.model = "ms300",
.chip_init = pontis_init,
.build_controls = pontis_add_controls,
.eeprom_size = sizeof(pontis_eeprom),
.eeprom_data = pontis_eeprom,
},
{ } /* terminator */
};
| gpl-2.0 |
evitareul/android_kernel_htc_evitareul.DONTUSE | arch/xtensa/boot/lib/zmem.c | 14043 | 1984 | #include <linux/zlib.h>
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
/* Fatal-error handler for the boot stub: there is no runtime to return
 * to, so just hang forever. */
void exit (void)
{
for (;;);
}
/*
 * Bump allocator used as zlib's workspace allocator: carves 8-byte
 * aligned chunks out of [avail_ram, end_avail).  Nothing is ever freed;
 * on exhaustion it hangs via exit().
 */
void *zalloc(unsigned size)
{
	void *chunk = avail_ram;

	/* round the request up to an 8-byte multiple */
	size = (size + 7) & ~7u;
	avail_ram += size;
	if (avail_ram > end_avail) {
		//puts("oops... out of memory\n");
		//pause();
		exit();
	}
	return chunk;
}
#define HEAD_CRC 2
#define EXTRA_FIELD 4
#define ORIG_NAME 8
#define COMMENT 0x10
#define RESERVED 0xe0
#define DEFLATED 8
/*
 * Decompress a gzip stream for the boot loader.  The gzip header is
 * parsed by hand (compression method in byte 2, flags in byte 3,
 * optional extra/name/comment/CRC fields after the fixed 10 bytes),
 * then the deflate payload is inflated with raw zlib.
 * On entry *lenp is the compressed length; on return it holds the
 * decompressed size.  Any error hangs via exit().
 */
void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp)
{
z_stream s;
int r, i, flags;
/* skip header */
i = 10;
flags = src[3];
if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
//puts("bad gzipped data\n");
exit();
}
if ((flags & EXTRA_FIELD) != 0)
i = 12 + src[10] + (src[11] << 8);
if ((flags & ORIG_NAME) != 0)
while (src[i++] != 0)
;
if ((flags & COMMENT) != 0)
while (src[i++] != 0)
;
if ((flags & HEAD_CRC) != 0)
i += 2;
if (i >= *lenp) {
//puts("gunzip: ran out of data in header\n");
exit();
}
s.workspace = zalloc(zlib_inflate_workspacesize());
/* negative window bits: raw deflate, no zlib header/trailer */
r = zlib_inflateInit2(&s, -MAX_WBITS);
if (r != Z_OK) {
//puts("inflateInit2 returned "); puthex(r); puts("\n");
exit();
}
s.next_in = src + i;
s.avail_in = *lenp - i;
s.next_out = dst;
s.avail_out = dstlen;
r = zlib_inflate(&s, Z_FINISH);
if (r != Z_OK && r != Z_STREAM_END) {
//puts("inflate returned "); puthex(r); puts("\n");
exit();
}
*lenp = s.next_out - (unsigned char *) dst;
zlib_inflateEnd(&s);
}
| gpl-2.0 |
leemgs/samsung-s3c6410-android.1.0 | drivers/video/kyro/STG4000VTG.c | 15579 | 4649 | /*
* linux/drivers/video/kyro/STG4000VTG.c
*
* Copyright (C) 2002 STMicroelectronics
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <video/kyro.h>
#include "STG4000Reg.h"
#include "STG4000Interface.h"
/*
 * Pulse the VGA block through software reset (bit 8 of SoftwareReset):
 * clear the bit, wait a little, set it again.
 * NOTE(review): CLEAR_BIT(8) appears to be a macro that clears the bit
 * in the local 'tmp' (compare StartVTG below) -- confirm against
 * STG4000Reg.h before relying on this.
 */
void DisableVGA(volatile STG4000REG __iomem *pSTGReg)
{
u32 tmp;
volatile u32 count = 0, i;
/* Reset the VGA registers */
tmp = STG_READ_REG(SoftwareReset);
CLEAR_BIT(8);
STG_WRITE_REG(SoftwareReset, tmp);
/* Just for Delay */
for (i = 0; i < 1000; i++) {
count++;
}
/* Pull-out the VGA registers from reset */
tmp = STG_READ_REG(SoftwareReset);
tmp |= SET_BIT(8);
STG_WRITE_REG(SoftwareReset, tmp);
}
/* Stop the video timing generator: set the h/v sync stop bits (0 and 2)
 * and clear the enable bit 31 in DACSyncCtrl. */
void StopVTG(volatile STG4000REG __iomem *pSTGReg)
{
u32 tmp = 0;
/* Stop Ver and Hor Sync Generator */
tmp = (STG_READ_REG(DACSyncCtrl)) | SET_BIT(0) | SET_BIT(2);
CLEAR_BIT(31);
STG_WRITE_REG(DACSyncCtrl, tmp);
}
/* Start the video timing generator: set enable bit 31 and clear the
 * h/v sync stop bits (0 and 2) in DACSyncCtrl. */
void StartVTG(volatile STG4000REG __iomem *pSTGReg)
{
u32 tmp = 0;
/* Start Ver and Hor Sync Generator */
tmp = ((STG_READ_REG(DACSyncCtrl)) | SET_BIT(31));
CLEAR_BIT(0);
CLEAR_BIT(2);
STG_WRITE_REG(DACSyncCtrl, tmp);
}
/*
 * Program the video timing generator from a mode description: derive the
 * border width from the total/sync/porch values, compute the start
 * positions of each horizontal and vertical region, write the six
 * DACHorTim/DACVerTim registers, and finally set the sync polarities.
 */
void SetupVTG(volatile STG4000REG __iomem *pSTGReg,
const struct kyrofb_info * pTiming)
{
u32 tmp = 0;
u32 margins = 0;
u32 ulBorder;
u32 xRes = pTiming->XRES;
u32 yRes = pTiming->YRES;
/* Horizontal */
u32 HAddrTime, HRightBorder, HLeftBorder;
u32 HBackPorcStrt, HFrontPorchStrt, HTotal,
HLeftBorderStrt, HRightBorderStrt, HDisplayStrt;
/* Vertical */
u32 VDisplayStrt, VBottomBorder, VTopBorder;
u32 VBackPorchStrt, VTotal, VTopBorderStrt,
VFrontPorchStrt, VBottomBorderStrt, VAddrTime;
/* Need to calculate the right border */
if ((xRes == 640) && (yRes == 480)) {
if ((pTiming->VFREQ == 60) || (pTiming->VFREQ == 72)) {
margins = 8;
}
}
/* Work out the Border */
ulBorder =
(pTiming->HTot -
(pTiming->HST + (pTiming->HBP - margins) + xRes +
(pTiming->HFP - margins))) >> 1;
/* Border the same for Vertical and Horizontal */
VBottomBorder = HLeftBorder = VTopBorder = HRightBorder = ulBorder;
/************ Get Timing values for Horizontal ******************/
HAddrTime = xRes;
HBackPorcStrt = pTiming->HST;
HTotal = pTiming->HTot;
HDisplayStrt =
pTiming->HST + (pTiming->HBP - margins) + HLeftBorder;
HLeftBorderStrt = HDisplayStrt - HLeftBorder;
HFrontPorchStrt =
pTiming->HST + (pTiming->HBP - margins) + HLeftBorder +
HAddrTime + HRightBorder;
HRightBorderStrt = HFrontPorchStrt - HRightBorder;
/************ Get Timing values for Vertical ******************/
VAddrTime = yRes;
VBackPorchStrt = pTiming->VST;
VTotal = pTiming->VTot;
VDisplayStrt =
pTiming->VST + (pTiming->VBP - margins) + VTopBorder;
VTopBorderStrt = VDisplayStrt - VTopBorder;
VFrontPorchStrt =
pTiming->VST + (pTiming->VBP - margins) + VTopBorder +
VAddrTime + VBottomBorder;
VBottomBorderStrt = VFrontPorchStrt - VBottomBorder;
/* Set Hor Timing 1, 2, 3 */
tmp = STG_READ_REG(DACHorTim1);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (HTotal) | (HBackPorcStrt << 16);
STG_WRITE_REG(DACHorTim1, tmp);
tmp = STG_READ_REG(DACHorTim2);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (HDisplayStrt << 16) | HLeftBorderStrt;
STG_WRITE_REG(DACHorTim2, tmp);
tmp = STG_READ_REG(DACHorTim3);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (HFrontPorchStrt << 16) | HRightBorderStrt;
STG_WRITE_REG(DACHorTim3, tmp);
/* Set Ver Timing 1, 2, 3 */
tmp = STG_READ_REG(DACVerTim1);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (VBackPorchStrt << 16) | (VTotal);
STG_WRITE_REG(DACVerTim1, tmp);
tmp = STG_READ_REG(DACVerTim2);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (VDisplayStrt << 16) | VTopBorderStrt;
STG_WRITE_REG(DACVerTim2, tmp);
tmp = STG_READ_REG(DACVerTim3);
CLEAR_BITS_FRM_TO(0, 11);
CLEAR_BITS_FRM_TO(16, 27);
tmp |= (VFrontPorchStrt << 16) | VBottomBorderStrt;
STG_WRITE_REG(DACVerTim3, tmp);
/* Set Verical and Horizontal Polarity */
tmp = STG_READ_REG(DACSyncCtrl) | SET_BIT(3) | SET_BIT(1);
if ((pTiming->HSP > 0) && (pTiming->VSP < 0)) { /* +hsync -vsync */
tmp &= ~0x8;
} else if ((pTiming->HSP < 0) && (pTiming->VSP > 0)) { /* -hsync +vsync */
tmp &= ~0x2;
} else if ((pTiming->HSP < 0) && (pTiming->VSP < 0)) { /* -hsync -vsync */
tmp &= ~0xA;
} else if ((pTiming->HSP > 0) && (pTiming->VSP > 0)) { /* +hsync +vsync */
tmp &= ~0x0; /* no-op: both polarity bits stay set */
}
STG_WRITE_REG(DACSyncCtrl, tmp);
}
| gpl-2.0 |
omnirom/android_kernel_huawei_angler | arch/mips/alchemy/common/time.c | 2524 | 5088 | /*
* Copyright (C) 2008-2009 Manuel Lauss <manuel.lauss@gmail.com>
*
* Previous incarnations were:
* Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
* Copied and modified Carsten Langgaard's time.c
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*
* Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
* databooks). Firmware/Board init code must enable the counters in the
* counter control register, otherwise the CP0 counter clocksource/event
* will be installed instead (and use of 'wait' instruction is prohibited).
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/idle.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
/* 32kHz clock enabled and detected */
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
/* Clocksource read callback: the 32.768kHz Counter1 ("RTC") register. */
static cycle_t au1x_counter1_read(struct clocksource *cs)
{
return au_readl(SYS_RTCREAD);
}
/* 32-bit, continuously-running counter1 clocksource. */
static struct clocksource au1x_counter1_clocksource = {
.name = "alchemy-counter1",
.read = au1x_counter1_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.rating = 1500,
};
/* Program the next clockevent: add the delta to the free-running RTC
 * counter and write it to match register 2, after busy-waiting for any
 * pending write to that register (M21 bit) to complete. */
static int au1x_rtcmatch2_set_next_event(unsigned long delta,
struct clock_event_device *cd)
{
delta += au_readl(SYS_RTCREAD);
/* wait for register access */
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
;
au_writel(delta, SYS_RTCMATCH2);
au_sync();
return 0;
}
/* Intentionally empty: the device only supports oneshot mode and needs
 * no mode-change handling. */
static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
struct clock_event_device *cd)
{
}
/* Match-2 interrupt: forward the tick to the clockevents core. */
static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
/* Oneshot-only clockevent device backed by RTC match register 2. */
static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 1500,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = cpu_all_mask,
};
/* IRQ action wired up in alchemy_time_init(). */
static struct irqaction au1x_rtcmatch2_irqaction = {
.handler = au1x_rtcmatch2_irq,
.flags = IRQF_TIMER,
.name = "timer",
.dev_id = &au1x_rtcmatch2_clockdev,
};
/*
 * Probe and set up the 32.768kHz counter1 clocksource and the rtcmatch2
 * clockevent.  Each counter register write must be polled for completion
 * (the T1S/C1S busy bits), each with a bounded spin so a dead crystal
 * cannot hang boot.  Returns 0 on success, -1 when the 32kHz clock is
 * absent or the counter never settles (caller falls back to CP0).
 */
static int __init alchemy_time_init(unsigned int m2int)
{
struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
unsigned long t;
au1x_rtcmatch2_clockdev.irq = m2int;
/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
* has been detected. If so install the rtcmatch2 clocksource,
* otherwise don't bother. Note that both bits being set is by
* no means a definite guarantee that the counters actually work
* (the 32S bit seems to be stuck set to 1 once a single clock-
* edge is detected, hence the timeouts).
*/
if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
goto cntr_err;
/*
* setup counter 1 (RTC) to tick at full speed
*/
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
au_writel(0, SYS_RTCTRIM); /* 32.768 kHz */
au_sync();
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
au_writel(0, SYS_RTCWRITE);
au_sync();
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
/* register counter1 clocksource and event device */
clocksource_register_hz(&au1x_counter1_clocksource, 32768);
cd->shift = 32;
cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(9, cd); /* ~0.28ms */
clockevents_register_device(cd);
setup_irq(m2int, &au1x_rtcmatch2_irqaction);
printk(KERN_INFO "Alchemy clocksource installed\n");
return 0;
cntr_err:
return -1;
}
/* Per-CPU-type match2 interrupt numbers, indexed by alchemy_get_cputype(). */
static int alchemy_m2inttab[] __initdata = {
AU1000_RTC_MATCH2_INT,
AU1500_RTC_MATCH2_INT,
AU1100_RTC_MATCH2_INT,
AU1550_RTC_MATCH2_INT,
AU1200_RTC_MATCH2_INT,
AU1300_RTC_MATCH2_INT,
};
/* Platform hook: try the 32kHz counter; on failure fall back to the CP0
 * timer, which cannot be combined with the 'wait' instruction. */
void __init plat_time_init(void)
{
int t;
t = alchemy_get_cputype();
if (t == ALCHEMY_CPU_UNKNOWN ||
alchemy_time_init(alchemy_m2inttab[t]))
cpu_wait = NULL; /* wait doesn't work with r4k timer */
}
| gpl-2.0 |
Pafcholini/Nadia-kernel-LL--Update-Linux-N910F-EUR-LL-OpenSource | drivers/ata/sata_qstor.c | 2780 | 17843 | /*
* sata_qstor.c - Pacific Digital Corporation QStor SATA
*
* Maintained by: Mark Lord <mlord@pobox.com>
*
* Copyright 2005 Pacific Digital Corporation.
* (OSL/GPL code release authorized by Jalil Fadavi).
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
#define DRV_VERSION "0.09"
/* QStor register map, packet layout constants and per-port state. */
enum {
QS_MMIO_BAR = 4,
QS_PORTS = 4,
QS_MAX_PRD = LIBATA_MAX_PRD,
QS_CPB_ORDER = 6,
QS_CPB_BYTES = (1 << QS_CPB_ORDER),
/* each PRD entry is 16 bytes: 8 addr + 4 len + 4 pad (see qs_fill_sg) */
QS_PRD_BYTES = QS_MAX_PRD * 16,
QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
/* global register offsets */
QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
QS_HID_HPHY = 0x0004, /* host physical interface info */
QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
QS_HST_SFF = 0x0100, /* host status fifo offset */
QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
/* global control bits */
QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
QS_CNFG3_GSRST = 0x01, /* global chip reset */
QS_SERD3_PHY_ENA = 0xf0, /* PHY detection ENAble*/
/* per-channel register offsets */
QS_CCF_CPBA = 0x0710, /* chan CPB base address */
QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
/* channel control bits */
QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
QS_CTR0_CLER = (1 << 2), /* clear channel errors */
QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
/* pkt sub-field headers */
QS_HCB_HDR = 0x01, /* Host Control Block header */
QS_DCB_HDR = 0x02, /* Device Control Block header */
/* pkt HCB flag bits */
QS_HF_DIRO = (1 << 0), /* data DIRection Out */
QS_HF_DAT = (1 << 3), /* DATa pkt */
QS_HF_IEN = (1 << 4), /* Interrupt ENable */
QS_HF_VLD = (1 << 5), /* VaLiD pkt */
/* pkt DCB flag bits */
QS_DF_PORD = (1 << 2), /* Pio OR Dma */
QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
/* PCI device IDs */
board_2068_idx = 0, /* QStor 4-port SATA/RAID */
};
/* separate enum: ~0UL doesn't fit in the int-sized enum above */
enum {
QS_DMA_BOUNDARY = ~0UL
};
/* per-port mode: plain register (mmio) access vs. DMA packet mode */
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
struct qs_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
qs_state_t state;
};
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);
/* SCSI host template, libata port operations, port info and PCI glue. */
static struct scsi_host_template qs_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = QS_MAX_PRD,
.dma_boundary = QS_DMA_BOUNDARY,
};
static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
.freeze = qs_freeze,
.thaw = qs_thaw,
.prereset = qs_prereset,
.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
.scr_read = qs_scr_read,
.scr_write = qs_scr_write,
.port_start = qs_port_start,
.host_stop = qs_host_stop,
};
static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &qs_ata_ops,
},
};
static const struct pci_device_id qs_ata_pci_tbl[] = {
{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
{ } /* terminate list */
};
static struct pci_driver qs_ata_pci_driver = {
.name = DRV_NAME,
.id_table = qs_ata_pci_tbl,
.probe = qs_ata_init_one,
.remove = ata_pci_remove_one,
};
/* Return the mapped MMIO region (PCI BAR 4) for this host. */
static void __iomem *qs_mmio_base(struct ata_host *host)
{
return host->iomap[QS_MMIO_BAR];
}
/* Always reject DMA for ATAPI commands (non-zero = use PIO). */
static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 1; /* ATAPI DMA not supported */
}
/* Switch the channel from packet (DMA) mode back to register mode; the
 * trailing readb flushes the posted write. */
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
struct qs_port_priv *pp = ap->private_data;
pp->state = qs_state_mmio;
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
}
/* Reset the per-channel logic, then drop back into register mode. */
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
qs_enter_reg_mode(ap);
}
/* EH freeze: mask all host interrupts and fall back to register mode. */
static void qs_freeze(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
qs_enter_reg_mode(ap);
}
/* EH thaw: return to register mode and re-enable host interrupts. */
static void qs_thaw(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
qs_enter_reg_mode(ap);
writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/* Reset the channel logic before handing off to the generic SFF prereset. */
static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
qs_reset_channel_logic(ap);
return ata_sff_prereset(link, deadline);
}
/* Read a SATA SCR register; SCRs are spaced 8 bytes apart in MMIO. */
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
/* Error handler: force register mode before running the SFF EH. */
static void qs_error_handler(struct ata_port *ap)
{
qs_enter_reg_mode(ap);
ata_sff_error_handler(ap);
}
/* Write a SATA SCR register; same 8-byte spacing as qs_scr_read. */
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
return 0;
}
/*
 * Build the hardware PRD (scatter/gather) table in the port's packet
 * buffer.  Each entry is 16 bytes: a little-endian 64-bit DMA address,
 * a 32-bit length, and 4 bytes of padding -- hence 'prd' advances by
 * sizeof(u64) after the 32-bit length is stored.
 * Returns the number of entries written.
 */
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct qs_port_priv *pp = ap->private_data;
u8 *prd = pp->pkt + QS_CPB_BYTES;
unsigned int si;
for_each_sg(qc->sg, sg, qc->n_elem, si) {
u64 addr;
u32 len;
addr = sg_dma_address(sg);
*(__le64 *)prd = cpu_to_le64(addr);
prd += sizeof(u64);
len = sg_dma_len(sg);
*(__le32 *)prd = cpu_to_le32(len);
prd += sizeof(u64); /* 4-byte pad to the 16-byte entry size */
VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
(unsigned long long)addr, len);
}
return si;
}
/*
 * Prepare the per-port packet buffer for a DMA command: host control block
 * (HCB) at offset 0, device control block (DCB) at offset 24, and the
 * command FIS at offset 32, followed by the PRD table built by
 * qs_fill_sg().  Non-DMA protocols are handled via plain register mode and
 * need no packet.
 */
static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;	/* direction: host -> device */
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;	/* extended (48-bit) LBA */

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	/* DMA address of the PRD table, which sits just past the CPB */
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
}
/*
 * Kick off a prepared packet-mode command.  The wmb() orders the in-memory
 * CPB/PRD writes ahead of the doorbell write; the trailing readl() flushes
 * the posted doorbell to the device.
 */
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb(); /* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF); /* flush */
}
/*
 * libata ->qc_issue hook: DMA commands go through the hardware packet
 * engine; everything else is issued via the generic SFF register path.
 * ATAPI DMA is impossible here (qs_check_atapi_dma() rejects it), hence
 * the BUG().
 */
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = qs_state_mmio;
	return ata_sff_qc_issue(qc);
}
/*
 * Complete @qc according to the device status byte.  A clean status
 * completes the command; otherwise the status is recorded in the EH info
 * and the port is either aborted (plain device error) or frozen (anything
 * more serious).
 */
static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
	qc->err_mask |= ac_err_mask(status);

	if (!qc->err_mask) {
		ata_qc_complete(qc);
	} else {
		struct ata_port *ap = qc->ap;
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "status 0x%02X", status);

		if (qc->err_mask == AC_ERR_DEV)
			ata_port_abort(ap);
		else
			ata_port_freeze(ap);
	}
}
/*
 * Drain the controller's status FIFO (SFF) of packet-mode completions.
 * Each 64-bit FIFO record carries a valid flag, an empty flag, the channel
 * number, and the device/host status bytes; we loop until the FIFO reports
 * empty.  Completions are only acted on for ports currently in packet mode
 * with a non-polled active command.
 *
 * Returns non-zero if at least one valid record was consumed.
 */
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = qs_mmio_base(host);

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host->ports[port_no];
			struct qs_port_priv *pp = ap->private_data;
			struct ata_queued_cmd *qc;

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (!pp || pp->state != qs_state_pkt)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
				switch (sHST) {
				case 0: /* successful CPB */
				case 3: /* device error */
					qs_enter_reg_mode(qc->ap);
					qs_do_or_die(qc, sDST);
					break;
				default:
					break;
				}
			}
		}
	} while (!sFFE);
	return handled;
}
/*
 * Service register-mode (MMIO) interrupts: walk every port and hand any
 * non-polled active command to the generic SFF interrupt handler.  Ports
 * with no active command get their status register read purely to ack
 * spurious interrupts (see the comment below).
 *
 * Returns non-zero if any interrupt was handled (or acked).
 */
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct qs_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (!qc) {
			/*
			 * The qstor hardware generates spurious
			 * interrupts from time to time when switching
			 * in and out of packet mode. There's no
			 * obvious way to know if we're here now due
			 * to that, so just ack the irq and pretend we
			 * knew it was ours.. (ugh). This does not
			 * affect packet mode.
			 */
			ata_sff_check_status(ap);
			handled = 1;
			continue;
		}

		if (!pp || pp->state != qs_state_mmio)
			continue;
		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_sff_port_intr(ap, qc);
	}
	return handled;
}
/*
 * Top-level interrupt handler.  Under the host lock, first drain the
 * packet-mode completion FIFO, then service any register-mode interrupts.
 */
static irqreturn_t qs_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned long flags;
	unsigned int handled;

	VPRINTK("ENTER\n");

	spin_lock_irqsave(&host->lock, flags);
	handled  = qs_intr_pkt(host);
	handled |= qs_intr_mmio(host);
	spin_unlock_irqrestore(&host->lock, flags);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
/*
 * Fill in the libata taskfile register map for one channel.  Registers are
 * spaced 8 bytes apart starting at channel offset 0x400, with the SCR
 * block at 0xc00.
 */
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}
/*
 * libata ->port_start hook: allocate the per-port private data and the
 * DMA-coherent command packet buffer, then program the packet buffer's
 * bus address into the channel.  Both allocations are device-managed, so
 * no explicit teardown is needed.
 */
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = qs_mmio_base(ap->host);
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	qs_enter_reg_mode(ap);
	/* tell the channel where its CPB lives (64-bit bus address) */
	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;
}
/*
 * libata ->host_stop hook: silence the controller and issue a global
 * reset so it comes back in a known state on the next load.
 */
static void qs_host_stop(struct ata_host *host)
{
	void __iomem *mmio_base = qs_mmio_base(host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}
/*
 * One-time controller bring-up: global reset, per-channel device/logic
 * reset, phy enable, FIFO depth programming (values copied from the
 * vendor's Windows driver), CPB size setup, and finally interrupt enable.
 */
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0); /* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER,    chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/*
* The QStor understands 64-bit buses, and uses 64-bit fields
* for DMA pointers regardless of bus width. We just have to
* make sure our DMA masks are set appropriately for whatever
* bridge lies between us and the QStor, and then the DMA mapping
* code will ensure we only ever "see" appropriate buffer addresses.
* If we're 32-bit limited somewhere, then our 64-bit fields will
* just end up with zeros in the upper 32-bits, without any special
* logic required outside of this routine (below).
*/
/*
 * Configure the PCI streaming and coherent DMA masks.  If the hardware
 * reports a 64-bit bus and the platform accepts a 64-bit streaming mask,
 * try a 64-bit coherent mask and gracefully fall back to 32-bit coherent;
 * otherwise set both masks to 32-bit.  Returns 0 or a negative errno.
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			/* coherent 64-bit failed; retry coherent as 32-bit */
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
/*
 * PCI probe: allocate the ata_host, map BAR QS_MMIO_BAR, set the DMA
 * masks, wire up each port's taskfile registers, initialize the chip and
 * activate the host with a shared IRQ.  All resources are devm-managed,
 * so early-return error paths need no cleanup.
 */
static int qs_ata_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}
/* Module registration and metadata. */
module_pci_driver(qs_ata_pci_driver);

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
S34Qu4K3/P6-U06-JellyBean-Kernel-3.0.8---China-Version | drivers/scsi/isci/port.c | 2780 | 53245 | /*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "isci.h"
#include "port.h"
#include "request.h"
#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)
/* Record a new lifecycle status on @iport (under its state_lock). */
static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
{
	unsigned long flags;

	dev_dbg(&iport->isci_host->pdev->dev,
		"%s: iport = %p, state = 0x%x\n",
		__func__, iport, status);

	/* XXX pointless lock */
	spin_lock_irqsave(&iport->state_lock, flags);
	iport->status = status;
	spin_unlock_irqrestore(&iport->state_lock, flags);
}
/* Accumulate the protocol capabilities of every phy assigned to the port. */
static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
	u8 i;

	proto->all = 0;
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = iport->phy_table[i];

		if (iphy)
			sci_phy_get_protocols(iphy, proto);
	}
}
static u32 sci_port_get_phys(struct isci_port *iport)
{
u32 index;
u32 mask;
mask = 0;
for (index = 0; index < SCI_MAX_PHYS; index++)
if (iport->phy_table[index])
mask |= (1 << index);
return mask;
}
/**
* sci_port_get_properties() - This method simply returns the properties
* regarding the port, such as: physical index, protocols, sas address, etc.
* @port: this parameter specifies the port for which to retrieve the physical
* index.
* @properties: This parameter specifies the properties structure into which to
* copy the requested information.
*
* Indicate if the user specified a valid port. SCI_SUCCESS This value is
* returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
static enum sci_status sci_port_get_properties(struct isci_port *iport,
struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
return SCI_FAILURE_INVALID_PORT;
prop->index = iport->logical_port_index;
prop->phy_mask = sci_port_get_phys(iport);
sci_port_get_sas_address(iport, &prop->local.sas_address);
sci_port_get_protocols(iport, &prop->local.protocols);
sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
return SCI_SUCCESS;
}
/*
 * Re-arm broadcast-change notifications on every phy of the port.  The
 * read-then-write looks like a no-op, but per the inline note the relevant
 * bit in link_layer_control is write-1-to-clear, so writing the value back
 * clears the latched notification.  NOTE(review): W1C semantics inferred
 * from the original comment -- confirm against the SCU register spec.
 */
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* clear the bit by writing 1. */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}
/*
 * Unblock broadcast-change notifications for @iport and replay one pending
 * BCN to libsas (via the first phy found) if any arrived while blocked.
 * Called under sci_lock to stabilize phy:port associations.
 */
void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
{
	int i;

	clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
	wake_up(&ihost->eventq);

	if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
		return;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		struct isci_phy *iphy = iport->phy_table[i];

		if (!iphy)
			continue;

		/* one notification covers the whole port */
		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
						PORTE_BROADCAST_RCVD);
		break;
	}
}
/*
 * Handle a broadcast-change primitive from @iphy.  If the owning port has
 * BCNs blocked, just latch it as pending and wake any waiters; otherwise
 * forward the event to libsas immediately.  Either way, re-arm the
 * hardware notification afterwards.
 */
static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
		dev_dbg(&ihost->pdev->dev,
			"%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
			__func__, iphy, &iphy->sas_phy);
		set_bit(IPORT_BCN_PENDING, &iport->flags);
		atomic_inc(&iport->event);
		wake_up(&ihost->eventq);
	} else {
		dev_dbg(&ihost->pdev->dev,
			"%s: isci_phy = %p, sas_phy = %p\n",
			__func__, iphy, &iphy->sas_phy);

		ihost->sas_ha.notify_port_event(&iphy->sas_phy,
						PORTE_BROADCAST_RCVD);
	}
	sci_port_bcn_enable(iport);
}
/*
 * isci_port_link_up() - sci core callback for a phy's link coming up.
 * @isci_host: owning host.
 * @iport: port the phy has joined.
 * @iphy: the phy whose link is now up.
 *
 * Binds the phy to the port, fills in the libsas OOB mode, received-frame
 * size and attached SAS address according to the phy protocol (SATA
 * addresses come from the port properties, SAS addresses from the IAF),
 * then notifies libsas via PORTE_BYTES_DMAED.
 *
 * Fix: the dev_err message for an unrecognized protocol was misspelled
 * ("unkown" -> "unknown").
 */
static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	BUG_ON(iphy->isci_port != NULL);

	iphy->isci_port = iport;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	isci_port_change_state(iphy->isci_port, isci_starting);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/*
		 * For direct-attached SATA devices, the SCI core will
		 * automagically assign a SAS address to the end device
		 * for the purpose of creating a port. This SAS address
		 * will not be the same as assigned to the PHY and needs
		 * to be obtained from struct sci_port_properties properties.
		 */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Notify libsas that we have an address frame, if indeed
	 * we've found an SSP, SMP, or STP target */
	if (success)
		isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
						    PORTE_BYTES_DMAED);
}
/**
 * isci_port_link_down() - This function is called by the sci core when a link
 *    becomes inactive.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_phy: This parameter specifies the isci phy with the active link.
 * @isci_port: This parameter specifies the isci port with the active link.
 *
 * If this was the last phy on the port, marks every remote device on the
 * port as gone and puts the port into the stopping state; in all cases it
 * tells libsas the phy lost signal and detaches the phy from the port.
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {

		/* check to see if this is the last phy on this port. */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {
			atomic_inc(&isci_port->event);
			isci_port_bcn_enable(isci_host, isci_port);

			/* change the state for all devices on this port.  The
			 * next task sent to this device will be returned as
			 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
			 * remove the target
			 */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
		}
		isci_port_change_state(isci_port, isci_stopping);
	}

	/* Notify libsas of the broken link, this will trigger calls to our
	 * isci_port_deformed and isci_dev_gone functions.
	 */
	sas_phy_disconnected(&isci_phy->sas_phy);
	isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
					   PHYE_LOSS_OF_SIGNAL);

	isci_phy->isci_port = NULL;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}
/**
 * isci_port_ready() - sci core callback for a link becoming ready.
 * @isci_host: the isci host object.
 * @isci_port: the port whose link is now ready.
 *
 * Wakes anyone waiting on the port's start completion and records the
 * ready status.
 */
static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	complete_all(&isci_port->start_complete);
	isci_port_change_state(isci_port, isci_ready);
}
/**
 * isci_port_not_ready() - This function is called by the sci core when a link
 *    is not ready. All remote devices on this link will be removed if they are
 *    in the stopping state.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_port: This parameter specifies the sci port with the active link.
 *
 * Currently informational only: nothing beyond the debug trace happens here.
 */
static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);
}
/* sci core callback: a port stop request has finished (debug trace only). */
static void isci_port_stop_complete(struct isci_host *ihost,
				    struct isci_port *iport,
				    enum sci_status completion_status)
{
	dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
}
/**
 * isci_port_hard_reset_complete() - This function is called by the sci core
 *    when the hard reset complete notification has been received.
 * @isci_port: This parameter specifies the sci port with the active link.
 * @completion_status: This parameter specifies the core status for the reset
 *    process.
 *
 * Stores the outcome on the port and wakes all waiters on
 * hard_reset_complete.
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	dev_dbg(&isci_port->isci_host->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		__func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	complete_all(&isci_port->hard_reset_complete);
}
/* This method will return a true value if the specified phy can be assigned to
 * this port The following is a list of phys for each port that are allowed: -
 * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method
 * doesn't preclude all configurations. It merely ensures that a phy is part
 * of the allowable set of phy identifiers for that port. For example, one
 * could assign phy 3 to port 0 and no other phys. Please refer to
 * sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported. bool true if this is a valid phy
 * assignment for the port false if this is not a valid phy assignment for the
 * port
 */
bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
	struct isci_host *ihost = iport->owning_controller;
	struct sci_user_parameters *user = &ihost->user_parameters;

	/* Initialize to invalid value. */
	u32 existing_phy_index = SCI_MAX_PHYS;
	u32 index;

	/* Port 1 may only contain phy 1. */
	if ((iport->physical_port_index == 1) && (phy_index != 1))
		return false;

	/* Port 3 may only contain phy 3. */
	if (iport->physical_port_index == 3 && phy_index != 3)
		return false;

	/* Port 2 may only contain phys 2 and 3. */
	if (iport->physical_port_index == 2 &&
	    (phy_index == 0 || phy_index == 1))
		return false;

	/* Find any other phy already assigned to the port. */
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index] && index != phy_index)
			existing_phy_index = index;

	/* Ensure that all of the phys in the port are capable of
	 * operating at the same maximum link rate.
	 */
	if (existing_phy_index < SCI_MAX_PHYS &&
	    user->phys[phy_index].max_speed_generation !=
	    user->phys[existing_phy_index].max_speed_generation)
		return false;

	return true;
}
/**
 * sci_port_is_phy_mask_valid() - can the SCU support this port/phy-mask
 *    combination?
 * @iport: the port whose phy mask is being checked.
 * @phy_mask: candidate bitmask of phy indexes.
 *
 * Valid masks per physical port: port 0 may hold phys [[3 2] 1] 0, port 1
 * phy [1], port 2 phys [[3] 2], port 3 phy [3].  An empty mask is accepted
 * for any of the four real ports.
 *
 * Return: true when the mask is supportable on this port, false otherwise.
 */
static bool sci_port_is_phy_mask_valid(
	struct isci_port *iport,
	u32 phy_mask)
{
	switch (iport->physical_port_index) {
	case 0:
		return ((phy_mask & 0x0F) == 0x0F) ||
		       ((phy_mask & 0x03) == 0x03) ||
		       ((phy_mask & 0x01) == 0x01) ||
		       (phy_mask == 0);
	case 1:
		return ((phy_mask & 0x02) == 0x02) ||
		       (phy_mask == 0);
	case 2:
		return ((phy_mask & 0x0C) == 0x0C) ||
		       ((phy_mask & 0x04) == 0x04) ||
		       (phy_mask == 0);
	case 3:
		return ((phy_mask & 0x08) == 0x08) ||
		       (phy_mask == 0);
	default:
		return false;
	}
}
/*
 * Return the lowest-indexed phy that is both a member of this port and
 * currently connected to a remote end point, or NULL when the port has no
 * active phys.
 */
static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
	u32 i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = iport->phy_table[i];

		/* Must be assigned to the port AND currently active. */
		if (iphy != NULL && sci_port_active_phy(iport, iphy))
			return iphy;
	}

	return NULL;
}
/*
 * Attempt to add @iphy to @iport.  Succeeds only when the phy slot is
 * free, the phy is not already on a real (non-dummy) port, and the
 * phy index is allowed for this port.  Returns SCI_SUCCESS or SCI_FAILURE.
 */
static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Check to see if we can add this phy to a port
	 * that means that the phy is not part of a port and that the port does
	 * not already have a phy assigned to the phy index.
	 */
	if (!iport->phy_table[iphy->phy_index] &&
	    !phy_get_non_dummy_port(iphy) &&
	    sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
		/* Phy is being added in the stopped state so we are in MPC mode
		 * make logical port index = physical port index
		 */
		iport->logical_port_index = iport->physical_port_index;
		iport->phy_table[iphy->phy_index] = iphy;
		sci_phy_set_port(iphy, iport);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}
/*
 * Remove @iphy from @iport, re-parenting the phy onto the controller's
 * dummy port (ports[SCI_MAX_PORTS]).  Fails with SCI_FAILURE when the phy
 * is not actually a member of this port.
 */
static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	/* Make sure that this phy is part of this port */
	if (iport->phy_table[iphy->phy_index] == iphy &&
	    phy_get_non_dummy_port(iphy) == iport) {
		struct isci_host *ihost = iport->owning_controller;

		/* Yep it is assigned to this port so remove it */
		sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
		iport->phy_table[iphy->phy_index] = NULL;
		return SCI_SUCCESS;
	}

	return SCI_FAILURE;
}
/*
 * Report the port's local SAS address into *sas.  Every member phy's
 * address is written in turn, so the final value is that of the
 * highest-indexed phy.  NOTE(review): presumably all phys in a port share
 * the same local SAS address, making the loop order irrelevant -- confirm.
 */
void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	u32 index;

	sas->high = 0;
	sas->low  = 0;
	for (index = 0; index < SCI_MAX_PHYS; index++)
		if (iport->phy_table[index])
			sci_phy_get_sas_address(iport->phy_table[index], sas);
}
/*
 * Report the SAS address of the device attached to this port.  For SAS
 * targets the true attached address is used; for SATA targets a synthetic
 * address is fabricated from the phy's own address plus its index (direct-
 * attached SATA devices have no SAS address of their own).  Zeroed when no
 * phy in the port is connected.
 */
void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
	struct isci_phy *iphy;

	/*
	 * Ensure that the phy is both part of the port and currently
	 * connected to the remote end-point.
	 */
	iphy = sci_port_get_a_connected_phy(iport);
	if (iphy) {
		if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
			sci_phy_get_attached_sas_address(iphy, sas);
		} else {
			sci_phy_get_sas_address(iphy, sas);
			sas->low += iphy->phy_index;
		}
	} else {
		sas->high = 0;
		sas->low  = 0;
	}
}
/**
 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
 *
 * @iport: logical port on which we need to create the remote node context
 * @rni: remote node index for this remote node context.
 *
 * This routine will construct a dummy remote node context data structure
 * This structure will be posted to the hardware to work around a scheduler
 * error in the hardware.  The entry is marked valid but carries a zero SAS
 * address and no nexus-loss timer.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}
/*
 * construct a dummy task context data structure.  This
 * structure will be posted to the hardware to work around a scheduler error
 * in the hardware.  It references the port's reserved dummy remote node
 * index and is marked valid so the scheduler will accept it.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}
/*
 * Release the dummy tag and dummy remote node index reserved for the
 * hardware-scheduler workaround, then reset both fields to their sentinel
 * "not reserved" values.
 */
static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
		isci_free_tag(ihost, iport->reserved_tag);

	if (iport->reserved_rni != SCU_DUMMY_INDEX)
		sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
								1, iport->reserved_rni);

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}
/* Program @device_id into the transport layer of every active phy in port. */
void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
	u8 i;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (iport->active_phy_mask & (1 << i))
			sci_phy_setup_transport(iport->phy_table[i], device_id);
}
/*
 * Mark @iphy active in @iport: resume it (unless it is SATA -- presumably
 * resumed later in the SATA path, verify), set its bit in active_phy_mask,
 * clear any invalid-phy record, and optionally tell the user layer the
 * link is up.
 */
static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
				  bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (do_notify_user)
		isci_port_link_up(ihost, iport, iphy);
}
/*
 * Remove @iphy from the port's active mask, forget its negotiated speed,
 * re-point its port-engine mapping back at itself (narrow-port style), and
 * optionally report the link-down to the user layer.
 */
void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* Re-assign the phy back to the LP as if it were a narrow port */
	writel(iphy->phy_index,
	       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}
/*
 * Record (once per phy) that @iphy came up with a link that cannot belong
 * to this port, and warn the first time it happens.
 */
static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Check to see if we have already reported this link as bad and if
	 * not go ahead and tell the SCI_USER that we have discovered an
	 * invalid link.
	 */
	if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
		ihost->invalid_phy_mask |= 1 << iphy->phy_index;
		dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
	}
}
/* True when @state is the READY state or one of its substates. */
static bool is_port_ready_state(enum sci_port_states state)
{
	return state == SCI_PORT_READY ||
	       state == SCI_PORT_SUB_WAITING ||
	       state == SCI_PORT_SUB_OPERATIONAL ||
	       state == SCI_PORT_SUB_CONFIGURING;
}
/*
 * Drive the port state machine to @state.  When the transition leaves the
 * ready-state family, ready_exit is set for the duration of the change to
 * flag dummy-rnc handling when exiting a ready state.
 */
static void port_state_machine_change(struct isci_port *iport,
				      enum sci_port_states state)
{
	struct sci_base_state_machine *sm = &iport->sm;
	enum sci_port_states old_state = sm->current_state_id;

	if (is_port_ready_state(old_state) && !is_port_ready_state(state))
		iport->ready_exit = true;

	sci_change_state(sm, state);
	iport->ready_exit = false;
}
/**
 * sci_port_general_link_up_handler - phy can be assigned to port?
 * @iport: sci_port object for which has a phy that has gone link up.
 * @iphy: This is the struct isci_phy object that has gone link up.
 * @do_notify_user: This parameter specifies whether to inform the user (via
 *    sci_port_link_up()) as to the fact that a new phy as become ready.
 *
 * Determine if this phy can be assigned to this port.  If the phy is not a
 * valid PHY for this port then the function will notify the user.  A PHY
 * can only be part of a port if its attached SAS ADDRESS is the same as
 * all other PHYs in the same port (or it is the port's first phy).
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     bool do_notify_user)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	/* If the SAS address of the new phy matches the SAS address of
	 * other phys in the port OR this is the first phy in the port,
	 * then activate the phy and allow it to be used for operations
	 * in this port.
	 */
	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low  == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, do_notify_user);
		/* a phy coming up ends an in-progress port reset */
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}
/* A port is "wide" when it has zero phys or more than one phy assigned;
 * exactly one assigned phy makes it a narrow port.
 */
static bool sci_port_is_wide(struct isci_port *iport)
{
	u32 i;
	u32 assigned = 0;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (iport->phy_table[i])
			assigned++;

	return assigned != 1;
}
/**
 * sci_port_link_detected() - decide whether a phy may continue to link up
 * @iport: the port associated with the phy
 * @iphy: the phy object that is trying to go link up
 *
 * Called by the phy object when a link is detected.  Returns true when
 * the phy may continue to the link up / ready state, false when it must
 * halt.  The one rejected configuration is a SATA phy on a wide port
 * (there are no wide ported SATA devices); that case is reported as an
 * invalid link up instead.
 */
bool sci_port_link_detected(
	struct isci_port *iport,
	struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
	    sci_port_is_wide(iport)) {
		sci_port_invalid_link_up(iport, iphy);
		return false;
	}
	return true;
}
/**
 * port_timeout - handle expiry of the port operation timer
 * @data: timer cookie; a struct sci_timer embedded in a struct isci_port
 *
 * Interprets the timeout according to the current port state: a timeout
 * while RESETTING fails the reset, a timeout while STOPPED means the
 * start request never completed, and a timeout while STOPPING reports
 * the stop as failed to the host layer.  A timeout in any other state
 * is unexpected and only logged.  Runs under ihost->scic_lock.
 */
static void port_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
	struct isci_host *ihost = iport->owning_controller;
	unsigned long flags;
	u32 current_state;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	/* the timer may have been cancelled while we waited for the lock */
	if (tmr->cancel)
		goto done;

	current_state = iport->sm.current_state_id;

	if (current_state == SCI_PORT_RESETTING) {
		/* if the port is still in the resetting state then the
		 * timeout fired before the reset completed.
		 */
		port_state_machine_change(iport, SCI_PORT_FAILED);
	} else if (current_state == SCI_PORT_STOPPED) {
		/* if the port is stopped then the start request failed.
		 * In this case stay in the stopped state.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p failed to start before timeout.\n",
			__func__,
			iport);
	} else if (current_state == SCI_PORT_STOPPING) {
		/* if the port is still stopping then the stop has not
		 * completed
		 */
		isci_port_stop_complete(iport->owning_controller,
					iport,
					SCI_FAILURE_TIMEOUT);
	} else {
		/* The port is in the ready state and we have a timer
		 * reporting a timeout this should not happen.
		 */
		dev_err(sciport_to_dev(iport),
			"%s: SCIC Port 0x%p is processing a timeout operation "
			"in state %d.\n", __func__, iport, current_state);
	}

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/* --------------------------------------------------------------------------- */
/*
 * sci_port_update_viit_entry() - update the hardware VIIT entry for this port
 *
 * Programs the port's SAS address into the virtual initiator
 * identification table registers.  The status register must be written
 * last: that write is what marks the whole entry valid.
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* This value is cleared just in case it is not already clear */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}
enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
u16 index;
struct isci_phy *iphy;
enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
/*
* Loop through all of the phys in this port and find the phy with the
* lowest maximum link rate. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
iphy = iport->phy_table[index];
if (iphy && sci_port_active_phy(iport, iphy) &&
iphy->max_negotiated_speed < max_allowed_speed)
max_allowed_speed = iphy->max_negotiated_speed;
}
return max_allowed_speed;
}
/* Set the SUSPEND bit in the port task scheduler control register. */
static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 ctrl;

	ctrl = readl(&iport->port_task_scheduler_registers->control);
	ctrl |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(ctrl, &iport->port_task_scheduler_registers->control);
}
/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @iport: port to post the task on
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.  The dummy task context, set up at port start via
 * sci_port_construct_dummy_task(), is posted on this port's reserved
 * tag with the abort flag cleared.
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}
/**
 * sci_port_abort_dummy_request() - abort the dummy/workaround request
 * @iport: the port on which the task must be aborted
 *
 * Aborting the dummy request allows the hardware to power down parts of
 * the silicon to save power.  If the dummy request was never posted the
 * hardware treats the abort as a NOP and simply returns abort complete.
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}
/* Clear the SUSPEND bit so the port task scheduler runs again. */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 ctrl;

	ctrl = readl(&iport->port_task_scheduler_registers->control);
	ctrl &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(ctrl, &iport->port_task_scheduler_registers->control);
}
/* SUB_WAITING entry: suspend the task scheduler and wait for a phy to
 * go link up; skip straight to OPERATIONAL if one already has.
 */
static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0)
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}
/* SUB_OPERATIONAL entry: report the port ready, bind every assigned phy
 * to this port in the PE configuration registers, program the VIIT
 * entry, resume the port task scheduler, and post the dummy request so
 * the hardware can schedule i/o correctly.
 */
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	isci_port_ready(ihost, iport);

	/* route each assigned phy to this physical port */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
		}
	}

	sci_port_update_viit_entry(iport);

	sci_port_resume_port_task_scheduler(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}
/* Invalidate the dummy remote node context reserved for this port.
 * The preceding tc abort must reach the controller before the rnc
 * invalidate is posted, hence the posted-write flush and the 10us
 * delay below.
 */
static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
/**
 * sci_port_ready_substate_operational_exit - leave the OPERATIONAL substate
 * @sm: the port's state machine, embedded in a struct isci_port
 *
 * Aborts the dummy request and reports the port not ready.  When the
 * port is leaving the ready states altogether (iport->ready_exit, set
 * by port_state_machine_change()), the dummy remote node context is
 * invalidated as well.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port if it has not yet posted
	 * the hardware will treat this as a NOP and just return abort
	 * complete.
	 */
	sci_port_abort_dummy_request(iport);

	isci_port_not_ready(ihost, iport);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}
/* SUB_CONFIGURING entry: with no active phys fall back to WAITING and
 * report not ready; with no outstanding requests go straight to
 * OPERATIONAL; otherwise stay here while in-flight i/o drains.
 */
static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		isci_port_not_ready(ihost, iport);
		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else if (iport->started_request_count == 0) {
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
	}
}
/* SUB_CONFIGURING exit: suspend the scheduler; tear down the dummy rnc
 * when the port is leaving the ready states entirely.
 */
static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}
/**
 * sci_port_start() - transition a stopped port to the ready state
 * @iport: port to start
 *
 * Allocates the reserved remote node context and the reserved task
 * context (both used for the hardware workaround "dummy" request) if
 * they have not been allocated yet, validates the port's phy mask, and
 * moves the state machine to SCI_PORT_READY.
 *
 * Return: SCI_SUCCESS on success; SCI_FAILURE_INVALID_STATE when the
 * port is not stopped; SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION when
 * devices are still assigned; SCI_FAILURE_INSUFFICIENT_RESOURCES when
 * the dummy rnc/tag cannot be allocated; SCI_FAILURE when the phy mask
 * is invalid.  On any failure the dummy resources are released again.
 */
enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because there
		 * are still devices assigned to this port.  There must
		 * be no devices assigned to a port on a start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port.  Make
		 * sure the port's phy mask is in fact legal and supported
		 * by the silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask)) {
			port_state_machine_change(iport, SCI_PORT_READY);
			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}
enum sci_status sci_port_stop(struct isci_port *iport)
{
enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED:
return SCI_SUCCESS;
case SCI_PORT_SUB_WAITING:
case SCI_PORT_SUB_OPERATIONAL:
case SCI_PORT_SUB_CONFIGURING:
case SCI_PORT_RESETTING:
port_state_machine_change(iport,
SCI_PORT_STOPPING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
}
/**
 * sci_port_hard_reset() - request a hard reset on one of the port's phys
 * @iport: port to reset
 * @timeout: reset timeout, armed on the port timer
 *
 * Only legal in the SUB_OPERATIONAL substate.  Picks the first active
 * phy in the port's phy table, issues a phy reset on it, starts the
 * timeout timer, and transitions the port to SCI_PORT_RESETTING.
 *
 * Return: SCI_SUCCESS when the reset was started;
 * SCI_FAILURE_INVALID_STATE when the port is not operational;
 * SCI_FAILURE_INVALID_PHY when no active phy is available; otherwise
 * the failing status from sci_phy_reset().
 */
static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready select
			 * different phy
			 */
			iphy = NULL;
		}
	}

	/* If we have a phy then go ahead and start the reset procedure */
	if (!iphy)
		return status;

	status = sci_phy_reset(iphy);
	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}
/**
 * sci_port_add_phy() - add a PHY to the selected port
 * @iport: the port the phy will be added to
 * @iphy: the phy which is to be added to the port
 *
 * When the port is stopped, the phy's SAS address must match the
 * port's assigned address (if any).  When the port is already running,
 * the phy is added and the port re-enters the configuring substate.
 *
 * Return: SCI_SUCCESS when the phy has been added to the port; any
 * other status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		/* NOTE(review): an address with only one non-zero half is
		 * treated as unassigned here (&&) — confirm this is the
		 * intended semantic rather than ||.
		 */
		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}
/**
* sci_port_remove_phy() -
* @sci_port: This parameter specifies the port in which the phy will be added.
* @sci_phy: This parameter is the phy which is to be added to the port.
*
* This method will remove the PHY from the selected PORT. This method returns
* an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
* other status is a failure to add the phy to the port.
*/
enum sci_status sci_port_remove_phy(struct isci_port *iport,
struct isci_phy *iphy)
{
enum sci_status status;
enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED:
return sci_port_clear_phy(iport, iphy);
case SCI_PORT_SUB_OPERATIONAL:
status = sci_port_clear_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
sci_port_deactivate_phy(iport, iphy, true);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
case SCI_PORT_SUB_CONFIGURING:
status = sci_port_clear_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
sci_port_deactivate_phy(iport, iphy, true);
/* Re-enter the configuring state since this may be the last phy in
* the port
*/
port_state_machine_change(iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
}
/**
 * sci_port_link_up() - handle a link up event on one of the port's phys
 * @iport: port the event occurred on
 * @iphy: the phy that has gone link up
 *
 * In SUB_WAITING this is the first phy up, so it is activated and the
 * port becomes operational.  In SUB_OPERATIONAL and RESETTING the
 * general handler validates the phy's SAS address before activating it;
 * while resetting the user is not notified.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE in other states.
 */
enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port we
		 * can just enable it and continue
		 */
		sci_port_activate_phy(iport, iphy, true);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, true);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone
		 * link up is the same one on which we sent the reset.  It is
		 * possible that the phy on which we sent the reset is not the
		 * one that has gone link up and we want to make sure that
		 * phy being reset comes back.  Consider the case where a
		 * reset is sent but before the hardware processes the reset it
		 * get a link up on the port because of a hot plug event.
		 * because of the reset request this phy will go link down
		 * almost immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}
}
enum sci_status sci_port_link_down(struct isci_port *iport,
struct isci_phy *iphy)
{
enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_SUB_OPERATIONAL:
sci_port_deactivate_phy(iport, iphy, true);
/* If there are no active phys left in the port, then
* transition the port to the WAITING state until such time
* as a phy goes link up
*/
if (iport->active_phy_mask == 0)
port_state_machine_change(iport,
SCI_PORT_SUB_WAITING);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* In the resetting state we don't notify the user regarding
* link up and link down notifications. */
sci_port_deactivate_phy(iport, iphy, false);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
}
enum sci_status sci_port_start_io(struct isci_port *iport,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_SUB_WAITING:
return SCI_FAILURE_INVALID_STATE;
case SCI_PORT_SUB_OPERATIONAL:
iport->started_request_count++;
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
}
}
/**
 * sci_port_complete_io() - account for a completed i/o on this port
 * @iport: port the request completed on
 * @idev: remote device for the request (unused here)
 * @ireq: the completed request (unused here)
 *
 * Decrements the started-request count and drives the state machine:
 * a STOPPING port becomes STOPPED once the last request drains, and a
 * SUB_CONFIGURING port moves on to SUB_OPERATIONAL.
 *
 * Return: SCI_SUCCESS, or SCI_FAILURE_INVALID_STATE when called on a
 * stopped port.
 */
enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport),
			 "%s: in wrong state: %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
		sci_port_decrement_request_count(iport);

		/* the stop completes once the last request drains */
		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		/* reconfiguration waits for in-flight i/o to drain */
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}
/* Enable the port task scheduler, leaving it in the suspended state. */
static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 ctrl;

	ctrl = readl(&iport->port_task_scheduler_registers->control);
	ctrl |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(ctrl, &iport->port_task_scheduler_registers->control);
}
/* Disable the port task scheduler (clear both ENABLE and SUSPEND). */
static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 ctrl;

	ctrl = readl(&iport->port_task_scheduler_registers->control);
	ctrl &= ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(ctrl, &iport->port_task_scheduler_registers->control);
}
/* Post the dummy remote node context reserved for this port, then
 * suspend it.  The posted-write flush and 10us delay guarantee the
 * controller has seen the post before the suspend is issued.
 */
static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Only when STOPPED was entered because of an explicit stop
	 * request do we disable the hardware's port task scheduler.
	 */
	if (iport->sm.previous_state_id == SCI_PORT_STOPPING)
		sci_port_disable_port_task_scheduler(iport);
}
static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* bring the port task scheduler up, but leave it suspended */
	sci_port_enable_port_task_scheduler(iport);
}
static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/* entering READY from RESETTING completes a hard reset; any
	 * other path means the port is simply not ready yet
	 */
	if (iport->sm.previous_state_id == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		isci_port_not_ready(ihost, iport);

	/* Post and suspend the dummy remote node context for this port */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
}
static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* the reset is over one way or another; stop the timer */
	sci_del_timer(&iport->timer);
}
static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	/* release the dummy rnc/tag reserved at port start */
	sci_port_destroy_dummy_resources(iport);
}
static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* report the timed-out hard reset to the waiting caller */
	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}
/* --------------------------------------------------------------------------- */
/* Entry/exit handlers for the port state machine, indexed by
 * enum sci_port_states.  A missing callback means no action is taken
 * on that transition.
 */
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state  = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state  = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state  = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter,
		.exit_state  = sci_port_ready_substate_configuring_exit
	},
	[SCI_PORT_RESETTING] = {
		.exit_state  = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};
/* One-time construction of @iport: initialize the state machine, the
 * bookkeeping fields, the port timer, and the phy table.
 */
void sci_port_construct(struct isci_port *iport, u8 index,
			struct isci_host *ihost)
{
	u8 i;

	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask = 0;
	iport->ready_exit = false;

	iport->owning_controller = ihost;
	iport->started_request_count = 0;
	iport->assigned_device_count = 0;

	/* dummy rnc/tag are allocated later, in sci_port_start() */
	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		iport->phy_table[i] = NULL;
}
/**
 * isci_port_init() - initialize the OS-facing part of a port object
 * @iport: port to initialize
 * @ihost: owning host
 * @index: port index (unused in this function)
 */
void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
	INIT_LIST_HEAD(&iport->remote_dev_list);
	INIT_LIST_HEAD(&iport->domain_dev_list);
	spin_lock_init(&iport->state_lock);
	init_completion(&iport->start_complete);
	iport->isci_host = ihost;
	isci_port_change_state(iport, isci_freed);
	atomic_set(&iport->event, 0);
}
/**
 * isci_port_get_state() - report the current status of the port object
 * @isci_port: the port being queried
 *
 * Return: the port's status as an isci_status enum.
 */
enum isci_status isci_port_get_state(struct isci_port *isci_port)
{
	return isci_port->status;
}
/* Forward a received broadcast-change primitive to the user layer. */
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}
/**
 * isci_port_perform_hard_reset() - issue a port hard reset and wait for it
 * @ihost: owning host
 * @iport: port to hard reset
 * @iphy: phy that prompted the request (unused here; the loop below
 *	declares a local of the same name that shadows it)
 *
 * Starts the hard reset under scic_lock, then sleeps on
 * iport->hard_reset_complete, so this must be called from sleepable
 * context.  When the reset fails (either to start or to complete
 * successfully) every phy in the port is stopped and restarted to
 * force explicit link-fail handling.
 *
 * Return: TMF_RESP_FUNC_COMPLETE on success, TMF_RESP_FUNC_FAILED
 * otherwise.
 */
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int idx, ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	init_completion(&iport->hard_reset_complete);

	spin_lock_irqsave(&ihost->scic_lock, flags);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_for_completion(&iport->hard_reset_complete);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS)
			ret = TMF_RESP_FUNC_FAILED;
	} else {
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call"
			" failed 0x%x\n",
			__func__, iport, status);
	}

	/* If the hard reset for the port has failed, consider this
	 * the same as link failures on all phys in the port.
	 */
	if (ret != TMF_RESP_FUNC_COMPLETE) {
		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; hard reset failed "
			"(0x%x) - driving explicit link fail for all phys\n",
			__func__, iport, iport->hard_reset_status);

		/* Down all phys in the port. */
		spin_lock_irqsave(&ihost->scic_lock, flags);
		for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
			struct isci_phy *iphy = iport->phy_table[idx];

			if (!iphy)
				continue;
			sci_phy_stop(iphy);
			sci_phy_start(iphy);
		}
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
	}
	return ret;
}
/**
 * isci_port_deformed() - called by libsas when a port becomes inactive
 * @phy: the libsas phy belonging to the now-inactive port
 *
 * Only logs the event; no other action is taken here.
 */
void isci_port_deformed(struct asd_sas_phy *phy)
{
	pr_debug("%s: sas_phy = %p\n", __func__, phy);
}
/**
 * isci_port_formed() - called by libsas when a port becomes active
 * @phy: the libsas phy belonging to the newly active port
 *
 * Only logs the event; no other action is taken here.
 */
void isci_port_formed(struct asd_sas_phy *phy)
{
	pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
}
| gpl-2.0 |
ChameleonOS/android_kernel_amazon_bowser-common | drivers/net/igbvf/ethtool.c | 2780 | 14949 | /*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
Copyright(c) 2009 - 2010 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/* ethtool support for igbvf */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include "igbvf.h"
#include <linux/if_vlan.h>
/* Describes one entry of the ethtool statistics table: where the
 * running counter and its base value live inside struct igbvf_adapter.
 */
struct igbvf_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
	int sizeof_stat;			/* size of the counter field */
	int stat_offset;			/* offset of the counter */
	int base_stat_offset;			/* offset of the base value */
};
/* Expand to the size/offset triple for one igbvf_stats entry:
 * @current is the running counter field, @base its base-value field,
 * both members of struct igbvf_adapter.
 */
#define IGBVF_STAT(current, base) \
		sizeof(((struct igbvf_adapter *)0)->current), \
		offsetof(struct igbvf_adapter, current), \
		offsetof(struct igbvf_adapter, base)
/* Statistics exported through ethtool -S, in display order. */
static const struct igbvf_stats igbvf_gstrings_stats[] = {
	{ "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) },
	{ "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) },
	{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
	{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
	{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
	{ "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
	{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
	{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
	{ "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
	{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
	{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
	{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
	{ "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) },
};
#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats)

/* names of the self-tests reported via ethtool */
static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)"
};

#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test)
/* Report link settings for the VF.  Supported/advertised modes are
 * fixed at 1000baseT full; the actual speed and duplex are decoded
 * from the device STATUS register when link is up.
 */
static int igbvf_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 status;

	ecmd->supported = SUPPORTED_1000baseT_Full;
	ecmd->advertising = ADVERTISED_1000baseT_Full;
	ecmd->port = -1;
	ecmd->transceiver = XCVR_DUMMY1;

	status = er32(STATUS);
	if (!(status & E1000_STATUS_LU)) {
		/* link down: speed and duplex are unknown */
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	} else {
		u32 speed = SPEED_10;

		if (status & E1000_STATUS_SPEED_1000)
			speed = SPEED_1000;
		else if (status & E1000_STATUS_SPEED_100)
			speed = SPEED_100;
		ethtool_cmd_speed_set(ecmd, speed);

		ecmd->duplex = (status & E1000_STATUS_FD) ?
			       DUPLEX_FULL : DUPLEX_HALF;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
/* Link settings are fixed for a VF; changing them is not supported. */
static int igbvf_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	return -EOPNOTSUPP;
}
/*
 * igbvf_get_pauseparam - report flow control settings
 *
 * The VF exposes no pause configuration; @pause is left untouched
 * (presumably zeroed by the ethtool core before this call — confirm
 * against the caller).  The bare "return;" at the end of the original
 * void function was redundant and has been dropped.
 */
static void igbvf_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
}
/* igbvf_set_pauseparam - flow control is not configurable on a VF */
static int igbvf_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
return -EOPNOTSUPP;
}
/* igbvf_get_rx_csum - return 1 when rx checksum offload is enabled */
static u32 igbvf_get_rx_csum(struct net_device *netdev)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	/* offload is on unless the disable flag has been set */
	return (priv->flags & IGBVF_FLAG_RX_CSUM_DISABLED) ? 0 : 1;
}
/* igbvf_set_rx_csum - enable (data != 0) or disable rx checksum offload */
static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	if (!data)
		priv->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		priv->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;

	return 0;
}
/* igbvf_get_tx_csum - return 1 when IPv4 tx checksum offload is enabled */
static u32 igbvf_get_tx_csum(struct net_device *netdev)
{
	return !!(netdev->features & NETIF_F_IP_CSUM);
}
/* igbvf_set_tx_csum - toggle IPv4 and IPv6 tx checksum offload together */
static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
{
	if (!data)
		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	else
		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	return 0;
}
/* igbvf_set_tso - enable or disable TCP segmentation offload (v4 and v6
 * are switched as a pair); logs the new state to the kernel log. */
static int igbvf_set_tso(struct net_device *netdev, u32 data)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	if (data)
		netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
	else
		netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	dev_info(&priv->pdev->dev, "TSO is %s\n",
		 data ? "Enabled" : "Disabled");
	return 0;
}
/* igbvf_get_msglevel - report the driver's message-enable bitmask */
static u32 igbvf_get_msglevel(struct net_device *netdev)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
/* igbvf_set_msglevel - set the driver's message-enable bitmask */
static void igbvf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	priv->msg_enable = data;
}
/* igbvf_get_regs_len - size in bytes of the register dump produced by
 * igbvf_get_regs() (IGBVF_REGS_LEN 32-bit registers). */
static int igbvf_get_regs_len(struct net_device *netdev)
{
#define IGBVF_REGS_LEN 8
return IGBVF_REGS_LEN * sizeof(u32);
}
/*
 * igbvf_get_regs - fill the ethtool register dump
 *
 * Snapshots the control/status registers plus ring 0's rx/tx length,
 * head and tail pointers.  The version word encodes a format id (1),
 * the PCI revision and the PCI device id.
 */
static void igbvf_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 *regs_buff = p;
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
regs_buff[2] = er32(RDLEN(0));
regs_buff[3] = er32(RDH(0));
regs_buff[4] = er32(RDT(0));
regs_buff[5] = er32(TDLEN(0));
regs_buff[6] = er32(TDH(0));
regs_buff[7] = er32(TDT(0));
}
/* igbvf_get_eeprom_len - the VF has no EEPROM access, so report 0 bytes */
static int igbvf_get_eeprom_len(struct net_device *netdev)
{
return 0;
}
/* igbvf_get_eeprom - EEPROM reads are not supported on the VF */
static int igbvf_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
/* igbvf_set_eeprom - EEPROM writes are not supported on the VF */
static int igbvf_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
return -EOPNOTSUPP;
}
/*
 * igbvf_get_drvinfo - fill in driver identification for ethtool -i
 *
 * Reports driver name/version, a placeholder firmware version (the VF
 * has no firmware version to query), the PCI bus address, and the sizes
 * of the register and EEPROM dumps.
 *
 * strncpy() was replaced with strlcpy(): strncpy() does not guarantee
 * NUL termination when the source fills the 32-byte drvinfo fields,
 * while strlcpy() always terminates.
 */
static void igbvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct igbvf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, igbvf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = igbvf_get_regs_len(netdev);
	drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
/*
 * igbvf_get_ringparam - report rx/tx descriptor ring sizes
 *
 * Fills in the current and maximum ring sizes; mini and jumbo rings are
 * not supported, so their limits and counts are zero.
 */
static void igbvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct igbvf_adapter *priv = netdev_priv(netdev);

	ring->rx_max_pending = IGBVF_MAX_RXD;
	ring->rx_pending = priv->rx_ring->count;
	ring->rx_mini_max_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_max_pending = IGBVF_MAX_TXD;
	ring->tx_pending = priv->tx_ring->count;
}
/*
 * igbvf_set_ringparam - resize the rx/tx descriptor rings
 *
 * Clamps the requested counts to the supported min/max and descriptor
 * alignment, then (if the interface is running) tears the rings down and
 * rebuilds them at the new size via a temporary ring struct.  Returns 0
 * on success or a negative errno.
 */
static int igbvf_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
int err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
/* clamp to [MIN, MAX] and round up to the descriptor multiple */
new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD);
new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD);
new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring->count) &&
(new_rx_count == adapter->rx_ring->count)) {
/* nothing to do */
return 0;
}
/* serialize against concurrent resets */
while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
msleep(1);
if (!netif_running(adapter->netdev)) {
/* interface is down: just record the new sizes, no realloc needed */
adapter->tx_ring->count = new_tx_count;
adapter->rx_ring->count = new_rx_count;
goto clear_reset;
}
temp_ring = vmalloc(sizeof(struct igbvf_ring));
if (!temp_ring) {
err = -ENOMEM;
goto clear_reset;
}
igbvf_down(adapter);
/*
 * We can't just free everything and then setup again,
 * because the ISRs in MSI-X mode get passed pointers
 * to the tx and rx ring structs.
 */
if (new_tx_count != adapter->tx_ring->count) {
memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring));
temp_ring->count = new_tx_count;
err = igbvf_setup_tx_resources(adapter, temp_ring);
if (err)
goto err_setup;
/* new resources are ready; swap them in and free the old ones */
igbvf_free_tx_resources(adapter->tx_ring);
memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring));
}
if (new_rx_count != adapter->rx_ring->count) {
memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring));
temp_ring->count = new_rx_count;
err = igbvf_setup_rx_resources(adapter, temp_ring);
if (err)
goto err_setup;
igbvf_free_rx_resources(adapter->rx_ring);
memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring));
}
/* fall through here on success too: bring the interface back up */
err_setup:
igbvf_up(adapter);
vfree(temp_ring);
clear_reset:
clear_bit(__IGBVF_RESETTING, &adapter->state);
return err;
}
/*
 * igbvf_link_test - self-test helper: check for link
 *
 * Refreshes link state through the MAC ops, then stores and returns
 * 0 for link up or 1 for link down in *@data.
 */
static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->mac.ops.check_for_link(hw);
	*data = (er32(STATUS) & E1000_STATUS_LU) ? 0 : 1;
	return *data;
}
/*
 * igbvf_diag_test - run the ethtool self-test (link test only)
 *
 * Sets the TESTING state bit for the duration, records the link test
 * result in data[0] and flags the overall test as failed if link is
 * down, then sleeps 4s (interruptibly) before returning.
 */
static void igbvf_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
set_bit(__IGBVF_TESTING, &adapter->state);
/*
 * Link test performed before hardware reset so autoneg doesn't
 * interfere with test result
 */
if (igbvf_link_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IGBVF_TESTING, &adapter->state);
msleep_interruptible(4 * 1000);
}
/* igbvf_get_wol - Wake-on-LAN is not available on the VF; report none */
static void igbvf_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
}
/* igbvf_set_wol - Wake-on-LAN cannot be configured on the VF */
static int igbvf_set_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
return -EOPNOTSUPP;
}
static int igbvf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
if (adapter->itr_setting <= 3)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
return 0;
}
/*
 * igbvf_set_coalesce - set the rx interrupt coalescing interval
 *
 * Accepts 0, 1 and 3 as special mode selectors and any value in
 * [IGBVF_MIN_ITR_USECS, IGBVF_MAX_ITR_USECS] as a real interval
 * (2 is explicitly rejected); everything else returns -EINVAL.
 * The chosen value is written to ring 0's ITR register.
 */
static int igbvf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
/* special mode: keep the selector, start from the default ITR */
adapter->itr = IGBVF_START_ITR;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
/* ITR register units are 4x the microsecond interval */
adapter->itr = ec->rx_coalesce_usecs << 2;
adapter->itr_setting = adapter->itr;
}
writel(adapter->itr,
hw->hw_addr + adapter->rx_ring[0].itr_register);
return 0;
}
/* igbvf_nway_reset - "restart autoneg" hook: reinitialize the interface
 * if it is running (the VF has no real autonegotiation to restart). */
static int igbvf_nway_reset(struct net_device *netdev)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev))
igbvf_reinit_locked(adapter);
return 0;
}
/*
 * igbvf_get_ethtool_stats - fill the ethtool -S statistics array
 *
 * After refreshing the counters, each reported value is the difference
 * between the current counter and its baseline snapshot, both located
 * by byte offset into struct igbvf_adapter (see igbvf_gstrings_stats).
 * The subtraction is done at the counter's native width (u64 or u32).
 */
static void igbvf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats,
u64 *data)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
int i;
igbvf_update_stats(adapter);
for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) {
/* p: current counter, b: baseline to subtract */
char *p = (char *)adapter +
igbvf_gstrings_stats[i].stat_offset;
char *b = (char *)adapter +
igbvf_gstrings_stats[i].base_stat_offset;
data[i] = ((igbvf_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) :
(*(u32 *)p - *(u32 *)b));
}
}
/* igbvf_get_sset_count - number of entries in the given string set,
 * or -EINVAL for sets this driver does not provide. */
static int igbvf_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_TEST)
		return IGBVF_TEST_LEN;
	if (stringset == ETH_SS_STATS)
		return IGBVF_GLOBAL_STATS_LEN;
	return -EINVAL;
}
/*
 * igbvf_get_strings - copy the requested string set into @data
 *
 * Test names are copied in one block; statistic names are copied one
 * fixed-width (ETH_GSTRING_LEN) slot at a time.  Unknown sets are
 * silently ignored.
 */
static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *igbvf_gstrings_test,
		       sizeof(igbvf_gstrings_test));
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       igbvf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		break;
	}
}
/* ethtool callbacks implemented by the igbvf driver */
static const struct ethtool_ops igbvf_ethtool_ops = {
.get_settings = igbvf_get_settings,
.set_settings = igbvf_set_settings,
.get_drvinfo = igbvf_get_drvinfo,
.get_regs_len = igbvf_get_regs_len,
.get_regs = igbvf_get_regs,
.get_wol = igbvf_get_wol,
.set_wol = igbvf_set_wol,
.get_msglevel = igbvf_get_msglevel,
.set_msglevel = igbvf_set_msglevel,
.nway_reset = igbvf_nway_reset,
.get_link = ethtool_op_get_link,
.get_eeprom_len = igbvf_get_eeprom_len,
.get_eeprom = igbvf_get_eeprom,
.set_eeprom = igbvf_set_eeprom,
.get_ringparam = igbvf_get_ringparam,
.set_ringparam = igbvf_set_ringparam,
.get_pauseparam = igbvf_get_pauseparam,
.set_pauseparam = igbvf_set_pauseparam,
.get_rx_csum = igbvf_get_rx_csum,
.set_rx_csum = igbvf_set_rx_csum,
.get_tx_csum = igbvf_get_tx_csum,
.set_tx_csum = igbvf_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = igbvf_set_tso,
.self_test = igbvf_diag_test,
.get_sset_count = igbvf_get_sset_count,
.get_strings = igbvf_get_strings,
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
};
/* igbvf_set_ethtool_ops - attach the igbvf ethtool_ops to a net_device */
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
/* have to "undeclare" const on this struct to remove warnings */
SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
}
| gpl-2.0 |
Art-Chen/android_kernel_samsung_galaxys2plus-common | drivers/media/video/cx18/cx18-streams.c | 2780 | 29427 | /*
* cx18 init/start/stop/exit stream functions
*
* Derived from ivtv-streams.c
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-fileops.h"
#include "cx18-mailbox.h"
#include "cx18-i2c.h"
#include "cx18-queue.h"
#include "cx18-ioctl.h"
#include "cx18-streams.h"
#include "cx18-cards.h"
#include "cx18-scb.h"
#include "cx18-dvb.h"
#define CX18_DSP0_INTERRUPT_MASK 0xd0004C
/* File operations shared by all cx18 encoder V4L2 device nodes */
static struct v4l2_file_operations cx18_v4l2_enc_fops = {
.owner = THIS_MODULE,
.read = cx18_v4l2_read,
.open = cx18_v4l2_open,
/* FIXME change to video_ioctl2 if serialization lock can be removed */
.unlocked_ioctl = cx18_v4l2_ioctl,
.release = cx18_v4l2_close,
.poll = cx18_v4l2_enc_poll,
.mmap = cx18_v4l2_mmap,
};
/* offset from 0 to register ts v4l2 minors on */
#define CX18_V4L2_ENC_TS_OFFSET 16
/* offset from 0 to register pcm v4l2 minors on */
#define CX18_V4L2_ENC_PCM_OFFSET 24
/* offset from 0 to register yuv v4l2 minors on */
#define CX18_V4L2_ENC_YUV_OFFSET 32
/*
 * Static per-stream-type configuration, indexed by CX18_ENC_STREAM_TYPE_*.
 * num_offset is the device-node number offset from the MPEG node
 * (-1 means no V4L2 device node is registered for that stream type).
 */
static struct {
const char *name;
int vfl_type;
int num_offset;
int dma;
enum v4l2_buf_type buf_type;
} cx18_stream_info[] = {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
VFL_TYPE_GRABBER, 0,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
VFL_TYPE_GRABBER, -1,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
},
{ /* CX18_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VBI_CAPTURE,
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_PRIVATE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
VFL_TYPE_GRABBER, -1,
PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
PCI_DMA_NONE, V4L2_BUF_TYPE_PRIVATE,
},
};
/*
 * cx18_dma_free - release a videobuf buffer's memory
 *
 * Waits for any outstanding operation on the buffer to complete before
 * freeing its vmalloc'ed backing store, then marks the buffer as
 * needing re-initialization.
 */
void cx18_dma_free(struct videobuf_queue *q,
struct cx18_stream *s, struct cx18_videobuf_buffer *buf)
{
videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_vmalloc_free(&buf->vb);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/*
 * cx18_prepare_buffer - validate and (re)allocate a videobuf buffer
 *
 * Checks the requested geometry, frees and re-inits the buffer when any
 * of width/height/field/pixelformat/TV norm changed, and performs the
 * iolock allocation for buffers still in NEEDS_INIT state.  On success
 * the buffer is left in PREPARED state; returns 0 or a negative errno.
 */
static int cx18_prepare_buffer(struct videobuf_queue *q,
struct cx18_stream *s,
struct cx18_videobuf_buffer *buf,
u32 pixelformat,
unsigned int width, unsigned int height,
enum v4l2_field field)
{
struct cx18 *cx = s->cx;
int rc = 0;
/* check settings */
buf->bytes_used = 0;
if ((width < 48) || (height < 32))
return -EINVAL;
/* 2 bytes per pixel (4:2:2 worst case) */
buf->vb.size = (width * height * 2);
if ((buf->vb.baddr != 0) && (buf->vb.bsize < buf->vb.size))
return -EINVAL;
/* alloc + fill struct (if changed) */
if (buf->vb.width != width || buf->vb.height != height ||
buf->vb.field != field || s->pixelformat != pixelformat ||
buf->tvnorm != cx->std) {
buf->vb.width = width;
buf->vb.height = height;
buf->vb.field = field;
buf->tvnorm = cx->std;
s->pixelformat = pixelformat;
/* geometry changed: drop the old allocation, forcing re-init below */
cx18_dma_free(q, s, buf);
}
if ((buf->vb.baddr != 0) && (buf->vb.bsize < buf->vb.size))
return -EINVAL;
if (buf->vb.field == 0)
buf->vb.field = V4L2_FIELD_INTERLACED;
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
buf->vb.width = width;
buf->vb.height = height;
buf->vb.field = field;
buf->tvnorm = cx->std;
s->pixelformat = pixelformat;
rc = videobuf_iolock(q, &buf->vb, NULL);
if (rc != 0)
goto fail;
}
buf->vb.state = VIDEOBUF_PREPARED;
return 0;
fail:
cx18_dma_free(q, s, buf);
return rc;
}
/* VB_MIN_BUFSIZE is lcm(1440 * 480, 1440 * 576)
1440 is a single line of 4:2:2 YUV at 720 luma samples wide
*/
#define VB_MIN_BUFFERS 32
#define VB_MIN_BUFSIZE 4147200
/*
 * buffer_setup - videobuf callback: decide buffer count and size
 *
 * Sizes each buffer for one full 4:2:2 frame at the current capture
 * dimensions, then trims the requested count so total memory stays
 * within VB_MIN_BUFFERS * VB_MIN_BUFSIZE.
 */
static int buffer_setup(struct videobuf_queue *q,
unsigned int *count, unsigned int *size)
{
struct cx18_stream *s = q->priv_data;
struct cx18 *cx = s->cx;
*size = 2 * cx->cxhdl.width * cx->cxhdl.height;
if (*count == 0)
*count = VB_MIN_BUFFERS;
/* cap the total allocation by shrinking the buffer count */
while (*size * *count > VB_MIN_BUFFERS * VB_MIN_BUFSIZE)
(*count)--;
q->field = V4L2_FIELD_INTERLACED;
q->last = V4L2_FIELD_INTERLACED;
return 0;
}
/* buffer_prepare - videobuf callback: prepare one buffer at the stream's
 * current pixel format and the driver's current capture dimensions. */
static int buffer_prepare(struct videobuf_queue *q,
struct videobuf_buffer *vb,
enum v4l2_field field)
{
struct cx18_videobuf_buffer *buf =
container_of(vb, struct cx18_videobuf_buffer, vb);
struct cx18_stream *s = q->priv_data;
struct cx18 *cx = s->cx;
return cx18_prepare_buffer(q, s, buf, s->pixelformat,
cx->cxhdl.width, cx->cxhdl.height, field);
}
/* buffer_release - videobuf callback: free a buffer's backing memory */
static void buffer_release(struct videobuf_queue *q,
struct videobuf_buffer *vb)
{
struct cx18_videobuf_buffer *buf =
container_of(vb, struct cx18_videobuf_buffer, vb);
struct cx18_stream *s = q->priv_data;
cx18_dma_free(q, s, buf);
}
/* buffer_queue - videobuf callback: mark a buffer QUEUED and append it
 * to the stream's capture list for the driver to fill. */
static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct cx18_videobuf_buffer *buf =
container_of(vb, struct cx18_videobuf_buffer, vb);
struct cx18_stream *s = q->priv_data;
buf->vb.state = VIDEOBUF_QUEUED;
list_add_tail(&buf->vb.queue, &s->vb_capture);
}
/* videobuf queue operations for the YUV stream's mmap/streaming capture */
static struct videobuf_queue_ops cx18_videobuf_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.buf_release = buffer_release,
};
/*
 * cx18_stream_init - (re)initialize one cx18_stream
 *
 * Zeroes the stream struct (preserving only the video_dev pointer),
 * fills in identity and buffer parameters from the static stream table
 * and per-card config, initializes all four MDL queues and the deferred
 * work/timer state, and for the YUV stream sets up the videobuf queue.
 */
static void cx18_stream_init(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
struct video_device *video_dev = s->video_dev;
/* we need to keep video_dev, so restore it afterwards */
memset(s, 0, sizeof(*s));
s->video_dev = video_dev;
/* initialize cx18_stream fields */
s->dvb = NULL;
s->cx = cx;
s->type = type;
s->name = cx18_stream_info[type].name;
s->handle = CX18_INVALID_TASK_HANDLE;
s->dma = cx18_stream_info[type].dma;
s->buffers = cx->stream_buffers[type];
s->buf_size = cx->stream_buf_size[type];
INIT_LIST_HEAD(&s->buf_pool);
s->bufs_per_mdl = 1;
s->mdl_size = s->buf_size * s->bufs_per_mdl;
init_waitqueue_head(&s->waitq);
s->id = -1;
/* each MDL queue has its own lock */
spin_lock_init(&s->q_free.lock);
cx18_queue_init(&s->q_free);
spin_lock_init(&s->q_busy.lock);
cx18_queue_init(&s->q_busy);
spin_lock_init(&s->q_full.lock);
cx18_queue_init(&s->q_full);
spin_lock_init(&s->q_idle.lock);
cx18_queue_init(&s->q_idle);
INIT_WORK(&s->out_work_order, cx18_out_work_handler);
INIT_LIST_HEAD(&s->vb_capture);
s->vb_timeout.function = cx18_vb_timeout;
s->vb_timeout.data = (unsigned long)s;
init_timer(&s->vb_timeout);
spin_lock_init(&s->vb_lock);
if (type == CX18_ENC_STREAM_TYPE_YUV) {
spin_lock_init(&s->vbuf_q_lock);
s->vb_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
videobuf_queue_vmalloc_init(&s->vbuf_q, &cx18_videobuf_qops,
&cx->pci_dev->dev, &s->vbuf_q_lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx18_videobuf_buffer),
s, &cx->serialize_lock);
/* Assume the previous pixel default */
s->pixelformat = V4L2_PIX_FMT_HM12;
}
}
/*
 * cx18_prep_dev - prepare (but do not register) the device for a stream
 *
 * Skips stream types the card doesn't support or that the user disabled
 * via module parameters, initializes the stream state, allocates the
 * cx18_dvb struct for the TS stream on DTV-capable cards, and allocates
 * and fills a struct video_device for streams that get a device node.
 * Returns 0 on success (including "stream not in use") or -ENOMEM.
 */
static int cx18_prep_dev(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
u32 cap = cx->v4l2_cap;
int num_offset = cx18_stream_info[type].num_offset;
int num = cx->instance + cx18_first_minor + num_offset;
/*
 * These five fields are always initialized.
 * For analog capture related streams, if video_dev == NULL then the
 * stream is not in use.
 * For the TS stream, if dvb == NULL then the stream is not in use.
 * In those cases no other fields but these four can be used.
 */
s->video_dev = NULL;
s->dvb = NULL;
s->cx = cx;
s->type = type;
s->name = cx18_stream_info[type].name;
/* Check whether the radio is supported */
if (type == CX18_ENC_STREAM_TYPE_RAD && !(cap & V4L2_CAP_RADIO))
return 0;
/* Check whether VBI is supported */
if (type == CX18_ENC_STREAM_TYPE_VBI &&
!(cap & (V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE)))
return 0;
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
cx->stream_buffers[type] == 0) {
CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
return 0;
}
cx18_stream_init(cx, type);
/* Allocate the cx18_dvb struct only for the TS on cards with DTV */
if (type == CX18_ENC_STREAM_TYPE_TS) {
if (cx->card->hw_all & CX18_HW_DVB) {
s->dvb = kzalloc(sizeof(struct cx18_dvb), GFP_KERNEL);
if (s->dvb == NULL) {
CX18_ERR("Couldn't allocate cx18_dvb structure"
" for %s\n", s->name);
return -ENOMEM;
}
} else {
/* Don't need buffers for the TS, if there is no DVB */
s->buffers = 0;
}
}
/* streams with num_offset == -1 get no V4L2 device node */
if (num_offset == -1)
return 0;
/* allocate and initialize the v4l2 video device structure */
s->video_dev = video_device_alloc();
if (s->video_dev == NULL) {
CX18_ERR("Couldn't allocate v4l2 video_device for %s\n",
s->name);
return -ENOMEM;
}
snprintf(s->video_dev->name, sizeof(s->video_dev->name), "%s %s",
cx->v4l2_dev.name, s->name);
s->video_dev->num = num;
s->video_dev->v4l2_dev = &cx->v4l2_dev;
s->video_dev->fops = &cx18_v4l2_enc_fops;
s->video_dev->release = video_device_release;
s->video_dev->tvnorms = V4L2_STD_ALL;
set_bit(V4L2_FL_USE_FH_PRIO, &s->video_dev->flags);
cx18_set_funcs(s->video_dev);
return 0;
}
/* Initialize v4l2 variables and register v4l2 devices */
int cx18_streams_setup(struct cx18 *cx)
{
int type, ret;
/* Setup V4L2 Devices */
for (type = 0; type < CX18_MAX_STREAMS; type++) {
/* Prepare device */
ret = cx18_prep_dev(cx, type);
if (ret < 0)
break;
/* Allocate Stream */
ret = cx18_stream_alloc(&cx->streams[type]);
if (ret < 0)
break;
}
/* loop ran to completion only if every stream set up cleanly */
if (type == CX18_MAX_STREAMS)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
cx18_streams_cleanup(cx, 0);
return ret;
}
/*
 * cx18_reg_dev - register the device node(s) for one stream
 *
 * Registers the DVB adapter for the TS stream when present, then the
 * V4L2 device node (if the stream has one), numbering non-MPEG nodes
 * relative to the MPEG node.  On V4L2 registration failure the
 * video_device is released and its pointer cleared.
 */
static int cx18_reg_dev(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
int vfl_type = cx18_stream_info[type].vfl_type;
const char *name;
int num, ret;
if (type == CX18_ENC_STREAM_TYPE_TS && s->dvb != NULL) {
ret = cx18_dvb_register(s);
if (ret < 0) {
CX18_ERR("DVB failed to register\n");
return ret;
}
}
if (s->video_dev == NULL)
return 0;
num = s->video_dev->num;
/* card number + user defined offset + device offset */
if (type != CX18_ENC_STREAM_TYPE_MPG) {
struct cx18_stream *s_mpg = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];
if (s_mpg->video_dev)
num = s_mpg->video_dev->num
+ cx18_stream_info[type].num_offset;
}
video_set_drvdata(s->video_dev, s);
/* Register device. First try the desired minor, then any free one. */
ret = video_register_device_no_warn(s->video_dev, vfl_type, num);
if (ret < 0) {
CX18_ERR("Couldn't register v4l2 device for %s (device node number %d)\n",
s->name, num);
video_device_release(s->video_dev);
s->video_dev = NULL;
return ret;
}
name = video_device_node_name(s->video_dev);
/* announce the new node, with buffer details where relevant */
switch (vfl_type) {
case VFL_TYPE_GRABBER:
CX18_INFO("Registered device %s for %s (%d x %d.%02d kB)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type] / 1024,
(cx->stream_buf_size[type] * 100 / 1024) % 100);
break;
case VFL_TYPE_RADIO:
CX18_INFO("Registered device %s for %s\n", name, s->name);
break;
case VFL_TYPE_VBI:
if (cx->stream_buffers[type])
CX18_INFO("Registered device %s for %s "
"(%d x %d bytes)\n",
name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type]);
else
CX18_INFO("Registered device %s for %s\n",
name, s->name);
break;
}
return 0;
}
/* Register v4l2 devices */
int cx18_streams_register(struct cx18 *cx)
{
int type;
int err;
int ret = 0;
/* Register V4L2 devices */
for (type = 0; type < CX18_MAX_STREAMS; type++) {
err = cx18_reg_dev(cx, type);
/* remember only the first failure, but try all streams */
if (err && ret == 0)
ret = err;
}
if (ret == 0)
return 0;
/* One or more streams could not be initialized. Clean 'em all up. */
cx18_streams_cleanup(cx, 1);
return ret;
}
/* Unregister v4l2 devices */
/*
 * cx18_streams_cleanup - tear down every stream
 * @unregister: non-zero when devices were registered and must be
 *              unregistered; zero to just release them (probe-error path)
 *
 * Handles the three stream flavors: TS (cx18_dvb, no video_device),
 * IDX (buffers but no video_device), and everything else (video_device
 * plus buffers, and for YUV the videobuf mmap mappings).
 */
void cx18_streams_cleanup(struct cx18 *cx, int unregister)
{
struct video_device *vdev;
int type;
/* Teardown all streams */
for (type = 0; type < CX18_MAX_STREAMS; type++) {
/* The TS has a cx18_dvb structure, not a video_device */
if (type == CX18_ENC_STREAM_TYPE_TS) {
if (cx->streams[type].dvb != NULL) {
if (unregister)
cx18_dvb_unregister(&cx->streams[type]);
kfree(cx->streams[type].dvb);
cx->streams[type].dvb = NULL;
cx18_stream_free(&cx->streams[type]);
}
continue;
}
/* No struct video_device, but can have buffers allocated */
if (type == CX18_ENC_STREAM_TYPE_IDX) {
/* If the module params didn't inhibit IDX ... */
if (cx->stream_buffers[type] != 0) {
cx->stream_buffers[type] = 0;
/*
 * Before calling cx18_stream_free(),
 * check if the IDX stream was actually set up.
 * Needed, since the cx18_probe() error path
 * exits through here as well as normal clean up
 */
if (cx->streams[type].buffers != 0)
cx18_stream_free(&cx->streams[type]);
}
continue;
}
/* If struct video_device exists, can have buffers allocated */
vdev = cx->streams[type].video_dev;
cx->streams[type].video_dev = NULL;
if (vdev == NULL)
continue;
if (type == CX18_ENC_STREAM_TYPE_YUV)
videobuf_mmap_free(&cx->streams[type].vbuf_q);
cx18_stream_free(&cx->streams[type]);
/* Unregister or release device */
if (unregister)
video_unregister_device(vdev);
else
video_device_release(vdev);
}
}
/*
 * cx18_vbi_setup - configure VBI capture for a stream
 *
 * Sets the 60 Hz / 50 Hz VBI line counts and start lines, programs the
 * digitizer's raw or sliced VBI format through the av subdevice, and
 * sends CX18_CPU_SET_RAW_VBI_PARAM to the encoder with the line counts,
 * bytes per line and the SAV/EAV RP codes bounding the VBI region.
 */
static void cx18_vbi_setup(struct cx18_stream *s)
{
struct cx18 *cx = s->cx;
int raw = cx18_raw_vbi(cx);
u32 data[CX2341X_MBOX_MAX_DATA];
int lines;
if (cx->is_60hz) {
cx->vbi.count = 12;
cx->vbi.start[0] = 10;
cx->vbi.start[1] = 273;
} else { /* PAL/SECAM */
cx->vbi.count = 18;
cx->vbi.start[0] = 6;
cx->vbi.start[1] = 318;
}
/* setup VBI registers */
if (raw)
v4l2_subdev_call(cx->sd_av, vbi, s_raw_fmt, &cx->vbi.in.fmt.vbi);
else
v4l2_subdev_call(cx->sd_av, vbi, s_sliced_fmt, &cx->vbi.in.fmt.sliced);
/*
 * Send the CX18_CPU_SET_RAW_VBI_PARAM API command to setup Encoder Raw
 * VBI when the first analog capture channel starts, as once it starts
 * (e.g. MPEG), we can't effect any change in the Encoder Raw VBI setup
 * (i.e. for the VBI capture channels). We also send it for each
 * analog capture channel anyway just to make sure we get the proper
 * behavior
 */
if (raw) {
lines = cx->vbi.count * 2;
} else {
/*
 * For 525/60 systems, according to the VIP 2 & BT.656 std:
 * The EAV RP code's Field bit toggles on line 4, a few lines
 * after the Vertcal Blank bit has already toggled.
 * Tell the encoder to capture 21-4+1=18 lines per field,
 * since we want lines 10 through 21.
 *
 * For 625/50 systems, according to the VIP 2 & BT.656 std:
 * The EAV RP code's Field bit toggles on line 1, a few lines
 * after the Vertcal Blank bit has already toggled.
 * (We've actually set the digitizer so that the Field bit
 * toggles on line 2.) Tell the encoder to capture 23-2+1=22
 * lines per field, since we want lines 6 through 23.
 */
lines = cx->is_60hz ? (21 - 4 + 1) * 2 : (23 - 2 + 1) * 2;
}
data[0] = s->handle;
/* Lines per field */
data[1] = (lines / 2) | ((lines / 2) << 16);
/* bytes per line */
data[2] = (raw ? vbi_active_samples
: (cx->is_60hz ? vbi_hblank_samples_60Hz
: vbi_hblank_samples_50Hz));
/* Every X number of frames a VBI interrupt arrives
(frames as in 25 or 30 fps) */
data[3] = 1;
/*
 * Set the SAV/EAV RP codes to look for as start/stop points
 * when in VIP-1.1 mode
 */
if (raw) {
/*
 * Start codes for beginning of "active" line in vertical blank
 * 0x20 ( VerticalBlank )
 * 0x60 ( EvenField VerticalBlank )
 */
data[4] = 0x20602060;
/*
 * End codes for end of "active" raw lines and regular lines
 * 0x30 ( VerticalBlank HorizontalBlank)
 * 0x70 ( EvenField VerticalBlank HorizontalBlank)
 * 0x90 (Task HorizontalBlank)
 * 0xd0 (Task EvenField HorizontalBlank)
 */
data[5] = 0x307090d0;
} else {
/*
 * End codes for active video, we want data in the hblank region
 * 0xb0 (Task 0 VerticalBlank HorizontalBlank)
 * 0xf0 (Task EvenField VerticalBlank HorizontalBlank)
 *
 * Since the V bit is only allowed to toggle in the EAV RP code,
 * just before the first active region line, these two
 * are problematic:
 * 0x90 (Task HorizontalBlank)
 * 0xd0 (Task EvenField HorizontalBlank)
 *
 * We have set the digitzer such that we don't have to worry
 * about these problem codes.
 */
data[4] = 0xB0F0B0F0;
/*
 * Start codes for beginning of active line in vertical blank
 * 0xa0 (Task VerticalBlank )
 * 0xe0 (Task EvenField VerticalBlank )
 */
data[5] = 0xA0E0A0E0;
}
CX18_DEBUG_INFO("Setup VBI h: %d lines %x bpl %d fr %d %x %x\n",
data[0], data[1], data[2], data[3], data[4], data[5]);
cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
}
/*
 * cx18_stream_rotate_idx_mdls - keep the firmware supplied with IDX MDLs
 *
 * When the firmware is running low on index-stream MDLs, sacrifice the
 * oldest MDL still holding data by moving it back to q_free so it can
 * be handed to the firmware again (its index entries are discarded).
 */
void cx18_stream_rotate_idx_mdls(struct cx18 *cx)
{
struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
struct cx18_mdl *mdl;
if (!cx18_stream_enabled(s))
return;
/* Return if the firmware is not running low on MDLs */
if ((atomic_read(&s->q_free.depth) + atomic_read(&s->q_busy.depth)) >=
CX18_ENC_STREAM_TYPE_IDX_FW_MDL_MIN)
return;
/* Return if there are no MDLs to rotate back to the firmware */
if (atomic_read(&s->q_full.depth) < 2)
return;
/*
 * Take the oldest IDX MDL still holding data, and discard its index
 * entries by scheduling the MDL to go back to the firmware
 */
mdl = cx18_dequeue(s, &s->q_full);
if (mdl != NULL)
cx18_enqueue(s, mdl, &s->q_free);
}
/*
 * _cx18_stream_put_mdl_fw - hand one MDL to the firmware
 *
 * If the stream is not actively capturing, the MDL is parked on q_free
 * instead.  Otherwise it is moved to q_busy and, if q_busy accepted it
 * (the firmware is below its MDL limit), the MDL is synced for DMA and
 * announced to the firmware with CX18_CPU_DE_SET_MDL.  Returns the
 * queue the MDL ended up on.
 */
static
struct cx18_queue *_cx18_stream_put_mdl_fw(struct cx18_stream *s,
struct cx18_mdl *mdl)
{
struct cx18 *cx = s->cx;
struct cx18_queue *q;
/* Don't give it to the firmware, if we're not running a capture */
if (s->handle == CX18_INVALID_TASK_HANDLE ||
test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
!test_bit(CX18_F_S_STREAMING, &s->s_flags))
return cx18_enqueue(s, mdl, &s->q_free);
q = cx18_enqueue(s, mdl, &s->q_busy);
if (q != &s->q_busy)
return q; /* The firmware has the max MDLs it can handle */
cx18_mdl_sync_for_device(s, mdl);
cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
(void __iomem *) &cx->scb->cpu_mdl[mdl->id] - cx->enc_mem,
s->bufs_per_mdl, mdl->id, s->mdl_size);
return q;
}
/*
 * _cx18_stream_load_fw_queue - refill the firmware's MDL queue
 *
 * Moves MDLs from q_free to q_busy (notifying the firmware of each)
 * until q_free is empty, the firmware's per-stream MDL limit is
 * reached, or an MDL fails to land on q_busy.
 */
static
void _cx18_stream_load_fw_queue(struct cx18_stream *s)
{
struct cx18_queue *q;
struct cx18_mdl *mdl;
if (atomic_read(&s->q_free.depth) == 0 ||
atomic_read(&s->q_busy.depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
return;
/* Move from q_free to q_busy notifying the firmware, until the limit */
do {
mdl = cx18_dequeue(s, &s->q_free);
if (mdl == NULL)
break;
q = _cx18_stream_put_mdl_fw(s, mdl);
} while (atomic_read(&s->q_busy.depth) < CX18_MAX_FW_MDLS_PER_STREAM
&& q == &s->q_busy);
}
/* cx18_out_work_handler - deferred work: top up the firmware MDL queue
 * for the stream that scheduled this work item. */
void cx18_out_work_handler(struct work_struct *work)
{
struct cx18_stream *s =
container_of(work, struct cx18_stream, out_work_order);
_cx18_stream_load_fw_queue(s);
}
/*
 * cx18_stream_configure_mdls - size the stream's MDLs for its payload
 *
 * Recomputes mdl_size and bufs_per_mdl per stream type (one full frame
 * for YUV, one field pair of VBI lines for VBI, one buffer otherwise),
 * then rebuilds the MDL queues to match.
 */
static void cx18_stream_configure_mdls(struct cx18_stream *s)
{
cx18_unload_queues(s);
switch (s->type) {
case CX18_ENC_STREAM_TYPE_YUV:
/*
 * Height should be a multiple of 32 lines.
 * Set the MDL size to the exact size needed for one frame.
 * Use enough buffers per MDL to cover the MDL size
 */
if (s->pixelformat == V4L2_PIX_FMT_HM12)
s->mdl_size = 720 * s->cx->cxhdl.height * 3 / 2;
else
s->mdl_size = 720 * s->cx->cxhdl.height * 2;
s->bufs_per_mdl = s->mdl_size / s->buf_size;
if (s->mdl_size % s->buf_size)
s->bufs_per_mdl++;
break;
case CX18_ENC_STREAM_TYPE_VBI:
s->bufs_per_mdl = 1;
if (cx18_raw_vbi(s->cx)) {
s->mdl_size = (s->cx->is_60hz ? 12 : 18)
* 2 * vbi_active_samples;
} else {
/*
 * See comment in cx18_vbi_setup() below about the
 * extra lines we capture in sliced VBI mode due to
 * the lines on which EAV RP codes toggle.
 */
s->mdl_size = s->cx->is_60hz
? (21 - 4 + 1) * 2 * vbi_hblank_samples_60Hz
: (23 - 2 + 1) * 2 * vbi_hblank_samples_50Hz;
}
break;
default:
s->bufs_per_mdl = 1;
s->mdl_size = s->buf_size * s->bufs_per_mdl;
break;
}
cx18_load_queues(s);
}
/*
 * Start an encoder capture stream: create a firmware capture task, program
 * its channel type and parameters, hand it our MDLs, and kick off capture.
 *
 * Returns 0 on success, -EINVAL if the stream is not enabled, if its type
 * is unknown, or if the firmware refuses to start capturing.
 */
int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
{
	u32 data[MAX_MB_ARGUMENTS];
	struct cx18 *cx = s->cx;
	int captype = 0;
	struct cx18_stream *s_idx;

	if (!cx18_stream_enabled(s))
		return -EINVAL;

	CX18_DEBUG_INFO("Start encoder stream %s\n", s->name);

	/* Map the stream type to a firmware capture channel type and do any
	   per-type bookkeeping before the task is created. */
	switch (s->type) {
	case CX18_ENC_STREAM_TYPE_MPG:
		captype = CAPTURE_CHANNEL_TYPE_MPEG;
		cx->mpg_data_received = cx->vbi_data_inserted = 0;
		cx->dualwatch_jiffies = jiffies;
		cx->dualwatch_stereo_mode = v4l2_ctrl_g_ctrl(cx->cxhdl.audio_mode);
		cx->search_pack_header = 0;
		break;

	case CX18_ENC_STREAM_TYPE_IDX:
		captype = CAPTURE_CHANNEL_TYPE_INDEX;
		break;
	case CX18_ENC_STREAM_TYPE_TS:
		captype = CAPTURE_CHANNEL_TYPE_TS;
		break;
	case CX18_ENC_STREAM_TYPE_YUV:
		captype = CAPTURE_CHANNEL_TYPE_YUV;
		break;
	case CX18_ENC_STREAM_TYPE_PCM:
		captype = CAPTURE_CHANNEL_TYPE_PCM;
		break;
	case CX18_ENC_STREAM_TYPE_VBI:
#ifdef CX18_ENCODER_PARSES_SLICED
		captype = cx18_raw_vbi(cx) ?
		     CAPTURE_CHANNEL_TYPE_VBI : CAPTURE_CHANNEL_TYPE_SLICED_VBI;
#else
		/*
		 * Currently we set things up so that Sliced VBI from the
		 * digitizer is handled as Raw VBI by the encoder
		 */
		captype = CAPTURE_CHANNEL_TYPE_VBI;
#endif
		cx->vbi.frame = 0;
		cx->vbi.inserted_frame = 0;
		memset(cx->vbi.sliced_mpeg_size,
			0, sizeof(cx->vbi.sliced_mpeg_size));
		break;
	default:
		return -EINVAL;
	}

	/* Clear Streamoff flags in case left from last capture */
	clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);

	/* NOTE(review): the result of CX18_CREATE_TASK is not checked before
	   data[0] is used as the task handle - TODO confirm firmware behavior
	   on failure. */
	cx18_vapi_result(cx, data, CX18_CREATE_TASK, 1, CPU_CMD_MASK_CAPTURE);
	s->handle = data[0];
	cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype);

	/*
	 * For everything but CAPTURE_CHANNEL_TYPE_TS, play it safe and
	 * set up all the parameters, as it is not obvious which parameters the
	 * firmware shares across capture channel types and which it does not.
	 *
	 * Some of the cx18_vapi() calls below apply to only certain capture
	 * channel types.  We're hoping there's no harm in calling most of them
	 * anyway, as long as the values are all consistent.  Setting some
	 * shared parameters will have no effect once an analog capture channel
	 * has started streaming.
	 */
	if (captype != CAPTURE_CHANNEL_TYPE_TS) {
		cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 8, 0);
		cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 4, 1);

		/*
		 * Audio related reset according to
		 * Documentation/video4linux/cx2341x/fw-encoder-api.txt
		 */
		if (atomic_read(&cx->ana_capturing) == 0)
			cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
				  s->handle, 12);

		/*
		 * Number of lines for Field 1 & Field 2 according to
		 * Documentation/video4linux/cx2341x/fw-encoder-api.txt
		 * Field 1 is 312 for 625 line systems in BT.656
		 * Field 2 is 313 for 625 line systems in BT.656
		 */
		cx18_vapi(cx, CX18_CPU_SET_CAPTURE_LINE_NO, 3,
			  s->handle, 312, 313);

		if (cx->v4l2_cap & V4L2_CAP_VBI_CAPTURE)
			cx18_vbi_setup(s);

		/*
		 * Select to receive I, P, and B frame index entries, if the
		 * index stream is enabled.  Otherwise disable index entry
		 * generation.
		 */
		s_idx = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
		cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 2,
				 s->handle, cx18_stream_enabled(s_idx) ? 7 : 0);

		/* Call out to the common CX2341x API setup for user controls */
		cx->cxhdl.priv = s;
		cx2341x_handler_setup(&cx->cxhdl);

		/*
		 * When starting a capture and we're set for radio,
		 * ensure the video is muted, despite the user control.
		 */
		if (!cx->cxhdl.video_mute &&
		    test_bit(CX18_F_I_RADIO_USER, &cx->i_flags))
			cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
			  (v4l2_ctrl_g_ctrl(cx->cxhdl.video_mute_yuv) << 8) | 1);

		/* Enable the Video Format Converter for UYVY 4:2:2 support,
		 * rather than the default HM12 Macroblock 4:2:0 support.
		 */
		if (captype == CAPTURE_CHANNEL_TYPE_YUV) {
			if (s->pixelformat == V4L2_PIX_FMT_UYVY)
				cx18_vapi(cx, CX18_CPU_SET_VFC_PARAM, 2,
					  s->handle, 1);
			else
				/* If in doubt, default to HM12 */
				cx18_vapi(cx, CX18_CPU_SET_VFC_PARAM, 2,
					  s->handle, 0);
		}
	}

	/* First capture on this device: mark busy and unmask DSP interrupts. */
	if (atomic_read(&cx->tot_capturing) == 0) {
		cx2341x_handler_set_busy(&cx->cxhdl, 1);
		clear_bit(CX18_F_I_EOS, &cx->i_flags);
		cx18_write_reg(cx, 7, CX18_DSP0_INTERRUPT_MASK);
	}

	/* Tell the firmware where to acknowledge MDLs for this stream. */
	cx18_vapi(cx, CX18_CPU_DE_SET_MDL_ACK, 3, s->handle,
		(void __iomem *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
		(void __iomem *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);

	/* Init all the cpu_mdls for this stream */
	cx18_stream_configure_mdls(s);
	_cx18_stream_load_fw_queue(s);

	/* begin_capture */
	if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
		CX18_DEBUG_WARN("Error starting capture!\n");
		/* Ensure we're really not capturing before releasing MDLs */
		set_bit(CX18_F_S_STOPPING, &s->s_flags);
		if (s->type == CX18_ENC_STREAM_TYPE_MPG)
			cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
		else
			cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
		clear_bit(CX18_F_S_STREAMING, &s->s_flags);
		/* FIXME - CX18_F_S_STREAMOFF as well? */
		cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
		cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
		s->handle = CX18_INVALID_TASK_HANDLE;
		clear_bit(CX18_F_S_STOPPING, &s->s_flags);
		if (atomic_read(&cx->tot_capturing) == 0) {
			set_bit(CX18_F_I_EOS, &cx->i_flags);
			cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
		}
		return -EINVAL;
	}

	/* you're live! sit back and await interrupts :) */
	if (captype != CAPTURE_CHANNEL_TYPE_TS)
		atomic_inc(&cx->ana_capturing);
	atomic_inc(&cx->tot_capturing);
	return 0;
}
EXPORT_SYMBOL(cx18_start_v4l2_encode_stream);
/* Stop every enabled stream on this device that is currently streaming. */
void cx18_stop_all_captures(struct cx18 *cx)
{
	int idx = CX18_MAX_STREAMS;

	/* Walk the streams from the highest index down. */
	while (--idx >= 0) {
		struct cx18_stream *stream = &cx->streams[idx];

		if (cx18_stream_enabled(stream) &&
		    test_bit(CX18_F_S_STREAMING, &stream->s_flags))
			cx18_stop_v4l2_encode_stream(stream, 0);
	}
}
/*
 * Stop an encoder capture stream and destroy its firmware task.
 *
 * @s:       the stream to stop
 * @gop_end: for MPG streams, request a clean stop at a GOP boundary; this
 *           is passed to the firmware but not honored by it (see below)
 *
 * Returns 0 on success (including when nothing was capturing), or -EINVAL
 * if the stream is not enabled.
 */
int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
{
	struct cx18 *cx = s->cx;

	if (!cx18_stream_enabled(s))
		return -EINVAL;

	/* This function assumes that you are allowed to stop the capture
	   and that we are actually capturing */

	CX18_DEBUG_INFO("Stop Capture\n");

	if (atomic_read(&cx->tot_capturing) == 0)
		return 0;

	set_bit(CX18_F_S_STOPPING, &s->s_flags);
	if (s->type == CX18_ENC_STREAM_TYPE_MPG)
		cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
	else
		cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);

	/*
	 * Dropped the unused "then = jiffies" dead store here: it was a
	 * leftover from a removed wait-for-gop_end path, which the firmware
	 * does not support anyway.
	 */
	if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
		CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
	}

	if (s->type != CX18_ENC_STREAM_TYPE_TS)
		atomic_dec(&cx->ana_capturing);
	atomic_dec(&cx->tot_capturing);

	/* Clear capture and no-read bits */
	clear_bit(CX18_F_S_STREAMING, &s->s_flags);

	/* Tell the CX23418 it can't use our buffers anymore */
	cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);

	cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
	s->handle = CX18_INVALID_TASK_HANDLE;
	clear_bit(CX18_F_S_STOPPING, &s->s_flags);

	if (atomic_read(&cx->tot_capturing) > 0)
		return 0;

	/* Last capture stopped: mark idle, mask DSP irqs, wake any waiters. */
	cx2341x_handler_set_busy(&cx->cxhdl, 0);
	cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
	wake_up(&s->waitq);
	return 0;
}
EXPORT_SYMBOL(cx18_stop_v4l2_encode_stream);
/*
 * Return the first valid firmware task handle owned by a registered stream,
 * for use with global (stream-independent) firmware commands.
 */
u32 cx18_find_handle(struct cx18 *cx)
{
	int idx;

	for (idx = 0; idx < CX18_MAX_STREAMS; idx++) {
		struct cx18_stream *stream = &cx->streams[idx];

		if (!stream->video_dev)
			continue;
		if (stream->handle == CX18_INVALID_TASK_HANDLE)
			continue;
		return stream->handle;
	}
	return CX18_INVALID_TASK_HANDLE;
}
/* Map a firmware task handle back to its enabled stream, or NULL if none. */
struct cx18_stream *cx18_handle_to_stream(struct cx18 *cx, u32 handle)
{
	int idx;

	if (handle == CX18_INVALID_TASK_HANDLE)
		return NULL;

	for (idx = 0; idx < CX18_MAX_STREAMS; idx++) {
		struct cx18_stream *stream = &cx->streams[idx];

		if (stream->handle == handle && cx18_stream_enabled(stream))
			return stream;
	}
	return NULL;
}
| gpl-2.0 |
Skin1980/bass | drivers/ata/sata_qstor.c | 2780 | 17843 | /*
* sata_qstor.c - Pacific Digital Corporation QStor SATA
*
* Maintained by: Mark Lord <mlord@pobox.com>
*
* Copyright 2005 Pacific Digital Corporation.
* (OSL/GPL code release authorized by Jalil Fadavi).
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sata_qstor"
#define DRV_VERSION "0.09"
enum {
QS_MMIO_BAR = 4,
QS_PORTS = 4,
QS_MAX_PRD = LIBATA_MAX_PRD,
QS_CPB_ORDER = 6,
QS_CPB_BYTES = (1 << QS_CPB_ORDER),
QS_PRD_BYTES = QS_MAX_PRD * 16,
QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES,
/* global register offsets */
QS_HCF_CNFG3 = 0x0003, /* host configuration offset */
QS_HID_HPHY = 0x0004, /* host physical interface info */
QS_HCT_CTRL = 0x00e4, /* global interrupt mask offset */
QS_HST_SFF = 0x0100, /* host status fifo offset */
QS_HVS_SERD3 = 0x0393, /* PHY enable offset */
/* global control bits */
QS_HPHY_64BIT = (1 << 1), /* 64-bit bus detected */
QS_CNFG3_GSRST = 0x01, /* global chip reset */
QS_SERD3_PHY_ENA = 0xf0, /* PHY detection ENAble*/
/* per-channel register offsets */
QS_CCF_CPBA = 0x0710, /* chan CPB base address */
QS_CCF_CSEP = 0x0718, /* chan CPB separation factor */
QS_CFC_HUFT = 0x0800, /* host upstream fifo threshold */
QS_CFC_HDFT = 0x0804, /* host downstream fifo threshold */
QS_CFC_DUFT = 0x0808, /* dev upstream fifo threshold */
QS_CFC_DDFT = 0x080c, /* dev downstream fifo threshold */
QS_CCT_CTR0 = 0x0900, /* chan control-0 offset */
QS_CCT_CTR1 = 0x0901, /* chan control-1 offset */
QS_CCT_CFF = 0x0a00, /* chan command fifo offset */
/* channel control bits */
QS_CTR0_REG = (1 << 1), /* register mode (vs. pkt mode) */
QS_CTR0_CLER = (1 << 2), /* clear channel errors */
QS_CTR1_RDEV = (1 << 1), /* sata phy/comms reset */
QS_CTR1_RCHN = (1 << 4), /* reset channel logic */
QS_CCF_RUN_PKT = 0x107, /* RUN a new dma PKT */
/* pkt sub-field headers */
QS_HCB_HDR = 0x01, /* Host Control Block header */
QS_DCB_HDR = 0x02, /* Device Control Block header */
/* pkt HCB flag bits */
QS_HF_DIRO = (1 << 0), /* data DIRection Out */
QS_HF_DAT = (1 << 3), /* DATa pkt */
QS_HF_IEN = (1 << 4), /* Interrupt ENable */
QS_HF_VLD = (1 << 5), /* VaLiD pkt */
/* pkt DCB flag bits */
QS_DF_PORD = (1 << 2), /* Pio OR Dma */
QS_DF_ELBA = (1 << 3), /* Extended LBA (lba48) */
/* PCI device IDs */
board_2068_idx = 0, /* QStor 4-port SATA/RAID */
};
enum {
QS_DMA_BOUNDARY = ~0UL
};
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;
struct qs_port_priv {
u8 *pkt;
dma_addr_t pkt_dma;
qs_state_t state;
};
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);
static struct scsi_host_template qs_ata_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = QS_MAX_PRD,
.dma_boundary = QS_DMA_BOUNDARY,
};
static struct ata_port_operations qs_ata_ops = {
.inherits = &ata_sff_port_ops,
.check_atapi_dma = qs_check_atapi_dma,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
.freeze = qs_freeze,
.thaw = qs_thaw,
.prereset = qs_prereset,
.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
.scr_read = qs_scr_read,
.scr_write = qs_scr_write,
.port_start = qs_port_start,
.host_stop = qs_host_stop,
};
static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &qs_ata_ops,
},
};
static const struct pci_device_id qs_ata_pci_tbl[] = {
{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
{ } /* terminate list */
};
static struct pci_driver qs_ata_pci_driver = {
.name = DRV_NAME,
.id_table = qs_ata_pci_tbl,
.probe = qs_ata_init_one,
.remove = ata_pci_remove_one,
};
/* Return the ioremapped base address of the QStor's MMIO BAR. */
static void __iomem *qs_mmio_base(struct ata_host *host)
{
	void __iomem *base = host->iomap[QS_MMIO_BAR];

	return base;
}
/* libata ->check_atapi_dma hook: nonzero forces PIO for ATAPI commands. */
static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
struct qs_port_priv *pp = ap->private_data;
pp->state = qs_state_mmio;
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
}
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
qs_enter_reg_mode(ap);
}
static void qs_freeze(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
qs_enter_reg_mode(ap);
}
static void qs_thaw(struct ata_port *ap)
{
u8 __iomem *mmio_base = qs_mmio_base(ap->host);
qs_enter_reg_mode(ap);
writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/* Reset the channel logic before handing off to the generic SFF prereset. */
static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
	qs_reset_channel_logic(link->ap);
	return ata_sff_prereset(link, deadline);
}
/* Read a SATA SCR register; registers are spaced 8 bytes apart on QStor. */
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	addr = link->ap->ioaddr.scr_addr + (sc_reg * 8);
	*val = readl(addr);
	return 0;
}
/* Force the channel into register mode before generic SFF EH runs. */
static void qs_error_handler(struct ata_port *ap)
{
	qs_enter_reg_mode(ap);
	ata_sff_error_handler(ap);
}
/* Write a SATA SCR register; registers are spaced 8 bytes apart on QStor. */
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	addr = link->ap->ioaddr.scr_addr + (sc_reg * 8);
	writel(val, addr);
	return 0;
}
/*
 * Build the hardware PRD (scatter/gather) table for a DMA command.
 *
 * The PRD table lives in the per-port packet buffer, right after the CPB.
 * Each entry is 16 bytes (QS_PRD_BYTES / QS_MAX_PRD): a little-endian
 * 64-bit DMA address, a little-endian 32-bit length, then 4 pad bytes -
 * which is why the second "prd += sizeof(u64)" advances 8 bytes after
 * writing only a u32.
 *
 * Returns the number of PRD entries written (one per sg element).
 */
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	u8 *prd = pp->pkt + QS_CPB_BYTES;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64); /* skip the 4 unused pad bytes too */

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
					(unsigned long long)addr, len);
	}

	return si;
}
/*
 * Prepare the command packet (HCB + DCB + FIS) for a DMA command.
 *
 * Layout within pp->pkt (offsets in bytes):
 *   0..23  Host Control Block: header, flags, byte count, PRD count,
 *          and the 64-bit DMA address of the PRD table
 *   24..31 Device Control Block: header and flags
 *   32..   the taskfile rendered as a FIS
 *
 * Non-DMA protocols are handled in register mode, so only the mode switch
 * is done for them here.
 */
static void qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	/* PRD table immediately follows the CPB in the DMA buffer */
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
}
/*
 * Kick off a prepared packet-mode command on the channel.
 *
 * The wmb() is required: the CPB/PRD contents written by qs_qc_prep() and
 * qs_fill_sg() must reach memory before the RUN_PKT doorbell is rung.
 */
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();                   /* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF); /* flush */
}
/*
 * Issue a queued command: ATA DMA runs in packet mode, everything else is
 * handed to the generic SFF issue path in register mode.
 */
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	if (qc->tf.protocol == ATA_PROT_DMA) {
		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;
	}

	/* ATAPI DMA is rejected by qs_check_atapi_dma(), so it cannot occur */
	BUG_ON(qc->tf.protocol == ATAPI_PROT_DMA);

	pp->state = qs_state_mmio;
	return ata_sff_qc_issue(qc);
}
/*
 * Complete a packet-mode command based on its device status byte; on error,
 * record the status for EH and abort or freeze the port.
 */
static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
	struct ata_port *ap = qc->ap;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	qc->err_mask |= ac_err_mask(status);

	if (qc->err_mask == 0) {
		ata_qc_complete(qc);
		return;
	}

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "status 0x%02X", status);

	/* Pure device errors can be aborted; anything else freezes the port. */
	if (qc->err_mask == AC_ERR_DEV)
		ata_port_abort(ap);
	else
		ata_port_freeze(ap);
}
/*
 * Drain the host status FIFO and complete packet-mode commands.
 *
 * Each FIFO slot is two 32-bit words; the top bits of the second word carry
 * an "entry valid" flag and a "FIFO empty" flag, and the loop keeps reading
 * until the FIFO reports empty.  Returns nonzero if any entry was consumed.
 */
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = qs_mmio_base(host);

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */

		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host->ports[port_no];
			struct qs_port_priv *pp = ap->private_data;
			struct ata_queued_cmd *qc;

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			/* Ignore entries for ports not in packet mode. */
			if (!pp || pp->state != qs_state_pkt)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
				switch (sHST) {
				case 0: /* successful CPB */
				case 3: /* device error */
					qs_enter_reg_mode(qc->ap);
					qs_do_or_die(qc, sDST);
					break;
				default:
					break;
				}
			}
		}
	} while (!sFFE);
	return handled;
}
/*
 * Service register-mode (non-packet) interrupts on every port.
 *
 * Returns nonzero if any port's interrupt was handled (or acked as a
 * spurious one).
 */
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct qs_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (!qc) {
			/*
			 * The qstor hardware generates spurious
			 * interrupts from time to time when switching
			 * in and out of packet mode.  There's no
			 * obvious way to know if we're here now due
			 * to that, so just ack the irq and pretend we
			 * knew it was ours.. (ugh).  This does not
			 * affect packet mode.
			 */
			ata_sff_check_status(ap);
			handled = 1;
			continue;
		}

		/* Only ports currently in MMIO mode belong to this path. */
		if (!pp || pp->state != qs_state_mmio)
			continue;
		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_sff_port_intr(ap, qc);
	}
	return handled;
}
/* Top-level irq handler: service packet-mode then MMIO-mode completions. */
static irqreturn_t qs_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned long flags;
	unsigned int handled;

	VPRINTK("ENTER\n");

	spin_lock_irqsave(&host->lock, flags);
	handled = qs_intr_pkt(host);
	handled |= qs_intr_mmio(host);
	spin_unlock_irqrestore(&host->lock, flags);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
/*
 * Fill in the taskfile register addresses for one channel.
 *
 * Registers sit 8 bytes apart starting at base + 0x400, with the HOB
 * (high-order byte) copies at odd offsets as noted below; SCR registers
 * start at base + 0xc00.
 */
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}
/*
 * Per-port init: allocate the CPB+PRD packet buffer (device-managed, so it
 * is freed automatically on teardown), park the channel in register mode,
 * and program the packet buffer's 64-bit DMA address into the channel.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = qs_mmio_base(ap->host);
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	qs_enter_reg_mode(ap);
	/* Write the CPB base address, low word then high word. */
	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;
}
static void qs_host_stop(struct ata_host *host)
{
void __iomem *mmio_base = qs_mmio_base(host);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}
/*
 * One-time controller init: global reset, per-channel reset into register
 * mode, PHY enable, FIFO thresholds, CPB size, then unmask interrupts.
 */
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);        /* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER,    chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/*
* The QStor understands 64-bit buses, and uses 64-bit fields
* for DMA pointers regardless of bus width. We just have to
* make sure our DMA masks are set appropriately for whatever
* bridge lies between us and the QStor, and then the DMA mapping
* code will ensure we only ever "see" appropriate buffer addresses.
* If we're 32-bit limited somewhere, then our 64-bit fields will
* just end up with zeros in the upper 32-bits, without any special
* logic required outside of this routine (below).
*/
/*
 * Configure PCI DMA masks based on the detected bus width.
 *
 * On a 64-bit bus, try a 64-bit streaming mask; if the matching consistent
 * mask fails, fall back to a 32-bit consistent mask before giving up.
 * Otherwise (or if the 64-bit streaming mask fails) use 32-bit for both.
 *
 * Returns 0 on success or the failing pci_set_*_dma_mask() error code.
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
/*
 * PCI probe: allocate the 4-port libata host, map the MMIO BAR, set DMA
 * masks, wire up each port's register addresses, init the controller, and
 * activate the host with a shared irq handler.
 *
 * All resources are device-managed (pcim_*/devm_*), so error paths just
 * return and cleanup is automatic.
 */
static int qs_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		/* each channel occupies a 0x4000-byte window in the BAR */
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}
module_pci_driver(qs_ata_pci_driver);
MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
htc-mirror/ruby-ics-crc-3.0.16-fd362fb | drivers/usb/host/imx21-dbg.c | 3036 | 12820 | /*
* Copyright (c) 2009 by Martin Fuzzey
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of imx21-hcd.c */
#ifndef DEBUG
static inline void create_debug_files(struct imx21 *imx21) { }
static inline void remove_debug_files(struct imx21 *imx21) { }
static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
int status) {}
static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
struct urb *urb) {}
static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
struct urb *urb) {}
static inline void debug_etd_allocated(struct imx21 *imx21) {}
static inline void debug_etd_freed(struct imx21 *imx21) {}
static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
static inline void debug_isoc_submitted(struct imx21 *imx21,
int frame, struct td *td) {}
static inline void debug_isoc_completed(struct imx21 *imx21,
int frame, struct td *td, int cc, int len) {}
#else
#include <linux/debugfs.h>
#include <linux/seq_file.h>
static const char *dir_labels[] = {
"TD 0",
"OUT",
"IN",
"TD 1"
};
static const char *speed_labels[] = {
"Full",
"Low"
};
static const char *format_labels[] = {
"Control",
"ISO",
"Bulk",
"Interrupt"
};
/* Select the isoc or non-isoc statistics bucket for this URB. */
static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
	struct urb *urb)
{
	if (usb_pipeisoc(urb->pipe))
		return &imx21->isoc_stats;
	return &imx21->nonisoc_stats;
}
static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
{
stats_for_urb(imx21, urb)->submitted++;
}
static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
{
if (st)
stats_for_urb(imx21, urb)->completed_failed++;
else
stats_for_urb(imx21, urb)->completed_ok++;
}
static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
{
stats_for_urb(imx21, urb)->unlinked++;
}
static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
{
stats_for_urb(imx21, urb)->queue_etd++;
}
static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
{
stats_for_urb(imx21, urb)->queue_dmem++;
}
/* Track current and high-water-mark usage of ETDs and DMEM bytes. */
static inline void debug_etd_allocated(struct imx21 *imx21)
{
	++imx21->etd_usage.value;
	imx21->etd_usage.maximum = max(imx21->etd_usage.value,
				       imx21->etd_usage.maximum);
}

static inline void debug_etd_freed(struct imx21 *imx21)
{
	--imx21->etd_usage.value;
}

static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value += size;
	imx21->dmem_usage.maximum = max(imx21->dmem_usage.value,
					imx21->dmem_usage.maximum);
}

static inline void debug_dmem_freed(struct imx21 *imx21, int size)
{
	imx21->dmem_usage.value -= size;
}
/*
 * Record a newly submitted isoc TD in the circular trace buffer.
 * The index wraps modulo the buffer size, so old entries are overwritten.
 */
static void debug_isoc_submitted(struct imx21 *imx21,
	int frame, struct td *td)
{
	struct debug_isoc_trace *trace = &imx21->isoc_trace[
		imx21->isoc_trace_index++];

	imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
	trace->schedule_frame = td->frame;
	trace->submit_frame = frame;
	trace->request_len = td->len;
	trace->td = td;
}
/*
 * Record completion of an isoc TD: find its trace entry (matched by td
 * pointer), fill in the completion data, and on error additionally copy
 * the entry into the separate "failed" circular trace buffer.
 */
static inline void debug_isoc_completed(struct imx21 *imx21,
	int frame, struct td *td, int cc, int len)
{
	struct debug_isoc_trace *trace, *trace_failed;
	int i;
	int found = 0;

	trace = imx21->isoc_trace;
	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
		if (trace->td == td) {
			trace->done_frame = frame;
			trace->done_len = len;
			trace->cc = cc;
			trace->td = NULL; /* entry is now free for reuse */
			found = 1;
			break;
		}
	}

	if (found && cc) {
		trace_failed = &imx21->isoc_trace_failed[
					imx21->isoc_trace_index_failed++];

		imx21->isoc_trace_index_failed %= ARRAY_SIZE(
					imx21->isoc_trace_failed);
		*trace_failed = *trace;
	}
}
/* Render an endpoint description (or "none") into buf; returns buf. */
static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
{
	if (!ep) {
		snprintf(buf, bufsize, "none");
		return buf;
	}

	snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
		ep->desc.bEndpointAddress,
		usb_endpoint_type(&ep->desc),
		ep);
	return buf;
}
/*
 * Decode the bit fields of ETD dword 0 (device address, endpoint number,
 * transfer direction, speed, format, halted flag) into a readable string.
 * Returns buf.
 */
static char *format_etd_dword0(u32 value, char *buf, int bufsize)
{
	snprintf(buf, bufsize,
		"addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
		value & 0x7F,
		(value >> DW0_ENDPNT) & 0x0F,
		dir_labels[(value >> DW0_DIRECT) & 0x03],
		speed_labels[(value >> DW0_SPEED) & 0x01],
		format_labels[(value >> DW0_FORMAT) & 0x03],
		(value >> DW0_HALTED) & 0x01);
	return buf;
}
/*
 * debugfs "status" file: snapshot ETD/DMEM usage and queue depths under the
 * lock, then print them (printing happens outside the lock).
 *
 * Fix: corrected the typo "DMEM alocated" -> "DMEM allocated" in the
 * user-visible debugfs output.
 */
static int debug_status_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	int etds_allocated = 0;
	int etds_sw_busy = 0;
	int etds_hw_busy = 0;
	int dmem_blocks = 0;
	int queued_for_etd = 0;
	int queued_for_dmem = 0;
	unsigned int dmem_bytes = 0;
	int i;
	struct etd_priv *etd;
	u32 etd_enable_mask;
	unsigned long flags;
	struct imx21_dmem_area *dmem;
	struct ep_priv *ep_priv;

	spin_lock_irqsave(&imx21->lock, flags);

	etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		if (etd->alloc)
			etds_allocated++;
		if (etd->urb)
			etds_sw_busy++;
		if (etd_enable_mask & (1<<i))
			etds_hw_busy++;
	}

	list_for_each_entry(dmem, &imx21->dmem_list, list) {
		dmem_bytes += dmem->size;
		dmem_blocks++;
	}

	list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
		queued_for_etd++;

	list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
		queued_for_dmem++;

	spin_unlock_irqrestore(&imx21->lock, flags);

	seq_printf(s,
		"Frame: %d\n"
		"ETDs allocated: %d/%d (max=%d)\n"
		"ETDs in use sw: %d\n"
		"ETDs in use hw: %d\n"
		"DMEM allocated: %d/%d (max=%d)\n"
		"DMEM blocks: %d\n"
		"Queued waiting for ETD: %d\n"
		"Queued waiting for DMEM: %d\n",
		readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
		etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
		etds_sw_busy,
		etds_hw_busy,
		dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
		dmem_blocks,
		queued_for_etd,
		queued_for_dmem);

	return 0;
}
static int debug_dmem_show(struct seq_file *s, void *v)
{
struct imx21 *imx21 = s->private;
struct imx21_dmem_area *dmem;
unsigned long flags;
char ep_text[40];
spin_lock_irqsave(&imx21->lock, flags);
list_for_each_entry(dmem, &imx21->dmem_list, list)
seq_printf(s,
"%04X: size=0x%X "
"ep=%s\n",
dmem->offset, dmem->size,
format_ep(dmem->ep, ep_text, sizeof(ep_text)));
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
}
/*
 * debugfs "etd" file: dump the software state and the four hardware dwords
 * of every ETD, decoding dword 0 (endpoint info) and the completion code
 * in dword 2.  Holds the lock for the whole dump since it reads hardware
 * registers per ETD.
 */
static int debug_etd_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	struct etd_priv *etd;
	char buf[60];
	u32 dword;
	int i, j;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
		int state = -1;
		struct urb_priv *urb_priv;
		if (etd->urb) {
			urb_priv = etd->urb->hcpriv;
			if (urb_priv)
				state = urb_priv->state;
		}

		seq_printf(s,
			"etd_num: %d\n"
			"ep: %s\n"
			"alloc: %d\n"
			"len: %d\n"
			"busy sw: %d\n"
			"busy hw: %d\n"
			"urb state: %d\n"
			"current urb: %p\n",

			i,
			format_ep(etd->ep, buf, sizeof(buf)),
			etd->alloc,
			etd->len,
			etd->urb != NULL,
			(readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
			state,
			etd->urb);

		/* Dump both the dwords as submitted and their current values */
		for (j = 0; j < 4; j++) {
			dword = etd_readl(imx21, i, j);
			switch (j) {
			case 0:
				format_etd_dword0(dword, buf, sizeof(buf));
				break;
			case 2:
				snprintf(buf, sizeof(buf),
					"cc=0X%02X", dword >> DW2_COMPCODE);
				break;
			default:
				*buf = 0;
				break;
			}
			seq_printf(s,
				"dword %d: submitted=%08X cur=%08X [%s]\n",
				j,
				etd->submitted_dwords[j],
				dword,
				buf);
		}
		seq_printf(s, "\n");
	}

	spin_unlock_irqrestore(&imx21->lock, flags);
	return 0;
}
/* Print one named debug_stats bucket (URB lifecycle counters) to seq_file. */
static void debug_statistics_show_one(struct seq_file *s,
	const char *name, struct debug_stats *stats)
{
	seq_printf(s, "%s:\n"
		"submitted URBs: %lu\n"
		"completed OK: %lu\n"
		"completed failed: %lu\n"
		"unlinked: %lu\n"
		"queued for ETD: %lu\n"
		"queued for DMEM: %lu\n\n",
		name,
		stats->submitted,
		stats->completed_ok,
		stats->completed_failed,
		stats->unlinked,
		stats->queue_etd,
		stats->queue_dmem);
}
/* debugfs "statistics" file: non-isoc and isoc buckets plus kludge count. */
static int debug_statistics_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	unsigned long flags;

	spin_lock_irqsave(&imx21->lock, flags);

	debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
	debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
	seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
	spin_unlock_irqrestore(&imx21->lock, flags);

	return 0;
}
/* Print one isochronous trace record under the given heading. */
static void debug_isoc_show_one(struct seq_file *s,
	const char *name, int index, struct debug_isoc_trace *trace)
{
	seq_printf(s, "%s %d:\n" "cc=0X%02X\n", name, index, trace->cc);
	seq_printf(s,
		"scheduled frame %d (%d)\n"
		"submitted frame %d (%d)\n"
		"completed frame %d (%d)\n",
		trace->schedule_frame, trace->schedule_frame & 0xFFFF,
		trace->submit_frame, trace->submit_frame & 0xFFFF,
		trace->done_frame, trace->done_frame & 0xFFFF);
	seq_printf(s,
		"requested length=%d\n"
		"completed length=%d\n\n",
		trace->request_len, trace->done_len);
}
/* debugfs "isoc" file: failed traces first, then the rolling trace. */
static int debug_isoc_show(struct seq_file *s, void *v)
{
	struct imx21 *imx21 = s->private;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&imx21->lock, irqflags);

	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++)
		debug_isoc_show_one(s, "isoc failed", i,
				&imx21->isoc_trace_failed[i]);

	for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++)
		debug_isoc_show_one(s, "isoc", i, &imx21->isoc_trace[i]);

	spin_unlock_irqrestore(&imx21->lock, irqflags);
	return 0;
}
/*
 * debugfs open callbacks: each binds its file to the matching *_show()
 * routine via single_open(); inode->i_private carries the struct imx21.
 */
static int debug_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_status_show, inode->i_private);
}

static int debug_dmem_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_dmem_show, inode->i_private);
}

static int debug_etd_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_etd_show, inode->i_private);
}

static int debug_statistics_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_statistics_show, inode->i_private);
}

static int debug_isoc_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_isoc_show, inode->i_private);
}

/* read-only seq_file operations for each debugfs entry */
static const struct file_operations debug_status_fops = {
	.open = debug_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_dmem_fops = {
	.open = debug_dmem_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_etd_fops = {
	.open = debug_etd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_statistics_fops = {
	.open = debug_statistics_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations debug_isoc_fops = {
	.open = debug_isoc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Create the debugfs directory and its read-only entries.  On any
 * failure the partially built tree is removed and debug_root is left
 * NULL, so remove_debug_files() remains safe to call unconditionally.
 * (This kernel's debugfs_create_file() returns NULL on failure.)
 */
static void create_debug_files(struct imx21 *imx21)
{
	imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
	if (!imx21->debug_root)
		goto failed_create_rootdir;

	if (!debugfs_create_file("status", S_IRUGO,
			imx21->debug_root, imx21, &debug_status_fops))
		goto failed_create;

	if (!debugfs_create_file("dmem", S_IRUGO,
			imx21->debug_root, imx21, &debug_dmem_fops))
		goto failed_create;

	if (!debugfs_create_file("etd", S_IRUGO,
			imx21->debug_root, imx21, &debug_etd_fops))
		goto failed_create;

	if (!debugfs_create_file("statistics", S_IRUGO,
			imx21->debug_root, imx21, &debug_statistics_fops))
		goto failed_create;

	if (!debugfs_create_file("isoc", S_IRUGO,
			imx21->debug_root, imx21, &debug_isoc_fops))
		goto failed_create;

	return;

failed_create:
	debugfs_remove_recursive(imx21->debug_root);

failed_create_rootdir:
	imx21->debug_root = NULL;
}
/*
 * Tear down the debugfs tree created by create_debug_files().
 *
 * debugfs_remove_recursive() is a documented no-op for a NULL dentry,
 * so no guard is needed; clearing the pointer keeps this idempotent.
 */
static void remove_debug_files(struct imx21 *imx21)
{
	debugfs_remove_recursive(imx21->debug_root);
	imx21->debug_root = NULL;
}
#endif
| gpl-2.0 |
aloksinha2001/rk3x_kernel_3.0.36 | fs/btrfs/inode-item.c | 3036 | 6442 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
/*
 * Scan the INODE_REF item that @path points at for a back reference
 * whose name matches (name, name_len).  Returns 1 and sets *ref_ret on
 * a match, 0 when no entry in the item matches.
 */
static int find_name_in_backref(struct btrfs_path *path, const char *name,
			 int name_len, struct btrfs_inode_ref **ref_ret)
{
	struct extent_buffer *eb = path->nodes[0];
	unsigned long base = btrfs_item_ptr_offset(eb, path->slots[0]);
	u32 total = btrfs_item_size_nr(eb, path->slots[0]);
	u32 offset = 0;

	while (offset < total) {
		struct btrfs_inode_ref *cur;
		unsigned long name_start;
		int cur_len;

		cur = (struct btrfs_inode_ref *)(base + offset);
		cur_len = btrfs_inode_ref_name_len(eb, cur);
		name_start = (unsigned long)(cur + 1);
		offset += cur_len + sizeof(*cur);
		if (cur_len != name_len)
			continue;
		if (!memcmp_extent_buffer(eb, name, name_start, name_len)) {
			*ref_ret = cur;
			return 1;
		}
	}
	return 0;
}
/*
 * Look up the INODE_REF back reference for (inode_objectid, ref_objectid)
 * that carries the given name.  Returns the ref on success, NULL when
 * either the key or the name is absent, or an ERR_PTR on search failure.
 * mod < 0 reserves space for a later delete; mod != 0 COWs the path.
 */
struct btrfs_inode_ref *
btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			const char *name, int name_len,
			u64 inode_objectid, u64 ref_objectid, int mod)
{
	struct btrfs_inode_ref *ref;
	struct btrfs_key key;
	int ret;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = ref_objectid;

	ret = btrfs_search_slot(trans, root, &key, path,
				mod < 0 ? -1 : 0, mod != 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0)
		return NULL;
	if (!find_name_in_backref(path, name, name_len, &ref))
		return NULL;
	return ref;
}
/*
 * Remove the back reference named (name, name_len) that points from the
 * parent @ref_objectid to @inode_objectid.  If @index is non-NULL it
 * receives the directory index stored in the ref before removal.
 * Returns 0, -ENOENT when the key or the name is missing, -ENOMEM, or
 * another negative errno from the tree search.
 */
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const char *name, int name_len,
			u64 inode_objectid, u64 ref_objectid, u64 *index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;
	u32 sub_item_len;
	int ret;
	int del_len = name_len + sizeof(*ref);	/* bytes one ref occupies */

	key.objectid = inode_objectid;
	key.offset = ref_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	} else if (ret < 0) {
		goto out;
	}
	if (!find_name_in_backref(path, name, name_len, &ref)) {
		ret = -ENOENT;
		goto out;
	}
	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (index)
		*index = btrfs_inode_ref_index(leaf, ref);

	/* our ref is the only one left: drop the whole item */
	if (del_len == item_size) {
		ret = btrfs_del_item(trans, root, path);
		goto out;
	}
	/* otherwise slide the trailing refs down over ours and shrink */
	ptr = (unsigned long)ref;
	sub_item_len = name_len + sizeof(*ref);
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
			      item_size - (ptr + sub_item_len - item_start));
	ret = btrfs_truncate_item(trans, root, path,
				  item_size - sub_item_len, 1);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Insert a back reference (name + directory @index) pointing from the
 * parent @ref_objectid to @inode_objectid.  If an INODE_REF item already
 * exists for the key, the new ref is appended to it; if that exact name
 * is already recorded, -EEXIST is returned.  -EOVERFLOW from the tree
 * insertion (the item would not fit in a leaf) is mapped to -EMLINK.
 */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
			   u64 inode_objectid, u64 ref_objectid, u64 index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	int ret;
	int ins_len = name_len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.offset = ref_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      ins_len);
	if (ret == -EEXIST) {
		u32 old_size;

		/* name already present: propagate ret == -EEXIST */
		if (find_name_in_backref(path, name, name_len, &ref))
			goto out;

		/* append the new ref at the end of the existing item */
		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		ret = btrfs_extend_item(trans, root, path, ins_len);
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
		ret = 0;
	} else if (ret < 0) {
		/* item too large for a leaf means too many hard links */
		if (ret == -EOVERFLOW)
			ret = -EMLINK;
		goto out;
	} else {
		/* fresh item: write the single ref header */
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
	}
	/* name bytes follow the ref header in both paths */
	write_extent_buffer(path->nodes[0], name, ptr, name_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}
/* Insert an empty INODE_ITEM for @objectid; @path points at the new item. */
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid)
{
	struct btrfs_key key = {
		.objectid = objectid,
		.offset = 0,
	};

	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	return btrfs_insert_empty_item(trans, root, path, &key,
				       sizeof(struct btrfs_inode_item));
}
/*
 * Position @path at the inode item given by @location.  mod < 0
 * reserves space for deletion; mod != 0 COWs the path.
 *
 * Special case: a miss on a ROOT_ITEM key with offset (u64)-1 (the
 * "latest root" convention) may still match the previous slot, since
 * any real offset with the same objectid/type sorts lower; back up one
 * slot and accept it when objectid and type agree.
 */
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
		       *root, struct btrfs_path *path,
		       struct btrfs_key *location, int mod)
{
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;

	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
	if (ret > 0 && btrfs_key_type(location) == BTRFS_ROOT_ITEM_KEY &&
	    location->offset == (u64)-1 && path->slots[0] != 0) {
		slot = path->slots[0] - 1;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid == location->objectid &&
		    btrfs_key_type(&found_key) == btrfs_key_type(location)) {
			path->slots[0]--;
			return 0;
		}
	}
	return ret;
}
| gpl-2.0 |
noobnl/android_kernel_samsung_d2-jb_2.5.1 | sound/pci/hda/patch_cirrus.c | 3292 | 52972 | /*
* HD audio interface patch for Cirrus Logic CS420x chip
*
* Copyright (c) 2009 Takashi Iwai <tiwai@suse.de>
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_jack.h"
#include <sound/tlv.h>
/*
*/
/* codec private data for the CS420x/CS421x drivers */
struct cs_spec {
	int board_config;
	struct auto_pin_cfg autocfg;	/* parsed pin configuration */
	struct hda_multi_out multiout;
	struct snd_kcontrol *vmaster_sw;	/* virtual master switch */
	struct snd_kcontrol *vmaster_vol;	/* virtual master volume */
	hda_nid_t dac_nid[AUTO_CFG_MAX_OUTS];
	hda_nid_t slave_dig_outs[2];

	/* capture routing, indexed by autocfg input index unless noted */
	unsigned int input_idx[AUTO_PIN_LAST];	/* control item -> input */
	unsigned int capsrc_idx[AUTO_PIN_LAST];	/* input -> control item */
	hda_nid_t adc_nid[AUTO_PIN_LAST];	/* ADC reaching this input */
	unsigned int adc_idx[AUTO_PIN_LAST];	/* connection idx on ADC */
	unsigned int num_inputs;
	unsigned int cur_input;		/* currently selected input */
	unsigned int automic_idx;	/* ext-mic input for auto-mic */
	hda_nid_t cur_adc;		/* ADC of the running stream, 0 if none */
	unsigned int cur_adc_stream_tag;
	unsigned int cur_adc_format;
	hda_nid_t dig_in;
	const struct hda_bind_ctls *capture_bind[2];

	unsigned int gpio_mask;
	unsigned int gpio_dir;
	unsigned int gpio_data;
	unsigned int gpio_eapd_hp; /* EAPD GPIO bit for headphones */
	unsigned int gpio_eapd_speaker; /* EAPD GPIO bit for speakers */

	struct hda_pcm pcm_rec[2];	/* PCM information */

	unsigned int hp_detect:1;
	unsigned int mic_detect:1;
	/* CS421x */
	unsigned int spdif_detect:1;
	unsigned int sense_b:1;
	hda_nid_t vendor_nid;
	struct hda_input_mux input_mux;
	unsigned int last_input;
};

/* available models with CS420x */
enum {
	CS420X_MBP53,
	CS420X_MBP55,
	CS420X_IMAC27,
	CS420X_IMAC27_122,
	CS420X_APPLE,
	CS420X_AUTO,
	CS420X_MODELS
};

/* CS421x boards */
enum {
	CS421X_CDB4210,
	CS421X_MODELS
};

/* Vendor-specific processing widget */
#define CS420X_VENDOR_NID	0x11
#define CS_DIG_OUT1_PIN_NID	0x10
#define CS_DIG_OUT2_PIN_NID	0x15
#define CS_DMIC1_PIN_NID	0x12
#define CS_DMIC2_PIN_NID	0x0e

/* coef indices */
#define IDX_SPDIF_STAT		0x0000
#define IDX_SPDIF_CTL		0x0001
#define IDX_ADC_CFG		0x0002
/* SZC (soft-ramp / zero-cross) bitmask, 4 modes below:
 * 0 = immediate,
 * 1 = digital immediate, analog zero-cross
 * 2 = digital & analog soft-ramp
 * 3 = digital soft-ramp, analog zero-cross
 */
#define CS_COEF_ADC_SZC_MASK (3 << 0)
#define CS_COEF_ADC_MIC_SZC_MODE (3 << 0) /* SZC setup for mic */
#define CS_COEF_ADC_LI_SZC_MODE (3 << 0) /* SZC setup for line-in */
/* PGA mode: 0 = differential, 1 = single-ended */
#define CS_COEF_ADC_MIC_PGA_MODE (1 << 5) /* PGA setup for mic */
#define CS_COEF_ADC_LI_PGA_MODE (1 << 6) /* PGA setup for line-in */
#define IDX_DAC_CFG		0x0003
/* SZC bitmask, 4 modes below:
 * 0 = Immediate
 * 1 = zero-cross
 * 2 = soft-ramp
 * 3 = soft-ramp on zero-cross
 */
#define CS_COEF_DAC_HP_SZC_MODE (3 << 0) /* nid 0x02 */
#define CS_COEF_DAC_LO_SZC_MODE (3 << 2) /* nid 0x03 */
#define CS_COEF_DAC_SPK_SZC_MODE (3 << 4) /* nid 0x04 */
#define IDX_BEEP_CFG		0x0004
/* 0x0008 - test reg key */
/* 0x0009 - 0x0014 -> 12 test regs */
/* 0x0015 - visibility reg */

/*
 * Cirrus Logic CS4210
 *
 * 1 DAC => HP(sense) / Speakers,
 * 1 ADC <= LineIn(sense) / MicIn / DMicIn,
 * 1 SPDIF OUT => SPDIF Transmitter(sense)
 */
#define CS4210_DAC_NID		0x02
#define CS4210_ADC_NID		0x03
#define CS4210_VENDOR_NID	0x0B

#define CS421X_DMIC_PIN_NID	0x09 /* Port E */
#define CS421X_SPDIF_PIN_NID	0x0A /* Port H */

#define CS421X_IDX_DEV_CFG	0x01
#define CS421X_IDX_ADC_CFG	0x02
#define CS421X_IDX_DAC_CFG	0x03
#define CS421X_IDX_SPK_CTL	0x04

#define SPDIF_EVENT		0x04

/* Cirrus Logic CS4213 is like CS4210 but does not have SPDIF input/output */
#define CS4213_VENDOR_NID	0x09
static inline int cs_vendor_coef_get(struct hda_codec *codec, unsigned int idx)
{
struct cs_spec *spec = codec->spec;
snd_hda_codec_write(codec, spec->vendor_nid, 0,
AC_VERB_SET_COEF_INDEX, idx);
return snd_hda_codec_read(codec, spec->vendor_nid, 0,
AC_VERB_GET_PROC_COEF, 0);
}
static inline void cs_vendor_coef_set(struct hda_codec *codec, unsigned int idx,
unsigned int coef)
{
struct cs_spec *spec = codec->spec;
snd_hda_codec_write(codec, spec->vendor_nid, 0,
AC_VERB_SET_COEF_INDEX, idx);
snd_hda_codec_write(codec, spec->vendor_nid, 0,
AC_VERB_SET_PROC_COEF, coef);
}
/* unsolicited-event tags */
#define HP_EVENT 1
#define MIC_EVENT 2

/*
 * PCM callbacks
 */
/* analog playback: delegate to the multi-out helpers */
static int cs_playback_pcm_open(struct hda_pcm_stream *hinfo,
				struct hda_codec *codec,
				struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
					     hinfo);
}

static int cs_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
				   struct hda_codec *codec,
				   unsigned int stream_tag,
				   unsigned int format,
				   struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
						stream_tag, format, substream);
}

static int cs_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
				   struct hda_codec *codec,
				   struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}

/*
 * Digital out: delegate to the shared SPDIF multi-out helpers
 */
static int cs_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
				    struct hda_codec *codec,
				    struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}

static int cs_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
				     struct hda_codec *codec,
				     struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}

static int cs_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
				       struct hda_codec *codec,
				       unsigned int stream_tag,
				       unsigned int format,
				       struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
					     format, substream);
}

static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
				       struct hda_codec *codec,
				       struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
}
/*
 * Route the currently selected input to the active ADC by programming
 * its connection selector; a no-op while no capture stream is open.
 */
static void cs_update_input_select(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;

	if (!spec->cur_adc)
		return;
	snd_hda_codec_write(codec, spec->cur_adc, 0,
			    AC_VERB_SET_CONNECT_SEL,
			    spec->adc_idx[spec->cur_input]);
}
/*
 * Analog capture
 */
/* prepare: record the stream parameters (needed to re-setup the stream
 * if the ADC is swapped on an input change) and start the selected ADC */
static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
				  struct hda_codec *codec,
				  unsigned int stream_tag,
				  unsigned int format,
				  struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	spec->cur_adc = spec->adc_nid[spec->cur_input];
	spec->cur_adc_stream_tag = stream_tag;
	spec->cur_adc_format = format;
	cs_update_input_select(codec);
	snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
	return 0;
}

/* cleanup: stop the ADC and mark that no capture stream is active */
static int cs_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
				  struct hda_codec *codec,
				  struct snd_pcm_substream *substream)
{
	struct cs_spec *spec = codec->spec;
	snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
	spec->cur_adc = 0;
	return 0;
}
/*
 * stream templates copied into pcm_rec[] by cs_build_pcms()
 */
static const struct hda_pcm_stream cs_pcm_analog_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.ops = {
		.open = cs_playback_pcm_open,
		.prepare = cs_playback_pcm_prepare,
		.cleanup = cs_playback_pcm_cleanup
	},
};

static const struct hda_pcm_stream cs_pcm_analog_capture = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.ops = {
		.prepare = cs_capture_pcm_prepare,
		.cleanup = cs_capture_pcm_cleanup
	},
};

static const struct hda_pcm_stream cs_pcm_digital_playback = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
	.ops = {
		.open = cs_dig_playback_pcm_open,
		.close = cs_dig_playback_pcm_close,
		.prepare = cs_dig_playback_pcm_prepare,
		.cleanup = cs_dig_playback_pcm_cleanup
	},
};

/* digital capture needs no per-stream ops; the generic code handles it */
static const struct hda_pcm_stream cs_pcm_digital_capture = {
	.substreams = 1,
	.channels_min = 2,
	.channels_max = 2,
};
/*
 * Build the PCM streams: always one analog PCM; a second, digital PCM
 * only when an SPDIF output and/or input was found during parsing.
 */
static int cs_build_pcms(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct hda_pcm *info = spec->pcm_rec;

	codec->pcm_info = info;
	codec->num_pcms = 0;

	info->name = "Cirrus Analog";
	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = cs_pcm_analog_playback;
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dac_nid[0];
	info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
		spec->multiout.max_channels;
	info->stream[SNDRV_PCM_STREAM_CAPTURE] = cs_pcm_analog_capture;
	info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
		spec->adc_nid[spec->cur_input];
	codec->num_pcms++;

	/* no digital hardware found: analog PCM only */
	if (!spec->multiout.dig_out_nid && !spec->dig_in)
		return 0;

	info++;
	info->name = "Cirrus Digital";
	info->pcm_type = spec->autocfg.dig_out_type[0];
	if (!info->pcm_type)
		info->pcm_type = HDA_PCM_TYPE_SPDIF;
	if (spec->multiout.dig_out_nid) {
		info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
			cs_pcm_digital_playback;
		info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
			spec->multiout.dig_out_nid;
	}
	if (spec->dig_in) {
		info->stream[SNDRV_PCM_STREAM_CAPTURE] =
			cs_pcm_digital_capture;
		info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
	}
	codec->num_pcms++;

	return 0;
}
/*
* parse codec topology
*/
/* Return the single DAC connected to @pin, or 0 when there is none. */
static hda_nid_t get_dac(struct hda_codec *codec, hda_nid_t pin)
{
	hda_nid_t dac = 0;

	if (pin && snd_hda_get_connections(codec, pin, &dac, 1) == 1)
		return dac;
	return 0;
}
static int is_ext_mic(struct hda_codec *codec, unsigned int idx)
{
struct cs_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
hda_nid_t pin = cfg->inputs[idx].pin;
unsigned int val;
if (!is_jack_detectable(codec, pin))
return 0;
val = snd_hda_codec_get_pincfg(codec, pin);
return (snd_hda_get_input_pin_attr(val) != INPUT_PIN_ATTR_INT);
}
/*
 * Find an audio-input widget that has @pin among its connections.
 * On success the ADC nid is returned and the connection index is
 * stored in *idxp; 0 is returned when no ADC reaches the pin.
 */
static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
			 unsigned int *idxp)
{
	hda_nid_t nid = codec->start_nid;
	int i;

	for (i = 0; i < codec->num_nodes; i++, nid++) {
		int conn;

		if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_AUD_IN)
			continue;
		conn = snd_hda_get_conn_index(codec, nid, pin, false);
		if (conn < 0)
			continue;
		*idxp = conn;
		return nid;
	}
	return 0;
}
/* True unless the pin's default config marks the port as not connected. */
static int is_active_pin(struct hda_codec *codec, hda_nid_t nid)
{
	unsigned int cfg = snd_hda_codec_get_pincfg(codec, nid);

	return get_defcfg_connect(cfg) != AC_JACK_PORT_NONE;
}
/*
 * Pick a DAC for each line-out, HP and speaker pin.  The first HP DAC
 * becomes multiout.hp_nid; further HP DACs and all speaker DACs go into
 * extra_out_nid[].  When the primary outs are really speakers, move
 * them over to the speaker fields so auto-mute can control them.
 */
static int parse_output(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i, extra_nids;
	hda_nid_t dac;

	for (i = 0; i < cfg->line_outs; i++) {
		dac = get_dac(codec, cfg->line_out_pins[i]);
		if (!dac)
			break;
		spec->dac_nid[i] = dac;
	}
	/* i now counts the usable line-out DACs */
	spec->multiout.num_dacs = i;
	spec->multiout.dac_nids = spec->dac_nid;
	spec->multiout.max_channels = i * 2;

	/* add HP and speakers */
	extra_nids = 0;
	for (i = 0; i < cfg->hp_outs; i++) {
		dac = get_dac(codec, cfg->hp_pins[i]);
		if (!dac)
			break;
		if (!i)
			spec->multiout.hp_nid = dac;
		else
			spec->multiout.extra_out_nid[extra_nids++] = dac;
	}
	for (i = 0; i < cfg->speaker_outs; i++) {
		dac = get_dac(codec, cfg->speaker_pins[i]);
		if (!dac)
			break;
		spec->multiout.extra_out_nid[extra_nids++] = dac;
	}

	/* primary outs are speakers: reclassify for auto-mute handling */
	if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
		cfg->speaker_outs = cfg->line_outs;
		memcpy(cfg->speaker_pins, cfg->line_out_pins,
		       sizeof(cfg->speaker_pins));
		cfg->line_outs = 0;
	}

	return 0;
}
/*
 * Record each autocfg input: build the control<->input index maps and
 * find the ADC (and its connection index) that reaches each pin.  Then
 * enable automatic mic switching when there are exactly two mics, one
 * external and one internal.
 */
static int parse_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t pin = cfg->inputs[i].pin;
		spec->input_idx[spec->num_inputs] = i;
		spec->capsrc_idx[i] = spec->num_inputs++;
		spec->cur_input = i;
		spec->adc_nid[i] = get_adc(codec, pin, &spec->adc_idx[i]);
	}
	if (!spec->num_inputs)
		return 0;

	/* check whether the automatic mic switch is available */
	if (spec->num_inputs == 2 &&
	    cfg->inputs[0].type == AUTO_PIN_MIC &&
	    cfg->inputs[1].type == AUTO_PIN_MIC) {
		/*
		 * is_ext_mic() takes an index into cfg->inputs[]; passing
		 * cfg->inputs[n].pin (a pin nid) here, as the old code did,
		 * indexed far outside the inputs array.
		 */
		if (is_ext_mic(codec, 0)) {
			if (!is_ext_mic(codec, 1)) {
				spec->mic_detect = 1;
				spec->automic_idx = 0;
			}
		} else {
			if (is_ext_mic(codec, 1)) {
				spec->mic_detect = 1;
				spec->automic_idx = 1;
			}
		}
	}
	return 0;
}
/*
 * Resolve the converter behind each SPDIF output pin: the first becomes
 * the main dig_out_nid (with SPDIF sharing enabled); a second, if
 * present, is registered as a slave digital out.
 */
static int parse_digital_output(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;

	if (!cfg->dig_outs)
		return 0;
	if (snd_hda_get_connections(codec, cfg->dig_out_pins[0], &nid, 1) < 1)
		return 0;

	spec->multiout.dig_out_nid = nid;
	spec->multiout.share_spdif = 1;

	if (cfg->dig_outs > 1 &&
	    snd_hda_get_connections(codec, cfg->dig_out_pins[1], &nid, 1) > 0) {
		spec->slave_dig_outs[0] = nid;
		codec->slave_dig_outs = spec->slave_dig_outs;
	}
	return 0;
}
/* Resolve the ADC behind the SPDIF input pin, when one is configured. */
static int parse_digital_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int idx;

	if (!cfg->dig_in_pin)
		return 0;
	spec->dig_in = get_adc(codec, cfg->dig_in_pin, &idx);
	return 0;
}
/*
* create mixer controls
*/
/* control-name suffix, indexed by direction: 0 = playback, 1 = capture */
static const char * const dir_sfx[2] = { "Playback", "Capture" };
static int add_mute(struct hda_codec *codec, const char *name, int index,
unsigned int pval, int dir, struct snd_kcontrol **kctlp)
{
char tmp[44];
struct snd_kcontrol_new knew =
HDA_CODEC_MUTE_IDX(tmp, index, 0, 0, HDA_OUTPUT);
knew.private_value = pval;
snprintf(tmp, sizeof(tmp), "%s %s Switch", name, dir_sfx[dir]);
*kctlp = snd_ctl_new1(&knew, codec);
(*kctlp)->id.subdevice = HDA_SUBDEV_AMP_FLAG;
return snd_hda_ctl_add(codec, 0, *kctlp);
}
static int add_volume(struct hda_codec *codec, const char *name,
int index, unsigned int pval, int dir,
struct snd_kcontrol **kctlp)
{
char tmp[44];
struct snd_kcontrol_new knew =
HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
knew.private_value = pval;
snprintf(tmp, sizeof(tmp), "%s %s Volume", name, dir_sfx[dir]);
*kctlp = snd_ctl_new1(&knew, codec);
(*kctlp)->id.subdevice = HDA_SUBDEV_AMP_FLAG;
return snd_hda_ctl_add(codec, 0, *kctlp);
}
/* Clamp the DAC's output-amp range so that 0dB is the maximum step. */
static void fix_volume_caps(struct hda_codec *codec, hda_nid_t dac)
{
	unsigned int caps = query_amp_caps(codec, dac, HDA_OUTPUT);
	unsigned int offset = (caps >> AC_AMPCAP_OFFSET_SHIFT) & 0x7f;

	caps &= ~(0x7f << AC_AMPCAP_NUM_STEPS_SHIFT);
	caps |= offset << AC_AMPCAP_NUM_STEPS_SHIFT;
	snd_hda_override_amp_caps(codec, dac, HDA_OUTPUT, caps);
}
/*
 * Create the virtual "Master Playback" switch and volume controls,
 * deriving the volume TLV from the given DAC.  Returns 0 or a
 * negative error code.
 */
static int add_vmaster(struct hda_codec *codec, hda_nid_t dac)
{
	struct cs_spec *spec = codec->spec;
	unsigned int tlv[4];
	int err;

	spec->vmaster_sw =
		snd_ctl_make_virtual_master("Master Playback Switch", NULL);
	/* snd_ctl_make_virtual_master() returns NULL on allocation failure */
	if (!spec->vmaster_sw)
		return -ENOMEM;
	err = snd_hda_ctl_add(codec, dac, spec->vmaster_sw);
	if (err < 0)
		return err;

	snd_hda_set_vmaster_tlv(codec, dac, HDA_OUTPUT, tlv);
	spec->vmaster_vol =
		snd_ctl_make_virtual_master("Master Playback Volume", tlv);
	if (!spec->vmaster_vol)
		return -ENOMEM;
	err = snd_hda_ctl_add(codec, dac, spec->vmaster_vol);
	if (err < 0)
		return err;
	return 0;
}
/*
 * Create mute + volume controls for one output DAC and hook them up as
 * slaves of the virtual master (created lazily on the first output).
 * @idx selects a channel name when @num_ctls > 1; @type picks the
 * name family (HP / Speaker / Line Out).
 */
static int add_output(struct hda_codec *codec, hda_nid_t dac, int idx,
		      int num_ctls, int type)
{
	struct cs_spec *spec = codec->spec;
	const char *name;
	int err, index;
	struct snd_kcontrol *kctl;
	static const char * const speakers[] = {
		"Front Speaker", "Surround Speaker", "Bass Speaker"
	};
	static const char * const line_outs[] = {
		"Front Line Out", "Surround Line Out", "Bass Line Out"
	};

	fix_volume_caps(codec, dac);
	/* create the vmaster controls on the first output */
	if (!spec->vmaster_sw) {
		err = add_vmaster(codec, dac);
		if (err < 0)
			return err;
	}

	index = 0;
	switch (type) {
	case AUTO_PIN_HP_OUT:
		/* multiple headphones share a name, disambiguated by index */
		name = "Headphone";
		index = idx;
		break;
	case AUTO_PIN_SPEAKER_OUT:
		if (num_ctls > 1)
			name = speakers[idx];
		else
			name = "Speaker";
		break;
	default:
		if (num_ctls > 1)
			name = line_outs[idx];
		else
			name = "Line Out";
		break;
	}

	err = add_mute(codec, name, index,
		       HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
	if (err < 0)
		return err;
	err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
	if (err < 0)
		return err;

	err = add_volume(codec, name, index,
			 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
	if (err < 0)
		return err;
	err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
	if (err < 0)
		return err;

	return 0;
}
/* Create output mixer controls for every line-out, HP and speaker pin. */
static int build_output(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int idx, ret;

	for (idx = 0; idx < cfg->line_outs; idx++) {
		ret = add_output(codec,
				 get_dac(codec, cfg->line_out_pins[idx]),
				 idx, cfg->line_outs, cfg->line_out_type);
		if (ret < 0)
			return ret;
	}

	for (idx = 0; idx < cfg->hp_outs; idx++) {
		ret = add_output(codec, get_dac(codec, cfg->hp_pins[idx]),
				 idx, cfg->hp_outs, AUTO_PIN_HP_OUT);
		if (ret < 0)
			return ret;
	}

	for (idx = 0; idx < cfg->speaker_outs; idx++) {
		ret = add_output(codec, get_dac(codec, cfg->speaker_pins[idx]),
				 idx, cfg->speaker_outs, AUTO_PIN_SPEAKER_OUT);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * bound capture switch/volume controls; private_value is filled in at
 * build time with the hda_bind_ctls from make_bind_capture()
 */
static const struct snd_kcontrol_new cs_capture_ctls[] = {
	HDA_BIND_SW("Capture Switch", 0),
	HDA_BIND_VOL("Capture Volume", 0),
};
/*
 * Switch the current capture input.  When a capture stream is running
 * on a different ADC, tear down the old ADC (keeping the stream marked
 * dirty so it is re-set-up) and move the stream to the new ADC before
 * updating the input selector.  Returns 1 when something changed, 0
 * when the selection was already @idx and @force is not set.
 */
static int change_cur_input(struct hda_codec *codec, unsigned int idx,
			    int force)
{
	struct cs_spec *spec = codec->spec;

	if (spec->cur_input == idx && !force)
		return 0;
	if (spec->cur_adc && spec->cur_adc != spec->adc_nid[idx]) {
		/* stream is running, let's swap the current ADC */
		__snd_hda_codec_cleanup_stream(codec, spec->cur_adc, 1);
		spec->cur_adc = spec->adc_nid[idx];
		snd_hda_codec_setup_stream(codec, spec->cur_adc,
					   spec->cur_adc_stream_tag, 0,
					   spec->cur_adc_format);
	}
	spec->cur_input = idx;
	cs_update_input_select(codec);
	return 1;
}
/*
 * "Capture Source" info callback: report the item count and resolve
 * the (clamped) enum item to its input pin's label.
 */
static int cs_capture_source_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int item, input;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = spec->num_inputs;

	item = uinfo->value.enumerated.item;
	if (item >= spec->num_inputs)
		item = spec->num_inputs - 1;
	uinfo->value.enumerated.item = item;

	input = spec->input_idx[item];
	snd_hda_get_pin_label(codec, cfg->inputs[input].pin, cfg,
			      uinfo->value.enumerated.name,
			      sizeof(uinfo->value.enumerated.name), NULL);
	return 0;
}
/* "Capture Source" get callback: report the control index of the
 * currently selected input (cur_input mapped through capsrc_idx[]). */
static int cs_capture_source_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct cs_spec *spec = codec->spec;
	ucontrol->value.enumerated.item[0] = spec->capsrc_idx[spec->cur_input];
	return 0;
}
/*
 * "Capture Source" put callback: validate the requested item, map it
 * back to the autocfg input index, and switch the input.
 */
static int cs_capture_source_put(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
	struct cs_spec *spec = codec->spec;
	unsigned int sel = ucontrol->value.enumerated.item[0];

	if (sel >= spec->num_inputs)
		return -EINVAL;
	return change_cur_input(codec, spec->input_idx[sel], 0);
}
/* "Capture Source" enum control: selects the active capture input */
static const struct snd_kcontrol_new cs_capture_source = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Capture Source",
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.info = cs_capture_source_info,
	.get = cs_capture_source_get,
	.put = cs_capture_source_put,
};
/*
 * Build a hda_bind_ctls covering the input amp of every valid ADC.
 * The values array is sized for num_inputs entries plus a terminating
 * zero (provided by kzalloc).  The caller owns the returned memory;
 * NULL is returned on allocation failure.
 */
static const struct hda_bind_ctls *make_bind_capture(struct hda_codec *codec,
						     struct hda_ctl_ops *ops)
{
	struct cs_spec *spec = codec->spec;
	struct hda_bind_ctls *bind;
	int i, n;

	bind = kzalloc(sizeof(*bind) + sizeof(long) * (spec->num_inputs + 1),
		       GFP_KERNEL);
	if (!bind)
		return NULL;
	bind->ops = ops;
	n = 0;
	for (i = 0; i < AUTO_PIN_LAST; i++) {
		if (!spec->adc_nid[i])
			continue;
		bind->values[n++] =
			HDA_COMPOSE_AMP_VAL(spec->adc_nid[i], 3,
					    spec->adc_idx[i], HDA_INPUT);
	}
	return bind;
}
/* add a (input-boost) volume control to the given input pin */
static int add_input_volume_control(struct hda_codec *codec,
				    struct auto_pin_cfg *cfg,
				    int item)
{
	hda_nid_t pin = cfg->inputs[item].pin;
	struct snd_kcontrol *kctl;
	const char *label;
	u32 caps;

	if (!(get_wcaps(codec, pin) & AC_WCAP_IN_AMP))
		return 0;	/* pin has no input amplifier */
	caps = query_amp_caps(codec, pin, HDA_INPUT);
	caps = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT;
	if (caps <= 1)
		return 0;	/* amp is not adjustable, skip the control */
	label = hda_get_autocfg_input_label(codec, cfg, item);
	return add_volume(codec, label, 0,
			  HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_INPUT), 1, &kctl);
}
/*
 * Build the capture mixer controls: bound switch/volume spanning all
 * ADCs, a "Capture Source" selector (unless auto-mic switching is in
 * charge), and one boost volume per input pin.
 */
static int build_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	int i, err;

	if (!spec->num_inputs)
		return 0;

	/* make bind-capture */
	spec->capture_bind[0] = make_bind_capture(codec, &snd_hda_bind_sw);
	spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
	for (i = 0; i < 2; i++) {
		struct snd_kcontrol *kctl;
		int n;
		if (!spec->capture_bind[i])
			return -ENOMEM;
		kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
		if (!kctl)
			return -ENOMEM;
		kctl->private_value = (long)spec->capture_bind[i];
		err = snd_hda_ctl_add(codec, 0, kctl);
		if (err < 0)
			return err;
		/* associate the control with every ADC nid */
		for (n = 0; n < AUTO_PIN_LAST; n++) {
			if (!spec->adc_nid[n])
				continue;
			err = snd_hda_add_nid(codec, kctl, 0, spec->adc_nid[n]);
			if (err < 0)
				return err;
		}
	}

	/* no source selector needed when auto-mic does the switching */
	if (spec->num_inputs > 1 && !spec->mic_detect) {
		err = snd_hda_ctl_add(codec, 0,
				snd_ctl_new1(&cs_capture_source, codec));
		if (err < 0)
			return err;
	}

	for (i = 0; i < spec->num_inputs; i++) {
		err = add_input_volume_control(codec, &spec->autocfg, i);
		if (err < 0)
			return err;
	}

	return 0;
}
/*
*/
static int build_digital_output(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
if (!spec->multiout.dig_out_nid)
return 0;
err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid,
spec->multiout.dig_out_nid);
if (err < 0)
return err;
err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
if (err < 0)
return err;
return 0;
}
/* Create SPDIF-in controls when a digital input ADC was found. */
static int build_digital_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;

	if (!spec->dig_in)
		return 0;
	return snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
}
/*
 * auto-mute and auto-mic switching
 * CS421x auto-output redirecting
 * HP/SPK/SPDIF
 *
 * Mutes the speakers when a headphone (or, on CS4210, an SPDIF) jack is
 * detected, toggles EAPD GPIOs accordingly, and on CS4210 also gates the
 * HP pins and SPDIF transmitter pin on jack state.
 */
static void cs_automute(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int hp_present;
	unsigned int spdif_present;
	hda_nid_t nid;
	int i;

	/* only the first digital-out pin is checked for SPDIF presence */
	spdif_present = 0;
	if (cfg->dig_outs) {
		nid = cfg->dig_out_pins[0];
		if (is_jack_detectable(codec, nid)) {
			/*
			TODO: SPDIF output redirect when SENSE_B is enabled.
			Shared (SENSE_A) jack (e.g HP/mini-TOSLINK)
			assumed.
			*/
			if (snd_hda_jack_detect(codec, nid)
			    /* && spec->sense_b */)
				spdif_present = 1;
		}
	}

	/* any detectable HP jack being plugged counts as "HP present" */
	hp_present = 0;
	for (i = 0; i < cfg->hp_outs; i++) {
		nid = cfg->hp_pins[i];
		if (!is_jack_detectable(codec, nid))
			continue;
		hp_present = snd_hda_jack_detect(codec, nid);
		if (hp_present)
			break;
	}

	/* mute speakers if spdif or hp jack is plugged in */
	for (i = 0; i < cfg->speaker_outs; i++) {
		int pin_ctl = hp_present ? 0 : PIN_OUT;
		/* detect on spdif is specific to CS4210 */
		if (spdif_present && (spec->vendor_nid == CS4210_VENDOR_NID))
			pin_ctl = 0;
		nid = cfg->speaker_pins[i];
		snd_hda_codec_write(codec, nid, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
	}

	/* route EAPD power to whichever output is active via GPIO */
	if (spec->gpio_eapd_hp) {
		unsigned int gpio = hp_present ?
			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
		snd_hda_codec_write(codec, 0x01, 0,
				    AC_VERB_SET_GPIO_DATA, gpio);
	}

	/* specific to CS4210 */
	if (spec->vendor_nid == CS4210_VENDOR_NID) {
		/* mute HPs if spdif jack (SENSE_B) is present */
		for (i = 0; i < cfg->hp_outs; i++) {
			nid = cfg->hp_pins[i];
			snd_hda_codec_write(codec, nid, 0,
					    AC_VERB_SET_PIN_WIDGET_CONTROL,
					    (spdif_present && spec->sense_b) ? 0 : PIN_HP);
		}
		/* SPDIF TX on/off */
		if (cfg->dig_outs) {
			nid = cfg->dig_out_pins[0];
			snd_hda_codec_write(codec, nid, 0,
					    AC_VERB_SET_PIN_WIDGET_CONTROL,
					    spdif_present ? PIN_OUT : 0);
		}
		/* Update board GPIOs if neccessary ... */
	}
}
/*
 * Auto-input redirect for CS421x
 * Switch max 3 inputs of a single ADC (nid 3)
 *
 * Called on a mic-jack unsolicited event: when the external mic is
 * plugged, select it as the capture source; when unplugged, fall back
 * to the other input (CS420x) or the previously selected one (CS421x).
 */
static void cs_automic(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	hda_nid_t nid;
	unsigned int present;

	nid = cfg->inputs[spec->automic_idx].pin;
	present = snd_hda_jack_detect(codec, nid);

	/* CS420x: multiple ADCs, switch via change_cur_input();
	 * the else-branch handles the single-ADC CS421x family */
	if (spec->vendor_nid == CS420X_VENDOR_NID) {
		if (present)
			change_cur_input(codec, spec->automic_idx, 0);
		else
			/* NOTE(review): assumes exactly two inputs here, so
			 * !automic_idx is "the other one" — verify for boards
			 * with more inputs */
			change_cur_input(codec, !spec->automic_idx, 0);
	} else {
		if (present) {
			/* remember the previous source so unplug restores it */
			if (spec->cur_input != spec->automic_idx) {
				spec->last_input = spec->cur_input;
				spec->cur_input = spec->automic_idx;
			}
		} else  {
			spec->cur_input = spec->last_input;
		}
		cs_update_input_select(codec);
	}
}
/*
 * Initialize the output path: mute all DACs first, then set the pin
 * controls for line-out/HP/speaker pins, enable HP jack-detect events
 * where available, and run an initial auto-mute pass.
 */
static void init_output(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	/* mute first */
	for (i = 0; i < spec->multiout.num_dacs; i++)
		snd_hda_codec_write(codec, spec->multiout.dac_nids[i], 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
	if (spec->multiout.hp_nid)
		snd_hda_codec_write(codec, spec->multiout.hp_nid, 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
	for (i = 0; i < ARRAY_SIZE(spec->multiout.extra_out_nid); i++) {
		if (!spec->multiout.extra_out_nid[i])
			break;
		snd_hda_codec_write(codec, spec->multiout.extra_out_nid[i], 0,
				    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
	}

	/* set appropriate pin controls */
	for (i = 0; i < cfg->line_outs; i++)
		snd_hda_codec_write(codec, cfg->line_out_pins[i], 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
	/* HP */
	for (i = 0; i < cfg->hp_outs; i++) {
		hda_nid_t nid = cfg->hp_pins[i];
		snd_hda_codec_write(codec, nid, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
		/* auto-mute only makes sense when there are speakers to mute */
		if (!cfg->speaker_outs)
			continue;
		if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
			snd_hda_jack_detect_enable(codec, nid, HP_EVENT);
			spec->hp_detect = 1;
		}
	}
	/* Speaker */
	for (i = 0; i < cfg->speaker_outs; i++)
		snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);

	/* SPDIF is enabled on presence detect for CS421x */
	if (spec->hp_detect || spec->spdif_detect)
		cs_automute(codec);
}
/*
 * Initialize the input path: set the pin control (VREF for analog mics),
 * mute each ADC input, enable mic jack-detect where used, and select the
 * current input source (with family-specific coefficient setup).
 */
static void init_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	unsigned int coef;
	int i;

	for (i = 0; i < cfg->num_inputs; i++) {
		unsigned int ctl;
		hda_nid_t pin = cfg->inputs[i].pin;
		if (!spec->adc_nid[i])
			continue;
		/* set appropriate pin control and mute first */
		ctl = PIN_IN;
		if (cfg->inputs[i].type == AUTO_PIN_MIC) {
			unsigned int caps = snd_hda_query_pin_caps(codec, pin);
			caps >>= AC_PINCAP_VREF_SHIFT;
			/* supply mic bias when the pin supports 80% VREF */
			if (caps & AC_PINCAP_VREF_80)
				ctl = PIN_VREF80;
		}
		snd_hda_codec_write(codec, pin, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, ctl);
		snd_hda_codec_write(codec, spec->adc_nid[i], 0,
				    AC_VERB_SET_AMP_GAIN_MUTE,
				    AMP_IN_MUTE(spec->adc_idx[i]));
		if (spec->mic_detect && spec->automic_idx == i)
			snd_hda_jack_detect_enable(codec, pin, MIC_EVENT);
	}

	/* CS420x has multiple ADC, CS421x has single ADC */
	if (spec->vendor_nid == CS420X_VENDOR_NID) {
		change_cur_input(codec, spec->cur_input, 1);
		if (spec->mic_detect)
			cs_automic(codec);

		coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
		if (is_active_pin(codec, CS_DMIC2_PIN_NID))
			coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
		if (is_active_pin(codec, CS_DMIC1_PIN_NID))
			coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
					 * No effect if SPDIF_OUT2 is
					 * selected in IDX_SPDIF_CTL.
					 */
		cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
	} else {
		if (spec->mic_detect)
			cs_automic(codec);
		else {
			spec->cur_adc = spec->adc_nid[spec->cur_input];
			cs_update_input_select(codec);
		}
	}
}
/* CS420x vendor-widget (NID 0x11) coefficient setup: DAC ramp/mute
 * behaviour and beep routing. Written once during cs_init(). */
static const struct hda_verb cs_coef_init_verbs[] = {
	{0x11, AC_VERB_SET_PROC_STATE, 1},
	{0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
	{0x11, AC_VERB_SET_PROC_COEF,
	 (0x002a /* DAC1/2/3 SZCMode Soft Ramp */
	  | 0x0040 /* Mute DACs on FIFO error */
	  | 0x1000 /* Enable DACs High Pass Filter */
	  | 0x0400 /* Disable Coefficient Auto increment */
	  )},
	/* Beep */
	/* NOTE(review): index is IDX_DAC_CFG again here; presumably the beep
	 * enable lives in the same coefficient — confirm against datasheet */
	{0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
	{0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */

	{} /* terminator */
};
/* Errata: CS4207 rev C0/C1/C2 Silicon
 *
 * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
 *
 * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
 * may be excessive (up to an additional 200 μA), which is most easily
 * observed while the part is being held in reset (RESET# active low).
 *
 * Root Cause: At initial powerup of the device, the logic that drives
 * the clock and write enable to the S/PDIF SRC RAMs is not properly
 * initialized.
 * Certain random patterns will cause a steady leakage current in those
 * RAM cells. The issue will resolve once the SRCs are used (turned on).
 *
 * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
 * blocks, which will alleviate the issue.
 */
static const struct hda_verb cs_errata_init_verbs[] = {
	{0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
	{0x11, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */

	{0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
	{0x11, AC_VERB_SET_PROC_COEF, 0x9999},
	{0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
	{0x11, AC_VERB_SET_PROC_COEF, 0xa412},
	{0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
	{0x11, AC_VERB_SET_PROC_COEF, 0x0009},

	{0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
	{0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */

	/* undo the temporary settings now that the SRCs have run */
	{0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
	{0x11, AC_VERB_SET_PROC_COEF, 0x2412},
	{0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
	{0x11, AC_VERB_SET_PROC_COEF, 0x0000},
	{0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
	{0x11, AC_VERB_SET_PROC_COEF, 0x0008},
	{0x11, AC_VERB_SET_PROC_STATE, 0x00},

#if 0 /* Don't to set to D3 as we are in power-up sequence */
	{0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
	{0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
	/*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
#endif

	{} /* terminator */
};
/*
 * SPDIF setup: program the vendor SPDIF-control coefficient with
 * soft-mute-on-unlock, mute-on-error and, when the second digital
 * output pin is active, the SPDIF2 loop-through bit.
 */
static void init_digital(struct hda_codec *codec)
{
	/* SRC_MUTE soft-mute on SPDIF (if no lock) + replace with mute on
	 * error */
	unsigned int coef = 0x0002 | 0x0008;

	if (is_active_pin(codec, CS_DIG_OUT2_PIN_NID))
		coef |= 0x4000;	/* RX to TX1 or TX2 Loopthru / SPDIF2;
				 * SPDIF_OUT2 is shared with GPIO1 and
				 * DMIC_SDA2.
				 */

	cs_vendor_coef_set(codec, IDX_SPDIF_CTL, coef);
}
/*
 * CS420x codec init callback: apply errata and coefficient verb tables,
 * program the EAPD GPIOs, then initialize the output, input and digital
 * paths and sync jack state to user space.
 */
static int cs_init(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;

	/* init_verb sequence for C0/C1/C2 errata*/
	snd_hda_sequence_write(codec, cs_errata_init_verbs);

	snd_hda_sequence_write(codec, cs_coef_init_verbs);

	if (spec->gpio_mask) {
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
				    spec->gpio_mask);
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
				    spec->gpio_dir);
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
				    spec->gpio_data);
	}

	init_output(codec);
	init_input(codec);
	init_digital(codec);
	snd_hda_jack_report_sync(codec);

	return 0;
}
/*
 * CS420x build_controls callback: create all mixer controls, then run
 * cs_init() so jack-detect kctls report correct initial state before
 * snd_hda_jack_add_kctls() registers them.
 */
static int cs_build_controls(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	int err;

	err = build_output(codec);
	if (err < 0)
		return err;
	err = build_input(codec);
	if (err < 0)
		return err;
	err = build_digital_output(codec);
	if (err < 0)
		return err;
	err = build_digital_input(codec);
	if (err < 0)
		return err;
	/* re-run init here so the freshly built controls see live state */
	err = cs_init(codec);
	if (err < 0)
		return err;

	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
	if (err < 0)
		return err;

	return 0;
}
/* Release the codec's private spec, including both bound-capture
 * control templates allocated by make_bind_capture(). */
static void cs_free(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	int i;

	for (i = 0; i < 2; i++)
		kfree(spec->capture_bind[i]);
	kfree(spec);
}
/* Unsolicited-event handler: dispatch HP and mic jack events to the
 * auto-mute/auto-mic helpers, then report jack state to user space. */
static void cs_unsol_event(struct hda_codec *codec, unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag lives in bits 31:26 */

	switch (snd_hda_jack_get_action(codec, tag)) {
	case HP_EVENT:
		cs_automute(codec);
		break;
	case MIC_EVENT:
		cs_automic(codec);
		break;
	default:
		break;
	}
	snd_hda_jack_report_sync(codec);
}
/* Codec operation table for the CS420x family. */
static const struct hda_codec_ops cs_patch_ops = {
	.build_controls = cs_build_controls,
	.build_pcms = cs_build_pcms,
	.init = cs_init,
	.free = cs_free,
	.unsol_event = cs_unsol_event,
};
static int cs_parse_auto_config(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
if (err < 0)
return err;
err = parse_output(codec);
if (err < 0)
return err;
err = parse_input(codec);
if (err < 0)
return err;
err = parse_digital_output(codec);
if (err < 0)
return err;
err = parse_digital_input(codec);
if (err < 0)
return err;
return 0;
}
/* Model names accepted via the "model" module option for CS420x. */
static const char * const cs420x_models[CS420X_MODELS] = {
	[CS420X_MBP53] = "mbp53",
	[CS420X_MBP55] = "mbp55",
	[CS420X_IMAC27] = "imac27",
	[CS420X_IMAC27_122] = "imac27_122",
	[CS420X_APPLE] = "apple",
	[CS420X_AUTO] = "auto",
};

/* Model lookup by PCI subsystem ID. */
static const struct snd_pci_quirk cs420x_cfg_tbl[] = {
	SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
	SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
	/* this conflicts with too many other models */
	/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
	{} /* terminator */
};

/* Fallback lookup by the codec's own subsystem ID (Apple machines). */
static const struct snd_pci_quirk cs420x_codec_cfg_tbl[] = {
	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
	{} /* terminator */
};

/* One NID/default-config pair for the pin-override tables below. */
struct cs_pincfg {
	hda_nid_t nid;
	u32 val;
};

/* Pin defaults for MacBookPro 5,3 (the BIOS values are unreliable). */
static const struct cs_pincfg mbp53_pincfgs[] = {
	{ 0x09, 0x012b4050 },
	{ 0x0a, 0x90100141 },
	{ 0x0b, 0x90100140 },
	{ 0x0c, 0x018b3020 },
	{ 0x0d, 0x90a00110 },
	{ 0x0e, 0x400000f0 },
	{ 0x0f, 0x01cbe030 },
	{ 0x10, 0x014be060 },
	{ 0x12, 0x400000f0 },
	{ 0x15, 0x400000f0 },
	{} /* terminator */
};

/* Pin defaults for MacBookPro 5,5 / 7,1 and MacBookAir 3,1. */
static const struct cs_pincfg mbp55_pincfgs[] = {
	{ 0x09, 0x012b4030 },
	{ 0x0a, 0x90100121 },
	{ 0x0b, 0x90100120 },
	{ 0x0c, 0x400000f0 },
	{ 0x0d, 0x90a00110 },
	{ 0x0e, 0x400000f0 },
	{ 0x0f, 0x400000f0 },
	{ 0x10, 0x014be040 },
	{ 0x12, 0x400000f0 },
	{ 0x15, 0x400000f0 },
	{} /* terminator */
};

/* Pin defaults for iMac 27". */
static const struct cs_pincfg imac27_pincfgs[] = {
	{ 0x09, 0x012b4050 },
	{ 0x0a, 0x90100140 },
	{ 0x0b, 0x90100142 },
	{ 0x0c, 0x018b3020 },
	{ 0x0d, 0x90a00110 },
	{ 0x0e, 0x400000f0 },
	{ 0x0f, 0x01cbe030 },
	{ 0x10, 0x014be060 },
	{ 0x12, 0x01ab9070 },
	{ 0x15, 0x400000f0 },
	{} /* terminator */
};

/* Per-model pin table; entries left NULL get no pin override. */
static const struct cs_pincfg *cs_pincfgs[CS420X_MODELS] = {
	[CS420X_MBP53] = mbp53_pincfgs,
	[CS420X_MBP55] = mbp55_pincfgs,
	[CS420X_IMAC27] = imac27_pincfgs,
};
/*
 * Apply a model's pin-config override table (if one exists) by writing
 * each NID/value pair until the zero-NID terminator.
 */
static void fix_pincfg(struct hda_codec *codec, int model,
		       const struct cs_pincfg **pin_configs)
{
	const struct cs_pincfg *entry = pin_configs[model];

	if (!entry)
		return;	/* no override table for this model */

	while (entry->nid) {
		snd_hda_codec_set_pincfg(codec, entry->nid, entry->val);
		entry++;
	}
}
/*
 * Probe entry for CS4206/CS4207: allocate the spec, resolve the board
 * model (PCI SSID first, then codec SSID for Apple machines), apply pin
 * overrides and EAPD GPIO assignments, and auto-parse the configuration.
 */
static int patch_cs420x(struct hda_codec *codec)
{
	struct cs_spec *spec;
	int err;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;

	spec->vendor_nid = CS420X_VENDOR_NID;

	spec->board_config =
		snd_hda_check_board_config(codec, CS420X_MODELS,
					   cs420x_models, cs420x_cfg_tbl);
	if (spec->board_config < 0)
		/* fall back to matching the codec's own subsystem ID */
		spec->board_config =
			snd_hda_check_board_codec_sid_config(codec,
				CS420X_MODELS, NULL, cs420x_codec_cfg_tbl);
	if (spec->board_config >= 0)
		fix_pincfg(codec, spec->board_config, cs_pincfgs);

	switch (spec->board_config) {
	case CS420X_IMAC27:
	case CS420X_MBP53:
	case CS420X_MBP55:
	case CS420X_APPLE:
		spec->gpio_eapd_hp = 2; /* GPIO1 = headphones */
		spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
		spec->gpio_mask = spec->gpio_dir =
			spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
		break;
	case CS420X_IMAC27_122:
		spec->gpio_eapd_hp = 4; /* GPIO2 = headphones */
		spec->gpio_eapd_speaker = 8; /* GPIO3 = speakers */
		spec->gpio_mask = spec->gpio_dir =
			spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
		break;
	}

	err = cs_parse_auto_config(codec);
	if (err < 0)
		goto error;

	codec->patch_ops = cs_patch_ops;

	return 0;

 error:
	kfree(codec->spec);
	codec->spec = NULL;
	return err;
}
/*
* Cirrus Logic CS4210
*
* 1 DAC => HP(sense) / Speakers,
* 1 ADC <= LineIn(sense) / MicIn / DMicIn,
* 1 SPDIF OUT => SPDIF Trasmitter(sense)
*/
/* CS4210 board names accepted via the "model" module option.
 * Declared fully const (pointer array itself is read-only), matching
 * cs420x_models and keeping the table in .rodata. */
static const char * const cs421x_models[CS421X_MODELS] = {
	[CS421X_CDB4210] = "cdb4210",
};
/* CS421x model lookup by PCI subsystem ID. */
static const struct snd_pci_quirk cs421x_cfg_tbl[] = {
	/* Test Intel board + CDB2410 */
	SND_PCI_QUIRK(0x8086, 0x5001, "DP45SG/CDB4210", CS421X_CDB4210),
	{} /* terminator */
};

/* CS4210 board pinconfigs */
/* Default CS4210 (CDB4210)*/
static const struct cs_pincfg cdb4210_pincfgs[] = {
	{ 0x05, 0x0321401f },
	{ 0x06, 0x90170010 },
	{ 0x07, 0x03813031 },
	{ 0x08, 0xb7a70037 },
	{ 0x09, 0xb7a6003e },
	{ 0x0a, 0x034510f0 },
	{} /* terminator */
};

/* Per-model pin-override table for CS421x. */
static const struct cs_pincfg *cs421x_pincfgs[CS421X_MODELS] = {
	[CS421X_CDB4210] = cdb4210_pincfgs,
};
/* CS421x vendor-widget (NID 0x0B) coefficient setup: device config,
 * ADC and DAC soft-ramp/mute behaviour. Written during cs421x_init(). */
static const struct hda_verb cs421x_coef_init_verbs[] = {
	{0x0B, AC_VERB_SET_PROC_STATE, 1},
	{0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_DEV_CFG},
	/*
	    Disable Coefficient Index Auto-Increment(DAI)=1,
	    PDREF=0
	*/
	{0x0B, AC_VERB_SET_PROC_COEF, 0x0001 },

	{0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_ADC_CFG},
	/* ADC SZCMode = Digital Soft Ramp */
	{0x0B, AC_VERB_SET_PROC_COEF, 0x0002 },

	{0x0B, AC_VERB_SET_COEF_INDEX, CS421X_IDX_DAC_CFG},
	{0x0B, AC_VERB_SET_PROC_COEF,
	 (0x0002 /* DAC SZCMode = Digital Soft Ramp */
	  | 0x0004 /* Mute DAC on FIFO error */
	  | 0x0008 /* Enable DAC High Pass Filter */
	  )},
	{} /* terminator */
};
/* Errata: CS4210 rev A1 Silicon
 *
 * http://www.cirrus.com/en/pubs/errata/
 *
 * Description:
 * 1. Performance degredation is present in the ADC.
 * 2. Speaker output is not completely muted upon HP detect.
 * 3. Noise is present when clipping occurs on the amplified
 *    speaker outputs.
 *
 * Workaround:
 * The following verb sequence written to the registers during
 * initialization will correct the issues listed above.
 */
static const struct hda_verb cs421x_coef_init_verbs_A1_silicon_fixes[] = {
	{0x0B, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */

	{0x0B, AC_VERB_SET_COEF_INDEX, 0x0006},
	{0x0B, AC_VERB_SET_PROC_COEF, 0x9999}, /* Test mode: on */

	{0x0B, AC_VERB_SET_COEF_INDEX, 0x000A},
	{0x0B, AC_VERB_SET_PROC_COEF, 0x14CB}, /* Chop double */

	{0x0B, AC_VERB_SET_COEF_INDEX, 0x0011},
	{0x0B, AC_VERB_SET_PROC_COEF, 0xA2D0}, /* Increase ADC current */

	{0x0B, AC_VERB_SET_COEF_INDEX, 0x001A},
	{0x0B, AC_VERB_SET_PROC_COEF, 0x02A9}, /* Mute speaker */

	{0x0B, AC_VERB_SET_COEF_INDEX, 0x001B},
	{0x0B, AC_VERB_SET_PROC_COEF, 0x1006}, /* Remove noise (was 0X1006;
						* same value, prefix casing
						* normalized for consistency) */

	{} /* terminator */
};
/* Speaker Amp Gain is controlled by the vendor widget's coef 4 */
/* TLV mapping for the boost control: 9 dB base, 3 dB per step. */
static const DECLARE_TLV_DB_SCALE(cs421x_speaker_boost_db_scale, 900, 300, 0);
/* .info callback for the speaker boost control: one integer value with
 * four gain steps (vendor coef bits [1:0]). */
static int cs421x_boost_vol_info(struct snd_kcontrol *kctl,
				 struct snd_ctl_elem_info *info)
{
	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	info->count = 1;
	info->value.integer.min = 0;
	info->value.integer.max = 3;
	return 0;
}
/* .get callback: read the current boost level from the low two bits of
 * the vendor speaker-control coefficient. */
static int cs421x_boost_vol_get(struct snd_kcontrol *kctl,
				struct snd_ctl_elem_value *uval)
{
	struct hda_codec *codec = snd_kcontrol_chip(kctl);
	unsigned int coef = cs_vendor_coef_get(codec, CS421X_IDX_SPK_CTL);

	uval->value.integer.value[0] = coef & 0x0003;
	return 0;
}
/* .put callback: write the new boost level into coef bits [1:0].
 * Returns 1 when the hardware value changed, 0 when it was already set. */
static int cs421x_boost_vol_put(struct snd_kcontrol *kctl,
				struct snd_ctl_elem_value *uval)
{
	struct hda_codec *codec = snd_kcontrol_chip(kctl);
	unsigned int level = uval->value.integer.value[0] & 0x0003;
	unsigned int old_coef = cs_vendor_coef_get(codec, CS421X_IDX_SPK_CTL);
	unsigned int new_coef = (old_coef & ~0x0003) | level;

	if (new_coef == old_coef)
		return 0;	/* nothing to do */

	cs_vendor_coef_set(codec, CS421X_IDX_SPK_CTL, new_coef);
	return 1;
}
/* Speaker boost mixer control (read/write with TLV dB info).
 * NOTE(review): "bost" in the identifier is a typo for "boost"; left
 * as-is because the name is referenced elsewhere in this file. */
static const struct snd_kcontrol_new cs421x_speaker_bost_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
			SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	.name = "Speaker Boost Playback Volume",
	.info = cs421x_boost_vol_info,
	.get = cs421x_boost_vol_get,
	.put = cs421x_boost_vol_put,
	.tlv = { .p = cs421x_speaker_boost_db_scale },
};
/*
 * Configure the CS4210 pin multiplexer: route the shared B1/B2 pins to
 * GPIO and/or SENSE_B as requested in the spec, and mark the DMIC pin
 * as disconnected when it loses its pins to GPIO/SENSE_B.
 */
static void cs4210_pinmux_init(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	unsigned int def_conf, coef;

	/* GPIO, DMIC_SCL, DMIC_SDA and SENSE_B are multiplexed */
	coef = cs_vendor_coef_get(codec, CS421X_IDX_DEV_CFG);

	if (spec->gpio_mask)
		coef |= 0x0008; /* B1,B2 are GPIOs */
	else
		coef &= ~0x0008;

	if (spec->sense_b)
		coef |= 0x0010; /* B2 is SENSE_B, not inverted  */
	else
		coef &= ~0x0010;

	cs_vendor_coef_set(codec, CS421X_IDX_DEV_CFG, coef);

	if ((spec->gpio_mask || spec->sense_b) &&
	    is_active_pin(codec, CS421X_DMIC_PIN_NID)) {

		/*
		    GPIO or SENSE_B forced - disconnect the DMIC pin.
		*/
		def_conf = snd_hda_codec_get_pincfg(codec, CS421X_DMIC_PIN_NID);
		def_conf &= ~AC_DEFCFG_PORT_CONN;
		def_conf |= (AC_JACK_PORT_NONE << AC_DEFCFG_PORT_CONN_SHIFT);
		snd_hda_codec_set_pincfg(codec, CS421X_DMIC_PIN_NID, def_conf);
	}
}
/*
 * Enable jack-detect events on detectable digital output pins so
 * cs_automute() can redirect output on SPDIF plug/unplug.
 */
static void init_cs421x_digital(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	for (i = 0; i < cfg->dig_outs; i++) {
		hda_nid_t nid = cfg->dig_out_pins[i];
		/* without speakers there is nothing to auto-mute/redirect
		 * (mirrors the equivalent check in init_output()) */
		if (!cfg->speaker_outs)
			continue;
		if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
			snd_hda_jack_detect_enable(codec, nid, SPDIF_EVENT);
			spec->spdif_detect = 1;
		}
	}
}
/*
 * CS421x codec init callback: on CS4210 apply the coefficient and A1
 * errata verb tables and re-program the pinmux, then set up GPIOs and
 * initialize the output/input/digital paths.
 * (CS4213 skips the CS4210-only verb tables and pinmux.)
 */
static int cs421x_init(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;

	if (spec->vendor_nid == CS4210_VENDOR_NID) {
		snd_hda_sequence_write(codec, cs421x_coef_init_verbs);
		snd_hda_sequence_write(codec, cs421x_coef_init_verbs_A1_silicon_fixes);
		cs4210_pinmux_init(codec);
	}

	if (spec->gpio_mask) {
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
				    spec->gpio_mask);
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
				    spec->gpio_dir);
		snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
				    spec->gpio_data);
	}

	init_output(codec);
	init_input(codec);
	init_cs421x_digital(codec);
	snd_hda_jack_report_sync(codec);

	return 0;
}
/*
 * CS4210 Input MUX (1 ADC)
 *
 * .info callback: report the enumerated items of the input mux.
 */
static int cs421x_mux_enum_info(struct snd_kcontrol *kctl,
				struct snd_ctl_elem_info *info)
{
	struct hda_codec *codec = snd_kcontrol_chip(kctl);
	struct cs_spec *spec = codec->spec;

	return snd_hda_input_mux_info(&spec->input_mux, info);
}
/* .get callback: report the currently selected capture source. */
static int cs421x_mux_enum_get(struct snd_kcontrol *kctl,
			       struct snd_ctl_elem_value *uval)
{
	struct hda_codec *codec = snd_kcontrol_chip(kctl);
	struct cs_spec *spec = codec->spec;

	uval->value.enumerated.item[0] = spec->cur_input;
	return 0;
}
/* .put callback: switch the single ADC to the requested input and
 * update spec->cur_input via the generic input-mux helper. */
static int cs421x_mux_enum_put(struct snd_kcontrol *kctl,
			       struct snd_ctl_elem_value *uval)
{
	struct hda_codec *codec = snd_kcontrol_chip(kctl);
	struct cs_spec *spec = codec->spec;

	return snd_hda_input_mux_put(codec, &spec->input_mux, uval,
				     spec->adc_nid[0], &spec->cur_input);
}
static struct snd_kcontrol_new cs421x_capture_source = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = cs421x_mux_enum_info,
.get = cs421x_mux_enum_get,
.put = cs421x_mux_enum_put,
};
/*
 * Add an (input-boost) volume control for the given input pin, named
 * after its mux item. Skipped when the pin has no input amp or the amp
 * has no usable gain range.
 */
static int cs421x_add_input_volume_control(struct hda_codec *codec, int item)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	const struct hda_input_mux *imux = &spec->input_mux;
	hda_nid_t pin = cfg->inputs[item].pin;
	struct snd_kcontrol *kctl;
	u32 caps;

	if (!(get_wcaps(codec, pin) & AC_WCAP_IN_AMP))
		return 0;

	caps = query_amp_caps(codec, pin, HDA_INPUT);
	caps = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT;
	/* a 0/1-step amp offers no meaningful volume control */
	if (caps <= 1)
		return 0;

	return add_volume(codec,  imux->items[item].label, 0,
			  HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_INPUT), 1, &kctl);
}
/*
 * Build the CS421x capture controls: bound Capture Switch/Volume pair,
 * input-mux items with per-input boost volume, and a "Capture Source"
 * selector when auto-mic switching cannot pick the input by itself.
 * (The original leading comment described the previous function.)
 */
static int build_cs421x_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	struct hda_input_mux *imux = &spec->input_mux;
	int i, err, type_idx;
	const char *label;

	if (!spec->num_inputs)
		return 0;

	/* make bind-capture */
	spec->capture_bind[0] = make_bind_capture(codec, &snd_hda_bind_sw);
	spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
	for (i = 0; i < 2; i++) {
		struct snd_kcontrol *kctl;
		int n;
		if (!spec->capture_bind[i])
			return -ENOMEM;
		kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
		if (!kctl)
			return -ENOMEM;
		kctl->private_value = (long)spec->capture_bind[i];
		err = snd_hda_ctl_add(codec, 0, kctl);
		if (err < 0)
			return err;
		for (n = 0; n < AUTO_PIN_LAST; n++) {
			if (!spec->adc_nid[n])
				continue;
			err = snd_hda_add_nid(codec, kctl, 0, spec->adc_nid[n]);
			if (err < 0)
				return err;
		}
	}

	/* Add Input MUX Items + Capture Volume/Switch */
	for (i = 0; i < spec->num_inputs; i++) {
		label = hda_get_autocfg_input_label(codec, cfg, i);
		snd_hda_add_imux_item(imux, label, spec->adc_idx[i], &type_idx);

		err = cs421x_add_input_volume_control(codec, i);
		if (err < 0)
			return err;
	}

	/*
	    Add 'Capture Source' Switch if
		* 2 inputs and no mic detec
		* 3 inputs
	*/
	if ((spec->num_inputs == 2 && !spec->mic_detect) ||
	    (spec->num_inputs == 3)) {

		err = snd_hda_ctl_add(codec, spec->adc_nid[0],
			      snd_ctl_new1(&cs421x_capture_source, codec));
		if (err < 0)
			return err;
	}

	return 0;
}
/* Single DAC (Mute/Gain) */
/*
 * Build the CS421x output controls: "Master" mute and volume on the
 * single DAC, plus a speaker-boost control on CS4210 boards that have
 * speaker outputs. Returns 0/positive on success, negative on error.
 */
static int build_cs421x_output(struct hda_codec *codec)
{
	hda_nid_t dac = CS4210_DAC_NID;
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	struct snd_kcontrol *kctl;
	int err;
	/* string literal must not be written through — keep it const */
	const char *name = "Master";

	fix_volume_caps(codec, dac);

	err = add_mute(codec, name, 0,
		       HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
	if (err < 0)
		return err;

	err = add_volume(codec, name, 0,
			 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
	if (err < 0)
		return err;

	if (cfg->speaker_outs && (spec->vendor_nid == CS4210_VENDOR_NID)) {
		err = snd_hda_ctl_add(codec, 0,
			snd_ctl_new1(&cs421x_speaker_bost_ctl, codec));
		if (err < 0)
			return err;
	}
	return err;
}
/*
 * CS421x build_controls callback: create output/input/digital controls,
 * then run cs421x_init() so the jack kctls added afterwards report the
 * correct initial state.
 */
static int cs421x_build_controls(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	int err;

	err = build_cs421x_output(codec);
	if (err < 0)
		return err;
	err = build_cs421x_input(codec);
	if (err < 0)
		return err;
	err = build_digital_output(codec);
	if (err < 0)
		return err;
	/* refresh hardware state before registering jack kctls */
	err =  cs421x_init(codec);
	if (err < 0)
		return err;

	err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
	if (err < 0)
		return err;

	return 0;
}
/* CS421x unsolicited-event handler: HP and SPDIF jacks both trigger
 * auto-mute/output redirect; mic jacks trigger auto-mic switching. */
static void cs421x_unsol_event(struct hda_codec *codec, unsigned int res)
{
	unsigned int tag = res >> 26;	/* event tag in bits 31:26 */

	switch (snd_hda_jack_get_action(codec, tag)) {
	case HP_EVENT:
	case SPDIF_EVENT:
		cs_automute(codec);
		break;
	case MIC_EVENT:
		cs_automic(codec);
		break;
	default:
		break;
	}
	snd_hda_jack_report_sync(codec);
}
/*
 * Parse the CS421x capture side: record the ADC and mux index for each
 * input pin, and enable auto-mic switching when an external mic plus at
 * least one other input are present.
 */
static int parse_cs421x_input(struct hda_codec *codec)
{
	struct cs_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->autocfg;
	int i;

	for (i = 0; i < cfg->num_inputs; i++) {
		hda_nid_t pin = cfg->inputs[i].pin;
		spec->adc_nid[i] = get_adc(codec, pin, &spec->adc_idx[i]);
		/* ends up pointing at the last parsed input by default */
		spec->cur_input = spec->last_input = i;
		spec->num_inputs++;

		/* check whether the automatic mic switch is available */
		if (is_ext_mic(codec, i) && cfg->num_inputs >= 2) {
			spec->mic_detect = 1;
			spec->automic_idx = i;
		}
	}
	return 0;
}
static int cs421x_parse_auto_config(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
int err;
err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
if (err < 0)
return err;
err = parse_output(codec);
if (err < 0)
return err;
err = parse_cs421x_input(codec);
if (err < 0)
return err;
err = parse_digital_output(codec);
if (err < 0)
return err;
return 0;
}
#ifdef CONFIG_PM
/*
	Manage PDREF, when transitioning to D3hot
	(DAC,ADC) -> D3, PDREF=1, AFG->D3

	Suspend callback: park the pins, force DAC and ADC to D3, and on
	CS4210 assert the PDREF bit in the device-config coefficient.
*/
static int cs421x_suspend(struct hda_codec *codec, pm_message_t state)
{
	struct cs_spec *spec = codec->spec;
	unsigned int coef;

	snd_hda_shutup_pins(codec);

	snd_hda_codec_write(codec, CS4210_DAC_NID, 0,
			    AC_VERB_SET_POWER_STATE,  AC_PWRST_D3);
	snd_hda_codec_write(codec, CS4210_ADC_NID, 0,
			    AC_VERB_SET_POWER_STATE,  AC_PWRST_D3);

	if (spec->vendor_nid == CS4210_VENDOR_NID) {
		coef = cs_vendor_coef_get(codec, CS421X_IDX_DEV_CFG);
		coef |= 0x0004; /* PDREF */
		cs_vendor_coef_set(codec, CS421X_IDX_DEV_CFG, coef);
	}

	return 0;
}
#endif
static struct hda_codec_ops cs421x_patch_ops = {
.build_controls = cs421x_build_controls,
.build_pcms = cs_build_pcms,
.init = cs421x_init,
.free = cs_free,
.unsol_event = cs421x_unsol_event,
#ifdef CONFIG_PM
.suspend = cs421x_suspend,
#endif
};
/*
 * Probe entry for CS4210: allocate the spec, resolve the board model,
 * apply pin overrides and per-board GPIO/SENSE_B setup, program the
 * pinmux, and auto-parse the configuration.
 */
static int patch_cs4210(struct hda_codec *codec)
{
	struct cs_spec *spec;
	int err;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	codec->spec = spec;

	spec->vendor_nid = CS4210_VENDOR_NID;

	spec->board_config =
		snd_hda_check_board_config(codec, CS421X_MODELS,
					   cs421x_models, cs421x_cfg_tbl);
	if (spec->board_config >= 0)
		fix_pincfg(codec, spec->board_config, cs421x_pincfgs);
	/*
	    Setup GPIO/SENSE for each board (if used)
	*/
	switch (spec->board_config) {
	case CS421X_CDB4210:
		snd_printd("CS4210 board: %s\n",
			cs421x_models[spec->board_config]);
/*		spec->gpio_mask = 3;
		spec->gpio_dir = 3;
		spec->gpio_data = 3;
*/
		spec->sense_b = 1;

		break;
	}

	/*
	    Update the GPIO/DMIC/SENSE_B pinmux before the configuration
	    is auto-parsed. If GPIO or SENSE_B is forced, DMIC input
	    is disabled.
	*/
	cs4210_pinmux_init(codec);

	err = cs421x_parse_auto_config(codec);
	if (err < 0)
		goto error;

	codec->patch_ops = cs421x_patch_ops;

	return 0;

 error:
	kfree(codec->spec);
	codec->spec = NULL;
	return err;
}
/*
 * Probe entry for CS4213: no board quirks or pinmux setup are needed;
 * just allocate the spec, auto-parse, and install the CS421x ops.
 */
static int patch_cs4213(struct hda_codec *codec)
{
	struct cs_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	int err;

	if (!spec)
		return -ENOMEM;
	codec->spec = spec;
	spec->vendor_nid = CS4213_VENDOR_NID;

	err = cs421x_parse_auto_config(codec);
	if (err < 0) {
		/* release the half-initialized spec on parse failure */
		kfree(spec);
		codec->spec = NULL;
		return err;
	}

	codec->patch_ops = cs421x_patch_ops;
	return 0;
}
/*
 * patch entries
 */
static const struct hda_codec_preset snd_hda_preset_cirrus[] = {
	{ .id = 0x10134206, .name = "CS4206", .patch = patch_cs420x },
	{ .id = 0x10134207, .name = "CS4207", .patch = patch_cs420x },
	{ .id = 0x10134210, .name = "CS4210", .patch = patch_cs4210 },
	{ .id = 0x10134213, .name = "CS4213", .patch = patch_cs4213 },
	{} /* terminator */
};

/* module auto-load aliases, one per supported codec ID */
MODULE_ALIAS("snd-hda-codec-id:10134206");
MODULE_ALIAS("snd-hda-codec-id:10134207");
MODULE_ALIAS("snd-hda-codec-id:10134210");
MODULE_ALIAS("snd-hda-codec-id:10134213");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cirrus Logic HD-audio codec");

/* registration handle passed to the HDA preset framework */
static struct hda_codec_preset_list cirrus_list = {
	.preset = snd_hda_preset_cirrus,
	.owner = THIS_MODULE,
};
/* Register the Cirrus preset list with the HDA core on module load. */
static int __init patch_cirrus_init(void)
{
	return snd_hda_add_codec_preset(&cirrus_list);
}

/* Unregister the preset list on module unload. */
static void __exit patch_cirrus_exit(void)
{
	snd_hda_delete_codec_preset(&cirrus_list);
}

module_init(patch_cirrus_init)
module_exit(patch_cirrus_exit)
| gpl-2.0 |
chil360/chil360-kernel | arch/arm/mach-pxa/sharpsl_pm.c | 4828 | 27691 | /*
* Battery and Power Management code for the Sharp SL-C7xx and SL-Cxx00
* series of PDAs
*
* Copyright (c) 2004-2005 Richard Purdie
*
* Based on code written by Sharp for 2.4 kernels
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/apm-emulation.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/leds.h>
#include <linux/suspend.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <mach/pm.h>
#include <mach/pxa2xx-regs.h>
#include <mach/regs-rtc.h>
#include <mach/sharpsl_pm.h>
/*
 * Constants
 *
 * Timing parameters for the charge/battery-check state machine; the
 * jiffies-based values drive delayed work, the bare integers are used
 * as seconds or milliseconds by their consumers.
 */
#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000))  /* 1 min */
#define SHARPSL_CHARGE_FINISH_TIME      (msecs_to_jiffies(10*60*1000)) /* 10 min */
#define SHARPSL_BATCHK_TIME             (msecs_to_jiffies(15*1000))    /* 15 sec */
#define SHARPSL_BATCHK_TIME_SUSPEND     (60*10)                        /* 10 min */

#define SHARPSL_WAIT_CO_TIME                 15  /* 15 sec */
#define SHARPSL_WAIT_DISCHARGE_ON           100  /* 100 msec */
#define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP 10  /* 10 msec */
#define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT 10  /* 10 msec */
#define SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN 10  /* 10 msec */
#define SHARPSL_CHARGE_WAIT_TIME             15  /* 15 msec */
#define SHARPSL_CHARGE_CO_CHECK_TIME          5  /* 5 msec */
#define SHARPSL_CHARGE_RETRY_CNT              1  /* eqv. 10 min */

/*
 * Prototypes
 */
#ifdef CONFIG_PM
static int sharpsl_off_charge_battery(void);
static int sharpsl_check_battery_voltage(void);
static int sharpsl_fatal_check(void);
#endif
static int sharpsl_check_battery_temp(void);
static int sharpsl_ac_check(void);
static int sharpsl_average_value(int ad);
static void sharpsl_average_clear(void);
static void sharpsl_charge_toggle(struct work_struct *private_);
static void sharpsl_battery_thread(struct work_struct *private_);

/*
 * Variables
 */
/* global PM state shared by the whole driver */
struct sharpsl_pm_status sharpsl_pm;
static DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
static DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
/* ADC-reading -> battery-percentage table while on AC power.
 * Sorted descending; get_percentage() scans it bottom-up.
 * NOTE(review): the two consecutive {192, ...} rows appear intentional
 * (half-step around 192) but are worth confirming against Sharp's data. */
struct battery_thresh sharpsl_battery_levels_acin[] = {
	{ 213, 100},
	{ 212,  98},
	{ 211,  95},
	{ 210,  93},
	{ 209,  90},
	{ 208,  88},
	{ 207,  85},
	{ 206,  83},
	{ 205,  80},
	{ 204,  78},
	{ 203,  75},
	{ 202,  73},
	{ 201,  70},
	{ 200,  68},
	{ 199,  65},
	{ 198,  63},
	{ 197,  60},
	{ 196,  58},
	{ 195,  55},
	{ 194,  53},
	{ 193,  50},
	{ 192,  48},
	{ 192,  45},
	{ 191,  43},
	{ 191,  40},
	{ 190,  38},
	{ 190,  35},
	{ 189,  33},
	{ 188,  30},
	{ 187,  28},
	{ 186,  25},
	{ 185,  23},
	{ 184,  20},
	{ 183,  18},
	{ 182,  15},
	{ 181,  13},
	{ 180,  10},
	{ 179,   8},
	{ 178,   5},
	{   0,   0},
};

/* ADC-reading -> battery-percentage table on battery power. */
struct battery_thresh sharpsl_battery_levels_noac[] = {
	{ 213, 100},
	{ 212,  98},
	{ 211,  95},
	{ 210,  93},
	{ 209,  90},
	{ 208,  88},
	{ 207,  85},
	{ 206,  83},
	{ 205,  80},
	{ 204,  78},
	{ 203,  75},
	{ 202,  73},
	{ 201,  70},
	{ 200,  68},
	{ 199,  65},
	{ 198,  63},
	{ 197,  60},
	{ 196,  58},
	{ 195,  55},
	{ 194,  53},
	{ 193,  50},
	{ 192,  48},
	{ 191,  45},
	{ 190,  43},
	{ 189,  40},
	{ 188,  38},
	{ 187,  35},
	{ 186,  33},
	{ 185,  30},
	{ 184,  28},
	{ 183,  25},
	{ 182,  23},
	{ 181,  20},
	{ 180,  18},
	{ 179,  15},
	{ 178,  13},
	{ 177,  10},
	{ 176,   8},
	{ 175,   5},
	{   0,   0},
};

/* MAX1111 Commands */
#define MAXCTRL_PD0     (1u << 0)
#define MAXCTRL_PD1     (1u << 1)
#define MAXCTRL_SGL     (1u << 2)
#define MAXCTRL_UNI     (1u << 3)
#define MAXCTRL_SEL_SH  4
#define MAXCTRL_STR     (1u << 7)

/* provided by the MAX1111 hwmon/SPI driver */
extern int max1111_read_channel(int);
/*
 * Read MAX1111 ADC
 *
 * Returns the raw ADC value for the given (driver-encoded) channel, or
 * 0 unconditionally on Tosa, which does not use the MAX1111.
 */
int sharpsl_pm_pxa_read_max1111(int channel)
{
	/* Ugly, better move this function into another module */
	if (machine_is_tosa())
	    return 0;

	/* max1111 accepts channels from 0-3, however,
	 * it is encoded from 0-7 here in the code.
	 */
	return max1111_read_channel(channel >> 1);
}
/*
 * Map a battery voltage reading to a charge percentage, using the
 * threshold table that matches the current charger and backlight state.
 */
static int get_percentage(int voltage)
{
	struct battery_thresh *table;
	int bl_on = sharpsl_pm.machinfo->backlight_get_status ?
			sharpsl_pm.machinfo->backlight_get_status() : 0;
	int idx;

	if (sharpsl_pm.charge_mode == CHRG_ON)
		table = bl_on ? sharpsl_pm.machinfo->bat_levels_acin_bl :
				sharpsl_pm.machinfo->bat_levels_acin;
	else
		table = bl_on ? sharpsl_pm.machinfo->bat_levels_noac_bl :
				sharpsl_pm.machinfo->bat_levels_noac;

	/* Walk upwards from the lowest entry until the voltage fits. */
	idx = sharpsl_pm.machinfo->bat_levels - 1;
	while (idx > 0 && voltage > table[idx].voltage)
		idx--;

	return table[idx].percentage;
}
/*
 * Classify a battery voltage as HIGH/LOW/CRITICAL for APM, using the
 * threshold pair appropriate to the current charging state.
 */
static int get_apm_status(int voltage)
{
	int low, high;

	if (sharpsl_pm.charge_mode == CHRG_ON) {
		high = sharpsl_pm.machinfo->status_high_acin;
		low  = sharpsl_pm.machinfo->status_low_acin;
	} else {
		high = sharpsl_pm.machinfo->status_high_noac;
		low  = sharpsl_pm.machinfo->status_low_noac;
	}

	if (voltage >= high)
		return APM_BATTERY_STATUS_HIGH;
	return (voltage >= low) ? APM_BATTERY_STATUS_LOW
				: APM_BATTERY_STATUS_CRITICAL;
}
/* Request a near-immediate battery status refresh; exported so other
 * machine code (e.g. resume paths) can poke the monitor. */
void sharpsl_battery_kick(void)
{
	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
}
EXPORT_SYMBOL(sharpsl_battery_kick);
/*
 * Periodic battery monitor: samples the battery voltage, updates the
 * cached AC/battery status and percentage, queues an emergency suspend
 * when the battery goes critical on battery power, then re-arms itself.
 */
static void sharpsl_battery_thread(struct work_struct *private_)
{
	int voltage, percent, apm_status, i;

	/* Not probed yet - nothing we can read. */
	if (!sharpsl_pm.machinfo)
		return;

	sharpsl_pm.battstat.ac_status = (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN) ? APM_AC_ONLINE : APM_AC_OFFLINE);

	/* Corgi cannot confirm when battery fully charged so periodically kick! */
	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
			&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
		schedule_delayed_work(&toggle_charger, 0);

	/* Retry the ADC read up to five times; it can return <= 0 transiently. */
	for (i = 0; i < 5; i++) {
		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
		if (voltage > 0)
			break;
	}
	if (voltage <= 0) {
		/* Fall back to the top (fullest) table entry so a bad
		 * reading never looks like a critical battery. */
		voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
		dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n");
	}

	voltage = sharpsl_average_value(voltage);
	apm_status = get_apm_status(voltage);
	percent = get_percentage(voltage);

	/* At low battery voltages, the voltage has a tendency to start
	   creeping back up so we try to avoid this here */
	if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE)
			|| (apm_status == APM_BATTERY_STATUS_HIGH)
			|| percent <= sharpsl_pm.battstat.mainbat_percent) {
		sharpsl_pm.battstat.mainbat_voltage = voltage;
		sharpsl_pm.battstat.mainbat_status = apm_status;
		sharpsl_pm.battstat.mainbat_percent = percent;
	}

	dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %ld\n", voltage,
			sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies);

	/* Suspend if critical battery level */
	if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
			&& (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL)
			&& !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
		sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
		dev_err(sharpsl_pm.dev, "Fatal Off\n");
		apm_queue_event(APM_CRITICAL_SUSPEND);
	}

	/* Re-arm the periodic check. */
	schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
}
/*
 * Drive the charge LED trigger: full brightness while charging, off
 * otherwise; an error state only logs (the LED handling for errors is
 * left to the machine code).
 */
void sharpsl_pm_led(int val)
{
	switch (val) {
	case SHARPSL_LED_ERROR:
		dev_err(sharpsl_pm.dev, "Charging Error!\n");
		break;
	case SHARPSL_LED_ON:
		dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
		led_trigger_event(sharpsl_charge_led_trigger, LED_FULL);
		break;
	default:
		dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
		led_trigger_event(sharpsl_charge_led_trigger, LED_OFF);
		break;
	}
}
/* Enter charging mode: reset the full-detect counter and schedule the
 * toggle worker, which performs the actual charger pulse sequence. */
static void sharpsl_charge_on(void)
{
	dev_dbg(sharpsl_pm.dev, "Turning Charger On\n");

	sharpsl_pm.full_count = 0;
	sharpsl_pm.charge_mode = CHRG_ON;
	schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250));
	schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500));
}

/* Stop charging, clear the LED and trigger an immediate battery
 * status refresh. */
static void sharpsl_charge_off(void)
{
	dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n");

	sharpsl_pm.machinfo->charge(0);
	sharpsl_pm_led(SHARPSL_LED_OFF);
	sharpsl_pm.charge_mode = CHRG_OFF;

	schedule_delayed_work(&sharpsl_bat, 0);
}

/* Charging fault: signal via the LED, switch the charger off and latch
 * the error state. */
static void sharpsl_charge_error(void)
{
	sharpsl_pm_led(SHARPSL_LED_ERROR);
	sharpsl_pm.machinfo->charge(0);
	sharpsl_pm.charge_mode = CHRG_ERROR;
}
/*
 * Work item that (re)starts a charge cycle: verify AC is still present
 * and that temperature/AC voltage are sane, then pulse the charger off
 * and back on, recording when this charge cycle began.
 */
static void sharpsl_charge_toggle(struct work_struct *private_)
{
	dev_dbg(sharpsl_pm.dev, "Toggling Charger at time: %lx\n", jiffies);

	if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
		/* Adapter was unplugged meanwhile - stop charging. */
		sharpsl_charge_off();
		return;
	}

	if (sharpsl_check_battery_temp() < 0 || sharpsl_ac_check() < 0) {
		sharpsl_charge_error();
		return;
	}

	sharpsl_pm_led(SHARPSL_LED_ON);
	sharpsl_pm.machinfo->charge(0);
	mdelay(SHARPSL_CHARGE_WAIT_TIME);
	sharpsl_pm.machinfo->charge(1);

	sharpsl_pm.charge_start_time = jiffies;
}
/* Debounced AC status handler: align the charging state with the
 * current AC input and refresh the battery reading.  The averaging
 * history is dropped because the load profile just changed. */
static void sharpsl_ac_timer(unsigned long data)
{
	int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);

	dev_dbg(sharpsl_pm.dev, "AC Status: %d\n", acin);

	sharpsl_average_clear();
	if (acin && (sharpsl_pm.charge_mode != CHRG_ON))
		sharpsl_charge_on();
	else if (sharpsl_pm.charge_mode == CHRG_ON)
		sharpsl_charge_off();

	schedule_delayed_work(&sharpsl_bat, 0);
}

/* AC plug/unplug interrupt: defer to sharpsl_ac_timer for debouncing. */
static irqreturn_t sharpsl_ac_isr(int irq, void *dev_id)
{
	/* Delay the event slightly to debounce */
	/* Must be a smaller delay than the chrg_full_isr below */
	mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));

	return IRQ_HANDLED;
}
/* Debounced "charge full" handler.  The first full indication is
 * treated as suspect and the charger is re-toggled; only a second
 * indication inside the expected charge window ends the cycle. */
static void sharpsl_chrg_full_timer(unsigned long data)
{
	dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies);

	sharpsl_pm.full_count++;

	if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
		dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop charging!\n");
		if (sharpsl_pm.charge_mode == CHRG_ON)
			sharpsl_charge_off();
	} else if (sharpsl_pm.full_count < 2) {
		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
		schedule_delayed_work(&toggle_charger, 0);
	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
		/* Full came suspiciously late - restart the cycle. */
		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
		schedule_delayed_work(&toggle_charger, 0);
	} else {
		sharpsl_charge_off();
		sharpsl_pm.charge_mode = CHRG_DONE;
		dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n");
	}
}

/* Charging Finished Interrupt (Not present on Corgi) */
/* Can trigger at the same time as an AC status change so
   delay until after that has been processed */
static irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id)
{
	if (sharpsl_pm.flags & SHARPSL_SUSPENDED)
		return IRQ_HANDLED;

	/* delay until after any ac interrupt */
	mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
/*
 * Battery-lock / fatal-battery interrupt: if either condition is
 * active, queue a critical suspend exactly once.
 */
static irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id)
{
	int fatal = 0;

	if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) {
		dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n");
		fatal = 1;
	}
	if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_FATAL)) {
		dev_err(sharpsl_pm.dev, "Fatal Batt Error! Suspending.\n");
		fatal = 1;
	}

	if (fatal && !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
		sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
		apm_queue_event(APM_CRITICAL_SUSPEND);
	}

	return IRQ_HANDLED;
}
/*
 * Maintain an average of the last 10 readings
 */
#define SHARPSL_CNV_VALUE_NUM    10
/* Number of samples currently held in sharpsl_average_value()'s
 * history buffer. */
static int sharpsl_ad_index;

/* Drop the sample history (called when the load changes, e.g. on an
 * AC status transition). */
static void sharpsl_average_clear(void)
{
	sharpsl_ad_index = 0;
}

/* Fold @ad into a sliding window and return the window average.
 * While the battery is not in the HIGH state the raw value is
 * returned and the history reset, so low-battery readings react
 * immediately. */
static int sharpsl_average_value(int ad)
{
	int i, ad_val = 0;
	static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1];

	if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) {
		sharpsl_ad_index = 0;
		return ad;
	}

	sharpsl_ad[sharpsl_ad_index] = ad;
	sharpsl_ad_index++;
	if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) {
		/* Window full: shift everything down one slot. */
		for (i = 0; i < (SHARPSL_CNV_VALUE_NUM-1); i++)
			sharpsl_ad[i] = sharpsl_ad[i+1];
		sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1;
	}
	for (i = 0; i < sharpsl_ad_index; i++)
		ad_val += sharpsl_ad[i];

	/* sharpsl_ad_index >= 1 here, so this cannot divide by zero. */
	return ad_val / sharpsl_ad_index;
}
/*
* Take an array of 5 integers, remove the maximum and minimum values
* and return the average.
*/
/*
 * Trimmed mean of five samples: discard one maximum and one minimum
 * and average the remaining three.  Tie-breaking matches the original:
 * the first (lowest-index) maximum and, for the minimum, the candidate
 * found while scanning downward with a strict comparison.
 */
static int get_select_val(int *val)
{
	int idx, max_idx, min_idx, best, sum = 0;

	/* Locate the maximum (first occurrence wins). */
	best = val[0];
	max_idx = 0;
	for (idx = 1; idx < 5; idx++) {
		if (val[idx] > best) {
			best = val[idx];
			max_idx = idx;
		}
	}

	/* Locate the minimum, scanning from the end. */
	best = val[4];
	min_idx = 4;
	for (idx = 3; idx >= 0; idx--) {
		if (val[idx] < best) {
			best = val[idx];
			min_idx = idx;
		}
	}

	/* Sum everything except the chosen extremes. */
	for (idx = 0; idx < 5; idx++)
		if (idx != max_idx && idx != min_idx)
			sum += val[idx];

	dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);

	return sum / 3;
}
/* Sample the battery temperature five times (trimmed-mean via
 * get_select_val) and refuse charging (-1) when it exceeds the
 * machine's charge_on_temp limit; 0 when safe to charge. */
static int sharpsl_check_battery_temp(void)
{
	int val, i, buff[5];

	/* Check battery temperature */
	for (i = 0; i < 5; i++) {
		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
		sharpsl_pm.machinfo->measure_temp(1);
		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
		buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_TEMP);
		sharpsl_pm.machinfo->measure_temp(0);
	}

	val = get_select_val(buff);

	dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
	if (val > sharpsl_pm.machinfo->charge_on_temp) {
		printk(KERN_WARNING "Not charging: temperature out of limits.\n");
		return -1;
	}

	return 0;
}
#ifdef CONFIG_PM
/* Measure the battery voltage under a discharge load and return -1 if
 * it is below the minimum required to start charging, else 0.  The
 * charger is disabled for the duration of the measurement. */
static int sharpsl_check_battery_voltage(void)
{
	int val, i, buff[5];

	/* disable charge, enable discharge */
	sharpsl_pm.machinfo->charge(0);
	sharpsl_pm.machinfo->discharge(1);
	mdelay(SHARPSL_WAIT_DISCHARGE_ON);

	/* Optional second discharge path on some machines. */
	if (sharpsl_pm.machinfo->discharge1)
		sharpsl_pm.machinfo->discharge1(1);

	/* Check battery voltage */
	for (i = 0; i < 5; i++) {
		buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
	}

	if (sharpsl_pm.machinfo->discharge1)
		sharpsl_pm.machinfo->discharge1(0);
	sharpsl_pm.machinfo->discharge(0);

	val = get_select_val(buff);
	dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);

	if (val < sharpsl_pm.machinfo->charge_on_volt)
		return -1;

	return 0;
}
#endif
static int sharpsl_ac_check(void)
{
int temp, i, buff[5];
for (i = 0; i < 5; i++) {
buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_ACIN_VOLT);
mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN);
}
temp = get_select_val(buff);
dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n", temp);
if ((temp > sharpsl_pm.machinfo->charge_acin_high) || (temp < sharpsl_pm.machinfo->charge_acin_low)) {
dev_err(sharpsl_pm.dev, "Error: AC check failed: voltage %d.\n", temp);
return -1;
}
return 0;
}
#ifdef CONFIG_PM
/* Platform suspend hook: flush pending charger work and remember
 * whether charging was in progress so the offline charger can resume
 * it while suspended. */
static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
{
	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
	flush_delayed_work_sync(&toggle_charger);
	flush_delayed_work_sync(&sharpsl_bat);

	if (sharpsl_pm.charge_mode == CHRG_ON)
		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
	else
		sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;

	return 0;
}

/* Platform resume hook: clear the reset-source register (required by
 * the bootloader), drop stale averaging state and resume flags. */
static int sharpsl_pm_resume(struct platform_device *pdev)
{
	/* Clear the reset source indicators as they break the bootloader upon reboot */
	RCSR = 0x0f;
	sharpsl_average_clear();
	sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
	sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;

	return 0;
}
/* Put the machine to sleep.  If an offline charge is pending and AC is
 * present, kick the offline charger first; then program the RTC alarm
 * either for a periodic battery check (while charging) or for the
 * user's alarm, and enter the PXA sleep state. */
static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
{
	dev_dbg(sharpsl_pm.dev, "Time is: %08x\n", RCNR);

	dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n", sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
	/* not charging and AC-IN! */
	if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN))) {
		dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
		sharpsl_pm.charge_mode = CHRG_OFF;
		sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
		sharpsl_off_charge_battery();
	}

	sharpsl_pm.machinfo->presuspend();

	PEDR = 0xffffffff; /* clear it */

	sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
	/* While charging, wake periodically for a battery check unless the
	 * user's own alarm would fire sooner anyway. */
	if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
		RTSR &= RTSR_ALE;
		RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
		dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n", RTAR);
		sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
	} else if (alarm_enable) {
		RTSR &= RTSR_ALE;
		RTAR = alarm_time;
		dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n", RTAR);
	} else {
		dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
	}

	pxa_pm_enter(state);

	sharpsl_pm.machinfo->postsuspend();

	dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n", PEDR);
}

/* Decide whether a wakeup is genuine.  Returns 1 to go straight back
 * to sleep (spurious wake, charger maintenance wake, or a fatal
 * battery condition), 0 to complete the resume. */
static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
{
	if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable)) {
		if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
			dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
			corgi_goto_sleep(alarm_time, alarm_enable, state);
			return 1;
		}
		if (sharpsl_off_charge_battery()) {
			dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
			corgi_goto_sleep(alarm_time, alarm_enable, state);
			return 1;
		}
		dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
	}

	if ((!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) ||
	    (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_FATAL))) {
		dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
		corgi_goto_sleep(alarm_time, alarm_enable, state);
		return 1;
	}

	return 0;
}

/* platform_suspend_ops.enter: sleep, then keep re-entering suspend
 * until corgi_enter_suspend() decides the wakeup should be honoured. */
static int corgi_pxa_pm_enter(suspend_state_t state)
{
	unsigned long alarm_time = RTAR;
	unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);

	dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");

	corgi_goto_sleep(alarm_time, alarm_status, state);

	while (corgi_enter_suspend(alarm_time, alarm_status, state))
		{}

	if (sharpsl_pm.machinfo->earlyresume)
		sharpsl_pm.machinfo->earlyresume();

	dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");

	return 0;
}
/*
 * Check for fatal battery errors
 * Fatal returns -1
 */
/* Measures the battery voltage under a discharge load (temporarily
 * pausing charging if active) and compares it against the machine's
 * fatal thresholds for the current AC state. */
static int sharpsl_fatal_check(void)
{
	int buff[5], temp, i, acin;

	dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check entered\n");

	/* Check AC-Adapter */
	acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);

	if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
		sharpsl_pm.machinfo->charge(0);
		udelay(100);
		sharpsl_pm.machinfo->discharge(1);	/* enable discharge */
		mdelay(SHARPSL_WAIT_DISCHARGE_ON);
	}

	if (sharpsl_pm.machinfo->discharge1)
		sharpsl_pm.machinfo->discharge1(1);

	/* Check battery : check inserting battery ? */
	for (i = 0; i < 5; i++) {
		buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
		mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
	}

	if (sharpsl_pm.machinfo->discharge1)
		sharpsl_pm.machinfo->discharge1(0);

	/* Restore the charging state disturbed above. */
	if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
		udelay(100);
		sharpsl_pm.machinfo->charge(1);
		sharpsl_pm.machinfo->discharge(0);
	}

	temp = get_select_val(buff);
	dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check: acin: %d, discharge voltage: %d, no discharge: %ld\n", acin, temp, sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT));

	if ((acin && (temp < sharpsl_pm.machinfo->fatal_acin_volt)) ||
	    (!acin && (temp < sharpsl_pm.machinfo->fatal_noacin_volt)))
		return -1;
	return 0;
}

/* Offline-charger failure path: disable the charger, show the error
 * LED and latch CHRG_ERROR.  Always returns 1 ("go back to sleep"). */
static int sharpsl_off_charge_error(void)
{
	dev_err(sharpsl_pm.dev, "Offline Charger: Error occurred.\n");
	sharpsl_pm.machinfo->charge(0);
	sharpsl_pm_led(SHARPSL_LED_ERROR);
	sharpsl_pm.charge_mode = CHRG_ERROR;
	return 1;
}
/*
 * Charging Control while suspended
 * Return 1 - go straight to sleep
 * Return 0 - sleep or wakeup depending on other factors
 */
/* Three-step offline (suspended) charging state machine:
 *   Step 1 (CHRG_OFF): validate AC/temperature and start charging.
 *   Step 2 (first pass, full_count == 0): validate battery, restart
 *     the charge pulse and wait for the first "charge full" signal.
 *   Step 3: wait for a confirming "charge full" or a timeout.
 * A wakeup event detected via charger_wakeup() aborts the wait (0). */
static int sharpsl_off_charge_battery(void)
{
	int time;

	dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);

	if (sharpsl_pm.charge_mode == CHRG_OFF) {
		dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");

		/* AC Check */
		if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery_temp() < 0))
			return sharpsl_off_charge_error();

		/* Start Charging */
		sharpsl_pm_led(SHARPSL_LED_ON);
		sharpsl_pm.machinfo->charge(0);
		mdelay(SHARPSL_CHARGE_WAIT_TIME);
		sharpsl_pm.machinfo->charge(1);

		sharpsl_pm.charge_mode = CHRG_ON;
		sharpsl_pm.full_count = 0;

		return 1;
	} else if (sharpsl_pm.charge_mode != CHRG_ON) {
		/* CHRG_DONE or CHRG_ERROR: nothing to do, keep sleeping. */
		return 1;
	}

	if (sharpsl_pm.full_count == 0) {
		int time;	/* NOTE(review): shadows the outer 'time'; harmless but confusing */

		dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");

		if ((sharpsl_check_battery_temp() < 0) || (sharpsl_check_battery_voltage() < 0))
			return sharpsl_off_charge_error();

		sharpsl_pm.machinfo->charge(0);
		mdelay(SHARPSL_CHARGE_WAIT_TIME);
		sharpsl_pm.machinfo->charge(1);
		sharpsl_pm.charge_mode = CHRG_ON;

		mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);

		time = RCNR;
		while (1) {
			/* Check if any wakeup event had occurred */
			if (sharpsl_pm.machinfo->charger_wakeup() != 0)
				return 0;
			/* Check for timeout */
			if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
				return 1;
			if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
				dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occurred. Retrying to check\n");
				sharpsl_pm.full_count++;
				sharpsl_pm.machinfo->charge(0);
				mdelay(SHARPSL_CHARGE_WAIT_TIME);
				sharpsl_pm.machinfo->charge(1);
				return 1;
			}
		}
	}

	dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");

	mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);

	time = RCNR;
	while (1) {
		/* Check if any wakeup event had occurred */
		if (sharpsl_pm.machinfo->charger_wakeup())
			return 0;
		/* Check for timeout */
		if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
			if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
				dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
				sharpsl_pm.full_count = 0;
			}
			sharpsl_pm.full_count++;
			return 1;
		}
		if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
			dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
			sharpsl_pm_led(SHARPSL_LED_OFF);
			sharpsl_pm.machinfo->charge(0);
			sharpsl_pm.charge_mode = CHRG_DONE;
			return 1;
		}
	}
}
#else
#define sharpsl_pm_suspend NULL
#define sharpsl_pm_resume NULL
#endif
/* sysfs: report the cached battery percentage. */
static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", sharpsl_pm.battstat.mainbat_percent);
}

/* sysfs: report the cached (averaged) battery voltage reading. */
static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", sharpsl_pm.battstat.mainbat_voltage);
}

/* Read-only attributes registered in sharpsl_pm_probe(). */
static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);

/* Hook into the ARM APM emulation layer. */
extern void (*apm_get_power_status)(struct apm_power_info *);
/*
 * APM callback: report the cached AC/battery state to the APM
 * emulation layer.  While actively charging we override the status
 * with CHARGING.
 */
static void sharpsl_apm_get_power_status(struct apm_power_info *info)
{
	info->ac_line_status = sharpsl_pm.battstat.ac_status;
	info->battery_status = (sharpsl_pm.charge_mode == CHRG_ON) ?
			APM_BATTERY_STATUS_CHARGING :
			sharpsl_pm.battstat.mainbat_status;
	info->battery_flag = (1 << info->battery_status);
	info->battery_life = sharpsl_pm.battstat.mainbat_percent;
}
#ifdef CONFIG_PM
/* System suspend operations; corgi_pxa_pm_enter() wraps the PXA sleep
 * entry with the charging/fatal-battery logic above. */
static const struct platform_suspend_ops sharpsl_pm_ops = {
	.prepare	= pxa_pm_prepare,
	.finish		= pxa_pm_finish,
	.enter		= corgi_pxa_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif
/* Probe: wire up timers, LED trigger, GPIOs, interrupts and sysfs, and
 * kick the AC timer so the initial charge state is evaluated.
 * NOTE(review): the gpio_request() return values are ignored and
 * request_irq() failures are only logged, not unwound — verify this is
 * acceptable for these always-present SoC GPIOs. */
static int __devinit sharpsl_pm_probe(struct platform_device *pdev)
{
	int ret;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	sharpsl_pm.dev = &pdev->dev;
	sharpsl_pm.machinfo = pdev->dev.platform_data;
	sharpsl_pm.charge_mode = CHRG_OFF;
	sharpsl_pm.flags = 0;

	init_timer(&sharpsl_pm.ac_timer);
	sharpsl_pm.ac_timer.function = sharpsl_ac_timer;

	init_timer(&sharpsl_pm.chrg_full_timer);
	sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;

	led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);

	sharpsl_pm.machinfo->init();

	gpio_request(sharpsl_pm.machinfo->gpio_acin, "AC IN");
	gpio_direction_input(sharpsl_pm.machinfo->gpio_acin);
	gpio_request(sharpsl_pm.machinfo->gpio_batfull, "Battery Full");
	gpio_direction_input(sharpsl_pm.machinfo->gpio_batfull);
	gpio_request(sharpsl_pm.machinfo->gpio_batlock, "Battery Lock");
	gpio_direction_input(sharpsl_pm.machinfo->gpio_batlock);

	/* Register interrupt handlers */
	/* The ISR function pointer doubles as the dev_id cookie; the
	 * free_irq() calls in sharpsl_pm_remove() pass the same value. */
	if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "AC Input Detect", sharpsl_ac_isr)) {
		dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin));
	}

	if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Battery Cover", sharpsl_fatal_isr)) {
		dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock));
	}

	if (sharpsl_pm.machinfo->gpio_fatal) {
		if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, "Fatal Battery", sharpsl_fatal_isr)) {
			dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal));
		}
	}

	if (sharpsl_pm.machinfo->batfull_irq) {
		/* Register interrupt handler. */
		if (request_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING, "CO", sharpsl_chrg_full_isr)) {
			dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull));
		}
	}

	ret = device_create_file(&pdev->dev, &dev_attr_battery_percentage);
	ret |= device_create_file(&pdev->dev, &dev_attr_battery_voltage);
	if (ret != 0)
		dev_warn(&pdev->dev, "Failed to register attributes (%d)\n", ret);

	apm_get_power_status = sharpsl_apm_get_power_status;

#ifdef CONFIG_PM
	suspend_set_ops(&sharpsl_pm_ops);
#endif

	/* Evaluate the initial AC/charging state shortly after boot. */
	mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));

	return 0;
}
/* Remove: undo everything done in probe, in reverse order. */
static int sharpsl_pm_remove(struct platform_device *pdev)
{
	suspend_set_ops(NULL);

	device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
	device_remove_file(&pdev->dev, &dev_attr_battery_voltage);

	led_trigger_unregister_simple(sharpsl_charge_led_trigger);

	free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
	free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);

	if (sharpsl_pm.machinfo->gpio_fatal)
		free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);

	if (sharpsl_pm.machinfo->batfull_irq)
		free_irq(PXA_GPIO_TO_IRQ(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);

	gpio_free(sharpsl_pm.machinfo->gpio_batlock);
	gpio_free(sharpsl_pm.machinfo->gpio_batfull);
	gpio_free(sharpsl_pm.machinfo->gpio_acin);

	if (sharpsl_pm.machinfo->exit)
		sharpsl_pm.machinfo->exit();

	del_timer_sync(&sharpsl_pm.chrg_full_timer);
	del_timer_sync(&sharpsl_pm.ac_timer);

	return 0;
}

static struct platform_driver sharpsl_pm_driver = {
	.probe		= sharpsl_pm_probe,
	.remove		= sharpsl_pm_remove,
	/* Legacy (pre-dev_pm_ops) suspend/resume callbacks; NULL when
	 * CONFIG_PM is disabled via the #defines above. */
	.suspend	= sharpsl_pm_suspend,
	.resume		= sharpsl_pm_resume,
	.driver		= {
		.name	= "sharpsl-pm",
	},
};
/*
 * Module init: register the platform driver.
 *
 * Annotated __init rather than __devinit: an initcall runs exactly once
 * during boot, so its text belongs in the discarded init section.
 * __devinit is reserved for probe paths that may re-run on hotplug and,
 * with CONFIG_HOTPLUG enabled, is kept resident unnecessarily.
 */
static int __init sharpsl_pm_init(void)
{
	return platform_driver_register(&sharpsl_pm_driver);
}
/* Module exit: unregister the platform driver. */
static void sharpsl_pm_exit(void)
{
	platform_driver_unregister(&sharpsl_pm_driver);
}

/* late_initcall so the LED, GPIO and APM infrastructure this driver
 * uses is already up. */
late_initcall(sharpsl_pm_init);
module_exit(sharpsl_pm_exit);
| gpl-2.0 |
gbrouse/linux-2.6-imx | arch/arm/mach-spear3xx/spear300_evb.c | 4828 | 1843 | /*
* arch/arm/mach-spear3xx/spear300_evb.c
*
* SPEAr300 evaluation board source file
*
* Copyright (C) 2009 ST Microelectronics
* Viresh Kumar<viresh.kumar@st.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <mach/generic.h>
#include <mach/hardware.h>
/* padmux devices to enable */
/* padmux devices to enable */
static struct pmx_dev *pmx_devs[] = {
	/* spear3xx specific devices */
	&spear3xx_pmx_i2c,
	&spear3xx_pmx_ssp_cs,
	&spear3xx_pmx_ssp,
	&spear3xx_pmx_mii,
	&spear3xx_pmx_uart0,

	/* spear300 specific devices */
	&spear300_pmx_fsmc_2_chips,
	&spear300_pmx_clcd,
	&spear300_pmx_telecom_sdhci_4bit,
	&spear300_pmx_gpio1,
};

/* AMBA (PrimeCell) peripherals registered at board init. */
static struct amba_device *amba_devs[] __initdata = {
	/* spear3xx specific devices */
	&spear3xx_gpio_device,
	&spear3xx_uart_device,

	/* spear300 specific devices */
	&spear300_gpio1_device,
};

/* Plain platform devices; currently none for this board. */
static struct platform_device *plat_devs[] __initdata = {
	/* spear3xx specific devices */

	/* spear300 specific devices */
};
/* Board init: configure SoC pinmux, then register the platform and
 * AMBA devices declared above. */
static void __init spear300_evb_init(void)
{
	unsigned int i;

	/* call spear300 machine init function */
	spear300_init(&spear300_photo_frame_mode, pmx_devs,
			ARRAY_SIZE(pmx_devs));

	/* Add Platform Devices */
	platform_add_devices(plat_devs, ARRAY_SIZE(plat_devs));

	/* Add Amba Devices */
	for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
		amba_device_register(amba_devs[i], &iomem_resource);
}

/* Machine descriptor matched against the boot ATAG machine number. */
MACHINE_START(SPEAR300, "ST-SPEAR300-EVB")
	.atag_offset	=	0x100,
	.map_io		=	spear3xx_map_io,
	.init_irq	=	spear3xx_init_irq,
	.handle_irq	=	vic_handle_irq,
	.timer		=	&spear3xx_timer,
	.init_machine	=	spear300_evb_init,
	.restart	=	spear_restart,
MACHINE_END
| gpl-2.0 |
NoelMacwan/android_kernel_sony_msm8928 | arch/arm/mach-mxs/timer.c | 4828 | 8489 | /*
* Copyright (C) 2000-2001 Deep Blue Solutions
* Copyright (C) 2002 Shane Nay (shane@minirl.com)
* Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
* Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
* Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <asm/mach/time.h>
#include <mach/mxs.h>
#include <mach/common.h>
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
* extends the counter to 32 bits.
*
* The implementation uses two timers, one for clock_event and
* another for clocksource. MX28 uses timrot 0 and 1, while MX23
* uses 0 and 2.
*/
/* Offsets of the hardware version register per SoC. */
#define MX23_TIMROT_VERSION_OFFSET	0x0a0
#define MX28_TIMROT_VERSION_OFFSET	0x120
#define BP_TIMROT_MAJOR_VERSION		24
#define BV_TIMROT_VERSION_1		0x01
#define BV_TIMROT_VERSION_2		0x02
#define timrot_is_v1()	(timrot_major_version == BV_TIMROT_VERSION_1)

/*
 * There are 4 registers for each timrotv2 instance, and 2 registers
 * for each timrotv1. So address step 0x40 in macros below strides
 * one instance of timrotv2 while two instances of timrotv1.
 *
 * As the result, HW_TIMROT_XXXn(1) defines the address of timrot1
 * on MX28 while timrot2 on MX23.
 */
/* common between v1 and v2 */
#define HW_TIMROT_ROTCTRL		0x00
#define HW_TIMROT_TIMCTRLn(n)		(0x20 + (n) * 0x40)
/* v1 only */
#define HW_TIMROT_TIMCOUNTn(n)		(0x30 + (n) * 0x40)
/* v2 only */
#define HW_TIMROT_RUNNING_COUNTn(n)	(0x30 + (n) * 0x40)
#define HW_TIMROT_FIXED_COUNTn(n)	(0x40 + (n) * 0x40)

#define BM_TIMROT_TIMCTRLn_RELOAD	(1 << 6)
#define BM_TIMROT_TIMCTRLn_UPDATE	(1 << 7)
#define BM_TIMROT_TIMCTRLn_IRQ_EN	(1 << 14)
#define BM_TIMROT_TIMCTRLn_IRQ		(1 << 15)
#define BP_TIMROT_TIMCTRLn_SELECT	0
#define BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL		0x8
#define BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL		0xb

/* Forward declaration; the device is defined further down. */
static struct clock_event_device mxs_clockevent_device;
/* Last mode programmed via mxs_set_mode(). */
static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED;

/* Statically mapped timrot register block. */
static void __iomem *mxs_timrot_base = MXS_IO_ADDRESS(MXS_TIMROT_BASE_ADDR);

/* Hardware major version, read once in mxs_timer_init(). */
static u32 timrot_major_version;
/* Mask interrupts from timrot channel 0 (the clock_event channel). */
static inline void timrot_irq_disable(void)
{
	__mxs_clrl(BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
}

/* Unmask interrupts from timrot channel 0. */
static inline void timrot_irq_enable(void)
{
	__mxs_setl(BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
}

/* Clear a pending channel-0 interrupt by clearing its status bit. */
static void timrot_irq_acknowledge(void)
{
	__mxs_clrl(BM_TIMROT_TIMCTRLn_IRQ,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
}
static cycle_t timrotv1_get_cycles(struct clocksource *cs)
{
return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1))
& 0xffff0000) >> 16);
}
/* Program the next clock_event expiry on a v1 timrot: write the delta
 * into the channel-0 count register. */
static int timrotv1_set_next_event(unsigned long evt,
					struct clock_event_device *dev)
{
	/* timrot decrements the count */
	__raw_writel(evt, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(0));

	return 0;
}

/* Same for a v2 timrot, which uses the separate FIXED_COUNT register. */
static int timrotv2_set_next_event(unsigned long evt,
					struct clock_event_device *dev)
{
	/* timrot decrements the count */
	__raw_writel(evt, mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(0));

	return 0;
}
/* Tick interrupt: acknowledge the hardware and forward the event to
 * the clockevents core. */
static irqreturn_t mxs_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	timrot_irq_acknowledge();
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/* Static irqaction, installed via setup_irq() in mxs_timer_init(). */
static struct irqaction mxs_timer_irq = {
	.name		= "MXS Timer Tick",
	.dev_id		= &mxs_clockevent_device,
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= mxs_timer_interrupt,
};
#ifdef DEBUG
/*
 * Human-readable names for clock_event_mode values, used by the debug
 * print in mxs_set_mode().
 *
 * Fixes: the previous declaration read "char *...[] const", which is
 * invalid C (a type qualifier may follow "[]" only inside a function
 * parameter declarator), so any build with DEBUG defined failed.  Also
 * adds the RESUME entry so mxs_set_mode()'s print does not pass a NULL
 * pointer to %s when transitioning through CLOCK_EVT_MODE_RESUME.
 */
static const char *const clock_event_mode_label[] = {
	[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
	[CLOCK_EVT_MODE_ONESHOT]  = "CLOCK_EVT_MODE_ONESHOT",
	[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
	[CLOCK_EVT_MODE_UNUSED]   = "CLOCK_EVT_MODE_UNUSED",
	[CLOCK_EVT_MODE_RESUME]   = "CLOCK_EVT_MODE_RESUME"
};
#endif /* DEBUG */
/* clock_event set_mode hook.  Interrupts are masked while
 * reconfiguring; only ONESHOT re-enables them (PERIODIC is not
 * implemented on this hardware driver). */
static void mxs_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	/* Disable interrupt in timer module */
	timrot_irq_disable();

	if (mode != mxs_clockevent_mode) {
		/* Set event time into the furthest future */
		if (timrot_is_v1())
			__raw_writel(0xffff,
				mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
		else
			__raw_writel(0xffffffff,
				mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));

		/* Clear pending interrupt */
		timrot_irq_acknowledge();
	}

#ifdef DEBUG
	pr_info("%s: changing mode from %s to %s\n", __func__,
		clock_event_mode_label[mxs_clockevent_mode],
		clock_event_mode_label[mode]);
#endif /* DEBUG */

	/* Remember timer mode */
	mxs_clockevent_mode = mode;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		pr_err("%s: Periodic mode is not implemented\n", __func__);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		timrot_irq_enable();
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		/* Left event sources disabled, no more interrupts appear */
		break;
	}
}
/* clock_event device backed by timrot channel 0.  set_next_event
 * defaults to the v2 variant and is overridden for v1 hardware in
 * mxs_clockevent_init(). */
static struct clock_event_device mxs_clockevent_device = {
	.name		= "mxs_timrot",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_mode	= mxs_set_mode,
	.set_next_event	= timrotv2_set_next_event,
	.rating		= 200,
};

/* Finish configuring the clock_event device from the timer clock rate
 * (16-bit deltas on v1, 32-bit on v2) and register it. */
static int __init mxs_clockevent_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);

	mxs_clockevent_device.mult =
		div_sc(c, NSEC_PER_SEC, mxs_clockevent_device.shift);
	mxs_clockevent_device.cpumask = cpumask_of(0);
	if (timrot_is_v1()) {
		mxs_clockevent_device.set_next_event = timrotv1_set_next_event;
		mxs_clockevent_device.max_delta_ns =
			clockevent_delta2ns(0xfffe, &mxs_clockevent_device);
		mxs_clockevent_device.min_delta_ns =
			clockevent_delta2ns(0xf, &mxs_clockevent_device);
	} else {
		mxs_clockevent_device.max_delta_ns =
			clockevent_delta2ns(0xfffffffe, &mxs_clockevent_device);
		mxs_clockevent_device.min_delta_ns =
			clockevent_delta2ns(0xf, &mxs_clockevent_device);
	}

	clockevents_register_device(&mxs_clockevent_device);

	return 0;
}
/* 16-bit clocksource used only on v1 hardware (channel 1 of the
 * timrot).  v2 uses a generic mmio clocksource instead, see below. */
static struct clocksource clocksource_mxs = {
	.name		= "mxs_timer",
	.rating		= 200,
	.read		= timrotv1_get_cycles,
	.mask		= CLOCKSOURCE_MASK(16),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Register the clocksource matching the hardware version: the custom
 * 16-bit reader on v1, a 32-bit down-counting mmio source on v2. */
static int __init mxs_clocksource_init(struct clk *timer_clk)
{
	unsigned int c = clk_get_rate(timer_clk);

	if (timrot_is_v1())
		clocksource_register_hz(&clocksource_mxs, c);
	else
		clocksource_mmio_init(mxs_timrot_base + HW_TIMROT_RUNNING_COUNTn(1),
			"mxs_timer", c, 200, 32, clocksource_mmio_readl_down);

	return 0;
}
/*
 * Bring up the MXS timrot block and register both the clocksource and
 * the clock event device.  Timer 0 drives clock events (IRQ enabled),
 * timer 1 free-runs as the clocksource (reload enabled, no IRQ).
 * The register programming order below is hardware-mandated: reset the
 * block, detect the version, then configure each timer channel.
 */
void __init mxs_timer_init(struct clk *timer_clk, int irq)
{
	clk_prepare_enable(timer_clk);
	/*
	 * Initialize timers to a known state
	 */
	mxs_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL);
	/* get timrot version (offset differs between MX23 and MX28) */
	timrot_major_version = __raw_readl(mxs_timrot_base +
			(cpu_is_mx23() ? MX23_TIMROT_VERSION_OFFSET :
					MX28_TIMROT_VERSION_OFFSET));
	timrot_major_version >>= BP_TIMROT_MAJOR_VERSION;
	/* one for clock_event: 32kHz xtal source, update-on-read, IRQ on */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL) |
			BM_TIMROT_TIMCTRLn_UPDATE |
			BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
	/* another for clocksource: auto-reload, no interrupt */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL) |
			BM_TIMROT_TIMCTRLn_RELOAD,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));
	/* set clocksource timer fixed count to the maximum (16/32 bit) */
	if (timrot_is_v1())
		__raw_writel(0xffff,
			mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
	else
		__raw_writel(0xffffffff,
			mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
	/* init and register the timer to the framework */
	mxs_clocksource_init(timer_clk);
	mxs_clockevent_init(timer_clk);
	/* Make irqs happen */
	setup_irq(irq, &mxs_timer_irq);
}
| gpl-2.0 |
faux123/htc-m7 | drivers/staging/tidspbridge/core/tiomap3430.c | 4828 | 53078 | /*
* tiomap.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Processor Manager Driver for TI OMAP3430 EVM.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <plat/dsp.h>
#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4 /* GPP byte offset */
#define BUFFERSIZE 1024
#define TIHELEN_ACKTIMEOUT 10000
#define MMU_SECTION_ADDR_MASK 0xFFF00000
#define MMU_SSECTION_ADDR_MASK 0xFF000000
#define MMU_LARGE_PAGE_MASK 0xFFFF0000
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE 512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
/*
* This is a totally ugly layer violation, but needed until
* omap_ctrl_set_dsp_boot*() are provided.
*/
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
u8 *host_buff,
u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
u8 *host_buff,
u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
u32 dsp_dest_addr, u32 dsp_src_addr,
u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
u8 *host_buff, u32 dsp_addr,
u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes, u32 ul_map_attr,
struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context
**dev_cntxt,
struct dev_object *hdev_obj,
struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
static u32 user_va2_pa(struct mm_struct *mm, u32 address);
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
u32 va, u32 size,
struct hw_mmu_map_attrs_t *map_attrs);
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
u32 size, struct hw_mmu_map_attrs_t *attrs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes,
struct hw_mmu_map_attrs_t *hw_attrs);
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
/* ----------------------------------- Globals */
/* Attributes of L2 page tables for DSP MMU */
/* Per-L2-page-table usage counter; one entry per allocated coarse table. */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};
/* Attributes used to manage the DSP MMU page tables */
/*
 * Bookkeeping for the DSP MMU's L1/L2 page tables.  Both levels keep two
 * address pairs: the raw *_tbl_alloc_* values returned by the allocator
 * (possibly unaligned, needed to free) and the aligned *_base_* values
 * actually programmed into / used by the MMU.
 */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual  address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual  address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
/*
* This Bridge driver's function interface table.
*/
/*
 * Positional initializer: entry order must match the field order declared
 * by struct bridge_drv_interface exactly.  Handed to the DSP API layer by
 * bridge_drv_entry() below.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
/* Notifier registered with the "dsp" mailbox in bridge_brd_start();
 * incoming mailbox messages are dispatched to io_mbox_msg(). */
static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};
static inline void flush_all(struct bridge_dev_context *dev_context)
{
if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
dev_context->brd_state == BRD_HIBERNATION)
wake_dsp(dev_context, NULL);
hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}
/* Emergency-log the state of a page whose refcount unexpectedly hit zero
 * during mapping, then dump a backtrace for diagnosis. */
static void bad_page_dump(u32 pa, struct page *pg)
{
	int flags_width = (int)(2 * sizeof(unsigned long));

	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		 "Backtrace:\n",
		 current->comm, pg, flags_width,
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}
/*
* ======== bridge_drv_entry ========
* purpose:
* Bridge Driver entry point.
*/
/*
 * Driver entry point: hand the function-pointer table to the caller when
 * the requested driver file is "UMA"; otherwise log and leave *drv_intf
 * untouched.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	if (strcmp(driver_file_name, "UMA") != 0) {
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
		return;
	}

	*drv_intf = &drv_interface_fxns;
}
/*
* ======== bridge_brd_monitor ========
* purpose:
* This bridge_brd_monitor puts DSP into a Loadable state.
* i.e Application can load and start the device.
*
* Preconditions:
* Device in 'OFF' state.
*/
/*
 * Put the DSP into a loadable state: power up the IVA2 domain if needed,
 * release RST2, enable the IVA2 clock and mark the board IDLE.
 * The PRCM register sequence (power-on request, forced wakeup, wait for
 * transition, disable auto transition) is order-dependent.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2  to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON */
		/* NOTE(review): busy-wait with no timeout; hangs here if the
		 * power domain never leaves the in-transition state. */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	/* Release RST2 so the DSP megacell can be loaded */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->brd_state = BRD_IDLE;

	return 0;
}
/*
* ======== bridge_brd_read ========
* purpose:
* Reads buffers for DSP memory.
*/
/*
 * Read ul_num_bytes of DSP memory into host_buff.  Addresses inside the
 * directly-mapped internal window are copied with memcpy; anything past
 * it is delegated to read_ext_dsp_data().  Returns 0 or negative errno.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *ctx = dev_ctxt;
	u32 offset;

	/* Reject addresses below the DSP memory window. */
	if (dsp_addr < ctx->dsp_start_add)
		return -EPERM;

	offset = dsp_addr - ctx->dsp_start_add;

	/* Beyond internal memory: go through the external-memory path. */
	if (offset >= ctx->internal_size)
		return read_ext_dsp_data(ctx, host_buff, dsp_addr,
					 ul_num_bytes, mem_type);

	/* Internal memory is linearly mapped at dsp_base_addr. */
	memcpy(host_buff, (void *)(ctx->dsp_base_addr + offset), ul_num_bytes);

	return 0;
}
/*
* ======== bridge_brd_set_state ========
* purpose:
* This routine updates the Board status.
*/
/* Record the new board state in the device context; always succeeds. */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	dev_ctxt->brd_state = brd_state;
	return 0;
}
/*
* ======== bridge_brd_start ========
* purpose:
* Initializes DSP MMU and Starts DSP.
*
* Preconditions:
* a) DSP domain is 'ACTIVE'.
* b) DSP_RST1 is asserted.
* b) DSP_RST2 is released.
*/
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	/*
	 * Fix: the dev_get_symbol() calls that fill in the two GP timer
	 * symbols below deliberately ignore failure (return cast to void),
	 * which previously left these variables uninitialized when a symbol
	 * was missing -- the later "!= 0xFFFF" checks then read garbage.
	 * Pre-set them to the 0xFFFF "symbol not found" sentinel that those
	 * checks already handle.
	 */
	u32 ul_bios_gp_timer = 0xFFFF;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer = 0xFFFF;
	u32 wdt_en = 0;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;

	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			/*
			 * XXX: ioremapping MUST be removed once ctrl
			 * function is made available.
			 */
			void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
			if (!ctrl)
				return -ENOMEM;

			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);

			iounmap(ctrl);
		}
	}
	if (!status) {
		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
		 * IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disbale the DSP MMU */
		hw_mmu_disable(resources->dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dmmu_base);

		/* Only make TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
					&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->gpp_pa || !e->dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->gpp_pa,
				e->dsp_va,
				e->size);

			hw_mmu_tlb_add(dev_context->dsp_mmu_base,
				       e->gpp_pa,
				       e->dsp_va,
				       e->size,
				       itmp_entry_ndx,
				       &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dmmu_base);

		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}
	}

	if (!status) {
		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->dev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/*Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}

	}
	if (!status) {
		/*PM_IVA2GRPSEL_PER = 0xC0;*/
		temp = readl(resources->per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->per_pm_base + 0xA8);

		/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->per_pm_base + 0xA4);
		/*CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->per_base + 0x44);

		/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
		if (wdt_en) {
			/* Start wdt */
			dsp_wdt_sm_set((void *)ul_shm_base);
			dsp_wdt_enable(true);
		}

		status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->brd_state = BRD_UNKNOWN;
		}
	}
	return status;
}
/*
* ======== bridge_brd_stop ========
* purpose:
* Puts DSP in self loop.
*
* Preconditions :
* a) None
*/
/*
 * Stop the DSP: drive IVA2 to standby, request the power domain OFF,
 * clear the MMU page tables, release the mailbox and gate all IVA2
 * clocks.  Idempotent: returns immediately if already BRD_STOPPED.
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	struct omap_dsp_platform_data *pdata =
				omap_dspbridge_dev->dev.platform_data;

	if (dev_context->brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks.. This is to ensure that there are no
	 * pending L3 or other transactons from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		/* Ask the DSP to idle itself via mailbox, then give it time */
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2  to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dsp_ext_base_addr)
		dev_context->dsp_ext_base_addr = 0;

	dev_context->brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks: assert all three IVA2 resets */
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
/*
* ======== bridge_brd_status ========
* Returns the board status.
*/
/* Report the current board state through *board_state; always succeeds. */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	*board_state = dev_ctxt->brd_state;
	return 0;
}
/*
* ======== bridge_brd_write ========
* Copies the buffers to DSP internal or external memory.
*/
/*
 * Write host_buff into DSP memory.  Addresses inside the internal window
 * go through write_dsp_data(); everything beyond it goes through the
 * external-memory path.  Returns 0 or negative errno.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *ctx = dev_ctxt;

	/* Addresses below the DSP memory window are invalid. */
	if (dsp_addr < ctx->dsp_start_add)
		return -EPERM;

	if (dsp_addr - ctx->dsp_start_add < ctx->internal_size)
		return write_dsp_data(dev_ctxt, host_buff, dsp_addr,
				      ul_num_bytes, mem_type);

	return write_ext_dsp_data(ctx, host_buff, dsp_addr,
				  ul_num_bytes, mem_type, false);
}
/*
* ======== bridge_dev_create ========
* Creates a driver object. Puts DSP in self loop.
*/
/*
 * Allocate and initialize the bridge device context: map the DSP base,
 * allocate (and align) the L1 and L2 MMU page tables plus their
 * bookkeeping array, and leave the board in BRD_UNKNOWN/stopped state.
 * On any failure all partial allocations are released before returning.
 */
static int bridge_dev_create(struct bridge_dev_context
					**dev_cntxt,
				     struct dev_object *hdev_obj,
				     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	/* NOTE(review): drv_datap is dereferenced below without a NULL
	 * check -- verify dev_get_drvdata(bridge) cannot return NULL here. */
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge
	 * driver state, which becomes the context for later calls into
	 * this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/*  Clear dev context MMU table entries.
	 *  These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
		    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	/* Map the DSP memory window (mem_base[3]) into kernel space */
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
							      (config_param->
							       mem_base
							       [3]),
							      config_param->
							      mem_length
							      [3]);
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size ,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align'  boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					sizeof(struct page_info), GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	/* All three allocations (L1, L2, pg_info) must have succeeded */
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		/* Failure: unwind whichever page-table allocations exist */
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}
/*
* ======== bridge_dev_ctrl ========
* Receives device specific commands.
*/
/*
 * Dispatch a device-specific command (BRDIOCTL_*).  pargs is interpreted
 * per command: an array of bridge_ioctl_extproc for SETMMUCONFIG, and an
 * opaque argument for the power/clock commands.  Unknown commands return
 * -EPERM.
 *
 * Fix: dropped the dead "status = 0;" store in BRDIOCTL_CLK_CTRL, which
 * was immediately overwritten by the dsp_peripheral_clk_ctrl() result.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
					(struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}
/*
* ======== bridge_dev_destroy ========
* Destroys the driver object.
*/
/*
 * Tear down the device context created by bridge_dev_create(): stop the
 * board, free the MMU page tables, release/unmap all host resources and
 * finally free the context itself.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	/* NOTE(review): drv_datap is dereferenced below without a NULL
	 * check -- verify dev_get_drvdata(bridge) cannot return NULL here. */
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);

	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->mem_base
						  [1],
						  host_res->mem_phys
						  [1], shm_size);
			}
		} else {
			/* NOTE(review): 'status' is still 0 at this point, so
			 * the %x below always prints 0, not an error code. */
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	return status;
}
/*
 * Copy ul_num_bytes within DSP memory, staging through a host-side
 * bounce buffer of BUFFERSIZE bytes.  The source is always read via the
 * external-memory path; the destination is written through the internal
 * or external path depending on its address.  Stops on the first error.
 */
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u8 staging[BUFFERSIZE];
	u32 src = dsp_src_addr;
	u32 dst = dsp_dest_addr;
	u32 remaining = ul_num_bytes;
	int status = 0;

	while (remaining > 0 && !status) {
		u32 chunk = remaining > BUFFERSIZE ? BUFFERSIZE : remaining;

		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, staging, src,
					   chunk, mem_type);
		if (!status) {
			if (dst < (dev_context->dsp_start_add +
				   dev_context->internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, staging,
							dst, chunk, mem_type);
			} else {
				/* Write to External memory */
				status = write_ext_dsp_data(dev_ctxt, staging,
							    dst, chunk,
							    mem_type, false);
			}
		}
		remaining -= chunk;
		src += chunk;
		dst += chunk;
	}
	return status;
}
/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
/*
 * Write a host buffer into DSP memory without halting the DSP (unlike
 * bridge_brd_write). Data is pushed in BUFFERSIZE-sized slices; each
 * slice goes to internal or external memory depending on dsp_addr.
 * Returns 0 on success or the first failing write status.
 */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 remaining = ul_num_bytes;
	u32 chunk;
	int status = 0;

	while (remaining > 0 && !status) {
		chunk = remaining > BUFFERSIZE ? BUFFERSIZE : remaining;
		if (dsp_addr < (dev_context->dsp_start_add +
				dev_context->internal_size)) {
			/* Target is internal DSP memory */
			status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
						chunk, mem_type);
		} else {
			/* Target is external DSP memory */
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, chunk,
						    mem_type, true);
		}
		remaining -= chunk;
		dsp_addr += chunk;
		host_buff = host_buff + chunk;
	}
	return status;
}
/*
* ======== bridge_brd_mem_map ========
* This function maps MPU buffer to the DSP address space. It performs
* linear to physical address translation if required. It translates each
* page since linear addresses can be physically non-contiguous
* All address & size arguments are assumed to be page aligned (in proc.c)
*
* TODO: Disable MMU while updating the page tables (but that'll stall DSP)
*/
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;	/* running DSP-side VA as pages are mapped */
	struct task_struct *curr_task = current;
	u32 pg_i = 0;		/* pages mapped so far; used for rollback */
	u32 mpu_addr, pa;
	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;
	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;
	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;
	/* vmalloc'ed kernel buffers take a dedicated path */
	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}
	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}
	if (vma->vm_flags & VM_IO) {
		/* I/O-mapped VMA: translate VAs manually, no get_user_pages */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;
		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				/* Backed by struct page: pin it for the DSP */
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;
			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		/* Regular user memory: pin pages via get_user_pages */
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;
				/* Caller may want the pinned pages back */
				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;
				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages incase it failed in middle of
		 * mapping
		 */
		/* NOTE(review): rollback covers pages already in the DSP page
		 * table; for the VM_IO error path a page pinned on the very
		 * failing iteration (get_page before a failing pte_set) looks
		 * unreleased here — confirm against unmap accounting. */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
* ======== bridge_brd_mem_un_map ========
* Invalidate the PTEs for the DSP VA block to be unmapped.
*
* PTEs of a mapped memory block are contiguous in any page table
* So, instead of looking up the PTE address for every 4K block,
* we clear consecutive PTEs until we unmap all the bytes
*/
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;		/* bytes still to unmap overall */
	u32 rem_bytes_l2;	/* bytes to unmap on the current L2 page */
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;
	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);
	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);
		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;
		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;
		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}
			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			/* Release the pin taken per 4K page at map time */
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}
			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		/* pg_lock guards the shared L2 page usage counters */
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;
		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}
		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		/* Section (1 MB / 16 MB) entries live directly in the L1 PT */
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
/*
* ======== user_va2_pa ========
* Purpose:
* This function walks through the page tables to convert a userland
* virtual address to physical address
*/
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	u32 pa = 0;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				/*
				 * pte_offset_map() must be paired with
				 * pte_unmap(); without it a highmem PTE
				 * mapping (CONFIG_HIGHPTE) is leaked on
				 * every call. Copy the entry first, then
				 * drop the mapping.
				 */
				pte_unmap(ptep);
				if (pte_present(pte))
					pa = pte & PAGE_MASK;
			}
		}
	}
	/* Returns 0 if the address is not mapped/present at any level. */
	return pa;
}
/*
* ======== pte_update ========
* This function calculates the optimum page-aligned addresses and sizes
* Caller must pass page-aligned values
*/
/*
 * Map [pa, pa+size) to [va, va+size) using the largest MMU page size
 * (16MB, 1MB, 64KB, 4KB) that both addresses are aligned to at each
 * step. Caller must pass page-aligned (4KB) values.
 * Returns 0 on success or the first failing pte_set() status.
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
			    HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};
	u32 cur_pa = pa;
	u32 cur_va = va;
	u32 rem = size;
	int status = 0;

	while (rem && !status) {
		/* Largest size both PA and VA are aligned to */
		u32 align = cur_pa | cur_va;
		u32 idx;

		for (idx = 0; idx < ARRAY_SIZE(page_size); idx++) {
			u32 sz = page_size[idx];

			if (rem < sz || (align & (sz - 1)) != 0)
				continue;

			status = pte_set(dev_context->pt_attrs, cur_pa,
					 cur_va, sz, map_attrs);
			cur_pa += sz;
			cur_va += sz;
			rem -= sz;
			/*
			 * Don't try smaller sizes; the next address is
			 * hopefully aligned to a bigger page size.
			 */
			break;
		}
	}
	return status;
}
/*
* ======== pte_set ========
* This function calculates PTE address (MPU virtual) to be updated
* It also manages the L2 page tables
*/
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;
	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	/* 64KB/4KB entries live in an L2 (coarse) table; larger sizes go
	 * straight into the L1 table. */
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else {
			return -EPERM;
		}
		/* pg_lock serializes L2 table allocation and the per-table
		 * entry counters against the unmap path. */
		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num =
			    (l2_base_pa -
			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			/* A 64KB mapping occupies 16 consecutive L2 slots */
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		/* Write the actual mapping entry (L1 or L2 table) */
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}
	return status;
}
/* Memory map kernel VA -- memory allocated with vmalloc */
/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp;
	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteraion to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);
			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;
		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		/*
		 * Pin every 4K page of this region. The counter must be
		 * reset for each region: previously it was initialized
		 * only once, so regions after the first were never
		 * get_page()'d, unbalancing the page_cache_release()
		 * calls done per 4K page at unmap time.
		 */
		temp = 0;
		while (temp++ < num_of4k_pages) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
* ======== wait_for_start ========
* Wait for the singal from DSP that it has started, or time out.
*/
/*
 * ======== wait_for_start ========
 * Poll the sync word at dw_sync_addr until the DSP clears it (signalling
 * that it has started) or until TIHELEN_ACKTIMEOUT polls have elapsed.
 * Returns true on success, false (with an error message) on timeout.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 retries = TIHELEN_ACKTIMEOUT;

	/* Wait for response from board */
	while (__raw_readw(dw_sync_addr) && --retries)
		udelay(10);

	if (retries)
		return true;

	/* Counter exhausted: the DSP never acknowledged */
	pr_err("%s: Timed out waiting DSP to Start\n", __func__);
	return false;
}
| gpl-2.0 |
DirtyUnicorns/android_kernel_sony_apq8064 | drivers/rtc/rtc-isl12022.c | 5084 | 7567 | /*
* An I2C driver for the Intersil ISL 12022
*
* Author: Roman Fietze <roman.fietze@telemotive.de>
*
* Based on the Philips PCF8563 RTC
* by Alessandro Zummo <a.zummo@towertech.it>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
#define DRV_VERSION "0.1"
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
#define ISL12022_REG_MN 0x01
#define ISL12022_REG_HR 0x02
#define ISL12022_REG_DT 0x03
#define ISL12022_REG_MO 0x04
#define ISL12022_REG_YR 0x05
#define ISL12022_REG_DW 0x06
#define ISL12022_REG_SR 0x07
#define ISL12022_REG_INT 0x08
/* ISL register bits */
#define ISL12022_HR_MIL (1 << 7) /* military or 24 hour time */
#define ISL12022_SR_LBAT85 (1 << 2)
#define ISL12022_SR_LBAT75 (1 << 1)
#define ISL12022_INT_WRTC (1 << 6)
/* Forward declaration: probe() needs the driver name for registration. */
static struct i2c_driver isl12022_driver;
/* Per-client driver state, stored as I2C clientdata. */
struct isl12022 {
	struct rtc_device *rtc;	/* registered RTC class device */

	bool write_enabled;	/* true if write enable is set */
};
/*
 * Read n consecutive registers starting at reg into data[].
 * Performs a combined transfer: one write message to set the register
 * pointer, then one read message for the payload.
 * Returns 0 on success, -EIO on transfer failure.
 */
static int isl12022_read_regs(struct i2c_client *client, uint8_t reg,
			      uint8_t *data, size_t n)
{
	struct i2c_msg msgs[2];
	int ret;

	/* Message 0: write the register pointer (reuses data[] as buffer) */
	msgs[0].addr = client->addr;
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = data;
	/* Message 1: read n bytes back into data[] */
	msgs[1].addr = client->addr;
	msgs[1].flags = I2C_M_RD;
	msgs[1].len = n;
	msgs[1].buf = data;

	data[0] = reg;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret != ARRAY_SIZE(msgs)) {
		dev_err(&client->dev, "%s: read error, ret=%d\n",
			__func__, ret);
		return -EIO;
	}

	return 0;
}
/*
 * Write a single register: send the register address followed by the
 * value in one I2C message. Returns 0 on success, -EIO on failure.
 */
static int isl12022_write_reg(struct i2c_client *client,
			      uint8_t reg, uint8_t val)
{
	uint8_t msg[2];
	int ret;

	msg[0] = reg;
	msg[1] = val;

	ret = i2c_master_send(client, msg, sizeof(msg));
	if (ret == sizeof(msg))
		return 0;

	dev_err(&client->dev,
		"%s: err=%d addr=%02x, data=%02x\n",
		__func__, ret, msg[0], msg[1]);
	return -EIO;
}
/*
* In the routines that deal directly with the isl12022 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
/*
 * Read all time/status registers in one burst and decode the BCD fields
 * into *tm. Warns if the low-battery status bits indicate the backup
 * voltage dropped (time may be unreliable). Always returns 0 on a
 * successful read, even if the decoded time is invalid (see note below).
 */
static int isl12022_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
	uint8_t buf[ISL12022_REG_INT + 1];
	int ret;

	ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf));
	if (ret)
		return ret;

	/* LBAT85/LBAT75: battery fell below 85%/75% of nominal */
	if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
		dev_warn(&client->dev,
			 "voltage dropped below %u%%, "
			 "date and time is not reliable.\n",
			 buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
	}

	dev_dbg(&client->dev,
		"%s: raw data is sec=%02x, min=%02x, hr=%02x, "
		"mday=%02x, mon=%02x, year=%02x, wday=%02x, "
		"sr=%02x, int=%02x",
		__func__,
		buf[ISL12022_REG_SC],
		buf[ISL12022_REG_MN],
		buf[ISL12022_REG_HR],
		buf[ISL12022_REG_DT],
		buf[ISL12022_REG_MO],
		buf[ISL12022_REG_YR],
		buf[ISL12022_REG_DW],
		buf[ISL12022_REG_SR],
		buf[ISL12022_REG_INT]);

	/* Decode BCD registers; masks strip control/unused bits */
	tm->tm_sec = bcd2bin(buf[ISL12022_REG_SC] & 0x7F);
	tm->tm_min = bcd2bin(buf[ISL12022_REG_MN] & 0x7F);
	tm->tm_hour = bcd2bin(buf[ISL12022_REG_HR] & 0x3F);
	tm->tm_mday = bcd2bin(buf[ISL12022_REG_DT] & 0x3F);
	tm->tm_wday = buf[ISL12022_REG_DW] & 0x07;
	tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1; /* 0-based */
	tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100; /* years since 1900 */

	dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	/* The clock can give out invalid datetime, but we cannot return
	 * -EINVAL otherwise hwclock will refuse to set the time on bootup. */
	if (rtc_valid_tm(tm) < 0)
		dev_err(&client->dev, "retrieved date and time is invalid.\n");

	return 0;
}
/*
 * Program the RTC with *tm. On first call, ensures the chip's WRTC
 * (write-enable) bit is set and forces 24-hour mode via the HR
 * register's MIL bit; subsequent calls skip that init (write_enabled).
 * Registers are then written one at a time in BCD.
 * Returns 0 on success or a negative errno on I2C failure.
 */
static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
	struct isl12022 *isl12022 = i2c_get_clientdata(client);
	size_t i;
	int ret;
	uint8_t buf[ISL12022_REG_DW + 1];

	dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
		"mday=%d, mon=%d, year=%d, wday=%d\n",
		__func__,
		tm->tm_sec, tm->tm_min, tm->tm_hour,
		tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);

	if (!isl12022->write_enabled) {
		ret = isl12022_read_regs(client, ISL12022_REG_INT, buf, 1);
		if (ret)
			return ret;
		/* Check if WRTC (write rtc enable) is set factory default is
		 * 0 (not set) */
		if (!(buf[0] & ISL12022_INT_WRTC)) {
			dev_info(&client->dev,
				 "init write enable and 24 hour format\n");
			/* Set the write enable bit. */
			ret = isl12022_write_reg(client,
						 ISL12022_REG_INT,
						 buf[0] | ISL12022_INT_WRTC);
			if (ret)
				return ret;
			/* Write to any RTC register to start RTC, we use the
			 * HR register, setting the MIL bit to use the 24 hour
			 * format. */
			ret = isl12022_read_regs(client, ISL12022_REG_HR,
						 buf, 1);
			if (ret)
				return ret;
			ret = isl12022_write_reg(client,
						 ISL12022_REG_HR,
						 buf[0] | ISL12022_HR_MIL);
			if (ret)
				return ret;
		}
		isl12022->write_enabled = 1;
	}

	/* hours, minutes and seconds */
	buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec);
	buf[ISL12022_REG_MN] = bin2bcd(tm->tm_min);
	buf[ISL12022_REG_HR] = bin2bcd(tm->tm_hour) | ISL12022_HR_MIL;
	buf[ISL12022_REG_DT] = bin2bcd(tm->tm_mday);

	/* month, 1 - 12 */
	buf[ISL12022_REG_MO] = bin2bcd(tm->tm_mon + 1);

	/* year and century */
	buf[ISL12022_REG_YR] = bin2bcd(tm->tm_year % 100);

	buf[ISL12022_REG_DW] = tm->tm_wday & 0x07;

	/* write register's data (stray ';' after the loop removed) */
	for (i = 0; i < ARRAY_SIZE(buf); i++) {
		ret = isl12022_write_reg(client, ISL12022_REG_SC + i,
					 buf[ISL12022_REG_SC + i]);
		if (ret)
			return -EIO;
	}

	return 0;
}
/* RTC class callback: delegate to the I2C-level read helper. */
static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return isl12022_get_datetime(to_i2c_client(dev), tm);
}
/* RTC class callback: delegate to the I2C-level write helper. */
static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return isl12022_set_datetime(to_i2c_client(dev), tm);
}
/* Operations exposed to the RTC core (no alarm support). */
static const struct rtc_class_ops isl12022_rtc_ops = {
	.read_time	= isl12022_rtc_read_time,
	.set_time	= isl12022_rtc_set_time,
};
/*
 * Bind to an ISL12022 client: allocate per-client state and register
 * an RTC class device. Returns 0 on success or a negative errno.
 */
static int isl12022_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct isl12022 *isl12022;
	struct rtc_device *rtc;

	/* The driver needs plain I2C transfers */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	isl12022 = kzalloc(sizeof(*isl12022), GFP_KERNEL);
	if (!isl12022)
		return -ENOMEM;

	dev_dbg(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	i2c_set_clientdata(client, isl12022);

	rtc = rtc_device_register(isl12022_driver.driver.name,
				  &client->dev,
				  &isl12022_rtc_ops,
				  THIS_MODULE);
	if (IS_ERR(rtc)) {
		/* Registration failed: release the state we allocated */
		kfree(isl12022);
		return PTR_ERR(rtc);
	}

	isl12022->rtc = rtc;
	return 0;
}
/* Unbind: tear down the RTC class device and free the client state. */
static int isl12022_remove(struct i2c_client *client)
{
	struct isl12022 *state = i2c_get_clientdata(client);

	rtc_device_unregister(state->rtc);
	kfree(state);

	return 0;
}
/* Devices this driver binds to. NOTE(review): the "rtc8564" entry looks
 * inherited from the pcf8563 driver this one was based on; the RTC8564
 * is an Epson PCF8563-compatible part, not an ISL12022 — confirm it is
 * intentional before relying on it. */
static const struct i2c_device_id isl12022_id[] = {
	{ "isl12022", 0 },
	{ "rtc8564", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, isl12022_id);
/* I2C driver glue; module init/exit handled by module_i2c_driver(). */
static struct i2c_driver isl12022_driver = {
	.driver		= {
		.name	= "rtc-isl12022",
	},
	.probe		= isl12022_probe,
	.remove		= isl12022_remove,
	.id_table	= isl12022_id,
};

module_i2c_driver(isl12022_driver);

MODULE_AUTHOR("roman.fietze@telemotive.de");
MODULE_DESCRIPTION("ISL 12022 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
Nicklas373/Hana-CoreUX-Kernel_MSM8627-AOSP_7.0 | drivers/media/video/cx23885/altera-ci.c | 5596 | 20795 | /*
* altera-ci.c
*
* CI driver in conjunction with NetUp Dual DVB-T/C RF CI card
*
* Copyright (C) 2010,2011 NetUP Inc.
* Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* currently cx23885 GPIO's used.
* GPIO-0 ~INT in
* GPIO-1 TMS out
* GPIO-2 ~reset chips out
* GPIO-3 to GPIO-10 data/addr for CA in/out
* GPIO-11 ~CS out
* GPIO-12 AD_RG out
* GPIO-13 ~WR out
* GPIO-14 ~RD out
* GPIO-15 ~RDY in
* GPIO-16 TCK out
* GPIO-17 TDO in
* GPIO-18 TDI out
*/
/*
* Bit definitions for MC417_RWD and MC417_OEN registers
* bits 31-16
* +-----------+
* | Reserved |
* +-----------+
* bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | TDI | TDO | TCK | RDY# | #RD | #WR | AD_RG | #CS |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
#include "altera-ci.h"
#include "dvb_ca_en50221.h"
/* FPGA regs */
#define NETUP_CI_INT_CTRL 0x00
#define NETUP_CI_BUSCTRL2 0x01
#define NETUP_CI_ADDR0 0x04
#define NETUP_CI_ADDR1 0x05
#define NETUP_CI_DATA 0x06
#define NETUP_CI_BUSCTRL 0x07
#define NETUP_CI_PID_ADDR0 0x08
#define NETUP_CI_PID_ADDR1 0x09
#define NETUP_CI_PID_DATA 0x0a
#define NETUP_CI_TSA_DIV 0x0c
#define NETUP_CI_TSB_DIV 0x0d
#define NETUP_CI_REVISION 0x0f
/* const for ci op */
#define NETUP_CI_FLG_CTL 1
#define NETUP_CI_FLG_RD 1
#define NETUP_CI_FLG_AD 1
static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
static unsigned int pid_dbg;
module_param(pid_dbg, int, 0644);
MODULE_PARM_DESC(pid_dbg, "Enable PID filtering debugging");
MODULE_DESCRIPTION("altera FPGA CI module");
MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>");
MODULE_LICENSE("GPL");
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
#define pid_dbg_print(args...) \
do { \
if (pid_dbg) \
printk(KERN_DEBUG args); \
} while (0)
struct altera_ci_state;
struct netup_hw_pid_filter;

/* Shared state for one FPGA, which hosts up to two CI slots and two
 * hardware PID filters. Guarded by fpga_mutex where noted. */
struct fpga_internal {
	void *dev;	/* board-private handle passed back to fpga_rw */
	struct mutex fpga_mutex;/* two CI's on the same fpga */
	struct netup_hw_pid_filter *pid_filt[2];
	struct altera_ci_state *state[2];
	struct work_struct work;
	/* board callback performing one bus cycle (addr or data phase) */
	int (*fpga_rw) (void *dev, int flag, int data, int rw);
	int cis_used;	/* number of CI slots attached */
	int filts_used;	/* number of PID filters attached */
	int strt_wrk;	/* nonzero: work must be (re)scheduled */
};

/* stores all private variables for communication with CI */
struct altera_ci_state {
	struct fpga_internal *internal;
	struct dvb_ca_en50221 ca;
	int status;
	int nr;		/* slot index on this FPGA: 0 or 1 */
};

/* stores all private variables for hardware pid filtering */
struct netup_hw_pid_filter {
	struct fpga_internal *internal;
	struct dvb_demux *demux;
	/* save old functions */
	int (*start_feed)(struct dvb_demux_feed *feed);
	int (*stop_feed)(struct dvb_demux_feed *feed);
	int status;
	int nr;		/* filter index on this FPGA: 0 or 1 */
};

/* internal params node */
struct fpga_inode {
	/* pointer for internal params, one for each pair of CI's */
	struct fpga_internal *internal;
	struct fpga_inode *next_inode;	/* singly-linked list */
};

/* first internal params */
static struct fpga_inode *fpga_first_inode;
/* find chip by dev */
/*
 * Look up the list node whose FPGA internal state belongs to dev.
 * Returns the matching node, or NULL if dev is not registered.
 */
static struct fpga_inode *find_inode(void *dev)
{
	struct fpga_inode *node;

	/* Linear scan of the singly-linked chip list */
	for (node = fpga_first_inode; node != NULL; node = node->next_inode)
		if (node->internal->dev == dev)
			break;

	return node;
}
/* check demux */
static struct fpga_internal *check_filter(struct fpga_internal *temp_int,
void *demux_dev, int filt_nr)
{
if (temp_int == NULL)
return NULL;
if ((temp_int->pid_filt[filt_nr]) == NULL)
return NULL;
if (temp_int->pid_filt[filt_nr]->demux == demux_dev)
return temp_int;
return NULL;
}
/* find chip by demux */
/*
 * Find the chip whose PID filter (slot 0 or 1) serves demux_dev.
 * Returns the list node, or NULL when no chip matches.
 */
static struct fpga_inode *find_dinode(void *demux_dev)
{
	struct fpga_inode *node;

	/* Each chip can host two filters; test both slots. */
	for (node = fpga_first_inode; node != NULL; node = node->next_inode) {
		struct fpga_internal *inter = node->internal;

		if (inter != NULL &&
		    (check_filter(inter, demux_dev, 0) ||
		     check_filter(inter, demux_dev, 1)))
			break;
	}

	return node;
}
/* deallocating chip */
/*
 * Unlink and free the list node owning this internal state.
 * No-op when the device is not found in the list.
 */
static void remove_inode(struct fpga_internal *internal)
{
	struct fpga_inode *del_node = find_inode(internal->dev);
	struct fpga_inode *prev;

	if (del_node == NULL)
		return;

	if (del_node == fpga_first_inode) {
		fpga_first_inode = del_node->next_inode;
	} else {
		prev = fpga_first_inode;
		while (prev->next_inode != del_node)
			prev = prev->next_inode;
		/* Bypass del_node (works for tail nodes too) */
		prev->next_inode = del_node->next_inode;
	}
	kfree(del_node);
}
/* allocating new chip */
/*
 * Allocate a new list node for internal and append it to the tail of
 * the chip list. Returns the new node, or NULL on allocation failure.
 */
static struct fpga_inode *append_internal(struct fpga_internal *internal)
{
	struct fpga_inode *node;

	if (fpga_first_inode == NULL) {
		/* Empty list: new node becomes the head */
		node = kmalloc(sizeof(struct fpga_inode), GFP_KERNEL);
		fpga_first_inode = node;
	} else {
		struct fpga_inode *tail = fpga_first_inode;

		while (tail->next_inode != NULL)
			tail = tail->next_inode;

		tail->next_inode =
			kmalloc(sizeof(struct fpga_inode), GFP_KERNEL);
		node = tail->next_inode;
	}

	if (node != NULL) {
		node->internal = internal;
		node->next_inode = NULL;
	}

	return node;
}
/* One FPGA register access: first latch the register address
 * (NETUP_CI_FLG_AD cycle), then run the data read/write cycle.
 * Returns whatever the board's fpga_rw callback returns for the
 * data phase (the read value, for reads). */
static int netup_fpga_op_rw(struct fpga_internal *inter, int addr,
			    u8 val, u8 read)
{
	inter->fpga_rw(inter->dev, NETUP_CI_FLG_AD, addr, 0);

	return inter->fpga_rw(inter->dev, 0, val, read);
}
/* flag - mem/io, read - read/write */
/* flag - mem/io, read - read/write */
/* Core CAM access: program the 15-bit CAM address, select this CI's
 * bank and attribute/IO space in the bus-control register, then do the
 * data cycle. Serialized by fpga_mutex since both CIs share the FPGA.
 * Only slot 0 is valid. Returns the read value (reads) or the callback
 * result (writes), or -EINVAL for a bad slot. */
int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
		     u8 flag, u8 read, int addr, u8 val)
{
	struct altera_ci_state *state = en50221->data;
	struct fpga_internal *inter = state->internal;
	u8 store;
	int mem = 0;

	if (0 != slot)
		return -EINVAL;

	mutex_lock(&inter->fpga_mutex);

	/* Split the CAM address across the two address registers */
	netup_fpga_op_rw(inter, NETUP_CI_ADDR0, ((addr << 1) & 0xfe), 0);
	netup_fpga_op_rw(inter, NETUP_CI_ADDR1, ((addr >> 7) & 0x7f), 0);
	/* Read-modify-write BUSCTRL: keep low nibble, set CI number
	 * (bit 7) and attribute/control-space flag (bit 6) */
	store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);

	store &= 0x0f;
	store |= ((state->nr << 7) | (flag << 6));

	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, store, 0);
	mem = netup_fpga_op_rw(inter, NETUP_CI_DATA, val, read);

	mutex_unlock(&inter->fpga_mutex);

	ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__,
			(read) ? "read" : "write", addr,
			(flag == NETUP_CI_FLG_CTL) ? "ctl" : "mem",
			(read) ? mem : val);

	return mem;
}
/* EN50221 callback: read one byte from CAM attribute memory. */
int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
				 int slot, int addr)
{
	return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0);
}

/* EN50221 callback: write one byte to CAM attribute memory. */
int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
				  int slot, int addr, u8 data)
{
	return altera_ci_op_cam(en50221, slot, 0, 0, addr, data);
}

/* EN50221 callback: read a CAM control (I/O space) register. */
int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
{
	return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL,
				NETUP_CI_FLG_RD, addr, 0);
}

/* EN50221 callback: write a CAM control (I/O space) register. */
int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
			    u8 addr, u8 data)
{
	return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data);
}
/*
 * EN50221 slot_reset callback: pulse this slot's reset bit in
 * NETUP_CI_BUSCTRL, then poll until the hardware clears it (module
 * out of reset) or a ~10 second timeout expires.  Returns 0, or
 * -EINVAL for slot != 0.
 */
int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
	struct altera_ci_state *state = en50221->data;
	struct fpga_internal *inter = state->internal;
	/* reasonable timeout for CI reset is 10 seconds */
	unsigned long t_out = jiffies + msecs_to_jiffies(9999);
	int ret;

	ci_dbg_print("%s\n", __func__);

	if (0 != slot)
		return -EINVAL;

	mutex_lock(&inter->fpga_mutex);

	ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
			 (ret & 0xcf) | (1 << (5 - state->nr)), 0);

	mutex_unlock(&inter->fpga_mutex);

	for (;;) {
		/*
		 * We are in process context (a mutex is taken above), so
		 * sleep between polls instead of busy-waiting; the
		 * original mdelay(50) spun the CPU for each 50 ms step.
		 */
		msleep(50);

		mutex_lock(&inter->fpga_mutex);
		ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
				       0, NETUP_CI_FLG_RD);
		mutex_unlock(&inter->fpga_mutex);

		if ((ret & (1 << (5 - state->nr))) == 0)
			break;
		if (time_after(jiffies, t_out))
			break;
	}

	ci_dbg_print("%s: %d msecs\n", __func__,
		jiffies_to_msecs(jiffies + msecs_to_jiffies(9999) - t_out));

	return 0;
}
/* EN50221 slot_shutdown callback - nothing to do on this hardware. */
int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
	/* not implemented */
	return 0;
}
/*
 * EN50221 slot_ts_enable callback: set this slot's TS-control bit
 * (bit 3 for slot state->nr == 0, bit 2 for nr == 1) in
 * NETUP_CI_BUSCTRL, preserving the low control nibble.
 */
int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
	struct altera_ci_state *state = en50221->data;
	struct fpga_internal *inter = state->internal;
	int ret;

	ci_dbg_print("%s\n", __func__);

	if (0 != slot)
		return -EINVAL;

	mutex_lock(&inter->fpga_mutex);

	ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL,
			 (ret & 0x0f) | (1 << (3 - state->nr)), 0);

	mutex_unlock(&inter->fpga_mutex);

	return 0;
}
/* work handler */
/* work handler */
/*
 * Deferred CI status poll (scheduled from altera_ci_irq()): ack the
 * interrupt, read NETUP_CI_BUSCTRL and translate its per-slot
 * "module absent" bits (bit 0 -> CI[1], bit 1 -> CI[0]) into EN50221
 * poll status flags cached in each slot's state.
 */
static void netup_read_ci_status(struct work_struct *work)
{
	struct fpga_internal *inter =
			container_of(work, struct fpga_internal, work);
	int ret;

	ci_dbg_print("%s\n", __func__);

	mutex_lock(&inter->fpga_mutex);
	/* ack' irq */
	ret = netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0, NETUP_CI_FLG_RD);
	ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD);
	mutex_unlock(&inter->fpga_mutex);

	/* Fix: the original had stray ';' null statements after both
	 * if-blocks below, which trip -Wextra empty-body warnings. */
	if (inter->state[1] != NULL) {
		inter->state[1]->status =
				((ret & 1) == 0 ?
				DVB_CA_EN50221_POLL_CAM_PRESENT |
				DVB_CA_EN50221_POLL_CAM_READY : 0);
		ci_dbg_print("%s: setting CI[1] status = 0x%x\n",
			     __func__, inter->state[1]->status);
	}

	if (inter->state[0] != NULL) {
		inter->state[0]->status =
				((ret & 2) == 0 ?
				DVB_CA_EN50221_POLL_CAM_PRESENT |
				DVB_CA_EN50221_POLL_CAM_READY : 0);
		ci_dbg_print("%s: setting CI[0] status = 0x%x\n",
			     __func__, inter->state[0]->status);
	}
}
/* CI irq handler */
/* CI irq handler */
/*
 * Interrupt entry point: defer the actual status read to process
 * context by scheduling the internal work item.  Always reports the
 * interrupt as handled (returns 1).
 */
int altera_ci_irq(void *dev)
{
	struct fpga_inode *node;

	ci_dbg_print("%s\n", __func__);

	if (dev == NULL)
		return 1;

	node = find_inode(dev);
	if (node != NULL)
		schedule_work(&node->internal->work);

	return 1;
}
EXPORT_SYMBOL(altera_ci_irq);
/*
 * EN50221 poll callback: return the CAM presence/ready flags cached by
 * netup_read_ci_status().  Only slot 0 is valid.
 */
int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot,
			       int open)
{
	struct altera_ci_state *state = en50221->data;

	if (0 != slot)
		return -EINVAL;

	return state->status;
}
/*
 * Detach hardware PID filter 'filt_nr' (1-based): restore the demux's
 * saved start/stop_feed callbacks, and free the shared fpga_internal
 * once neither filters nor CI slots reference it.
 */
void altera_hw_filt_release(void *main_dev, int filt_nr)
{
	struct fpga_inode *temp_int = find_inode(main_dev);
	struct netup_hw_pid_filter *pid_filt = NULL;

	ci_dbg_print("%s\n", __func__);

	if (temp_int != NULL) {
		pid_filt = temp_int->internal->pid_filt[filt_nr - 1];
		/* stored old feed controls */
		pid_filt->demux->start_feed = pid_filt->start_feed;
		pid_filt->demux->stop_feed = pid_filt->stop_feed;

		/* Last user gone: unlink the inode, free shared state. */
		if (((--(temp_int->internal->filts_used)) <= 0) &&
		    ((temp_int->internal->cis_used) <= 0)) {
			ci_dbg_print("%s: Actually removing\n", __func__);
			/* remove_inode() also frees temp_int itself. */
			remove_inode(temp_int->internal);
			kfree(pid_filt->internal);
		}
		kfree(pid_filt);
	}
}
EXPORT_SYMBOL(altera_hw_filt_release);
void altera_ci_release(void *dev, int ci_nr)
{
struct fpga_inode *temp_int = find_inode(dev);
struct altera_ci_state *state = NULL;
ci_dbg_print("%s\n", __func__);
if (temp_int != NULL) {
state = temp_int->internal->state[ci_nr - 1];
altera_hw_filt_release(dev, ci_nr);
if (((temp_int->internal->filts_used) <= 0) &&
((--(temp_int->internal->cis_used)) <= 0)) {
ci_dbg_print("%s: Actually removing\n", __func__);
remove_inode(temp_int->internal);
kfree(state->internal);
}
if (state != NULL) {
if (state->ca.data != NULL)
dvb_ca_en50221_release(&state->ca);
kfree(state);
}
}
}
EXPORT_SYMBOL(altera_ci_release);
/*
 * Set one PID's drop bit in the FPGA filter table.  The table is
 * addressed by (pid >> 3), one bit per PID within the byte;
 * onoff == 1 sets the bit (drop), 0 clears it (pass).  PIDs 0-0x1f
 * and the pseudo-PID 0x2000 (full TS) are never touched here.
 */
static void altera_pid_control(struct netup_hw_pid_filter *pid_filt,
			       u16 pid, int onoff)
{
	struct fpga_internal *inter = pid_filt->internal;
	u8 store = 0;

	/* pid 0-0x1f always enabled, don't touch them */
	if ((pid == 0x2000) || (pid < 0x20))
		return;

	mutex_lock(&inter->fpga_mutex);

	netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, (pid >> 3) & 0xff, 0);
	netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1,
			 ((pid >> 11) & 0x03) | (pid_filt->nr << 2), 0);
	/* read-modify-write the table byte holding this PID's bit */
	store = netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, 0, NETUP_CI_FLG_RD);

	if (onoff)/* 0 - on, 1 - off */
		store |= (1 << (pid & 7));
	else
		store &= ~(1 << (pid & 7));

	netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, store, 0);

	mutex_unlock(&inter->fpga_mutex);

	pid_dbg_print("%s: (%d) set pid: %5d 0x%04x '%s'\n", __func__,
		      pid_filt->nr, pid, pid, onoff ? "off" : "on");
}
/*
 * Fill this filter's entire 1024-byte PID table with 0xff (drop all,
 * onoff == 1) or 0x00 (pass all, onoff == 0).  The first 4 table bytes
 * (PIDs 0-0x1f) are always written as 0 (pass).  'filt_nr' is unused;
 * the filter bank is selected by pid_filt->nr.
 */
static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt,
					   int filt_nr, int onoff)
{
	struct fpga_internal *inter = pid_filt->internal;
	u8 store = 0;
	int i;

	pid_dbg_print("%s: pid_filt->nr[%d] now %s\n", __func__, pid_filt->nr,
		      onoff ? "off" : "on");

	if (onoff)/* 0 - on, 1 - off */
		store = 0xff;/* ignore pid */
	else
		store = 0;/* enable pid */

	mutex_lock(&inter->fpga_mutex);

	for (i = 0; i < 1024; i++) {
		netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, i & 0xff, 0);
		netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1,
				 ((i >> 8) & 0x03) | (pid_filt->nr << 2), 0);
		/* pid 0-0x1f always enabled */
		netup_fpga_op_rw(inter, NETUP_CI_PID_DATA,
				 (i > 3 ? store : 0), 0);
	}

	mutex_unlock(&inter->fpga_mutex);
}
int altera_pid_feed_control(void *demux_dev, int filt_nr,
struct dvb_demux_feed *feed, int onoff)
{
struct fpga_inode *temp_int = find_dinode(demux_dev);
struct fpga_internal *inter = temp_int->internal;
struct netup_hw_pid_filter *pid_filt = inter->pid_filt[filt_nr - 1];
altera_pid_control(pid_filt, feed->pid, onoff ? 0 : 1);
/* call old feed proc's */
if (onoff)
pid_filt->start_feed(feed);
else
pid_filt->stop_feed(feed);
if (feed->pid == 0x2000)
altera_toggle_fullts_streaming(pid_filt, filt_nr,
onoff ? 0 : 1);
return 0;
}
EXPORT_SYMBOL(altera_pid_feed_control);
/* Start a feed on CI filter 'num' (1-based). */
int altera_ci_start_feed(struct dvb_demux_feed *feed, int num)
{
	altera_pid_feed_control(feed->demux, num, feed, 1);

	return 0;
}

/* Stop a feed on CI filter 'num' (1-based). */
int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num)
{
	altera_pid_feed_control(feed->demux, num, feed, 0);

	return 0;
}

/*
 * Per-slot trampolines with the dvb_demux start/stop_feed signature;
 * these are installed into the demux by altera_hw_filt_init().
 */
int altera_ci_start_feed_1(struct dvb_demux_feed *feed)
{
	return altera_ci_start_feed(feed, 1);
}

int altera_ci_stop_feed_1(struct dvb_demux_feed *feed)
{
	return altera_ci_stop_feed(feed, 1);
}

int altera_ci_start_feed_2(struct dvb_demux_feed *feed)
{
	return altera_ci_start_feed(feed, 2);
}

int altera_ci_stop_feed_2(struct dvb_demux_feed *feed)
{
	return altera_ci_stop_feed(feed, 2);
}
/*
 * Attach hardware PID filter 'hw_filt_nr' (1-based) to the demux in
 * 'config': reuse or create the per-device shared fpga_internal, save
 * the demux's feed callbacks, substitute the per-slot
 * altera_ci_{start,stop}_feed_N trampolines, and disable full-TS
 * pass-through for this filter.  Returns 0 or -ENOMEM.
 */
int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
{
	struct netup_hw_pid_filter *pid_filt = NULL;
	struct fpga_inode *temp_int = find_inode(config->dev);
	struct fpga_internal *inter = NULL;
	int ret = 0;

	pid_filt = kzalloc(sizeof(struct netup_hw_pid_filter), GFP_KERNEL);

	ci_dbg_print("%s\n", __func__);

	if (!pid_filt) {
		ret = -ENOMEM;
		goto err;
	}

	if (temp_int != NULL) {
		inter = temp_int->internal;
		(inter->filts_used)++;
		ci_dbg_print("%s: Find Internal Structure!\n", __func__);
	} else {
		inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
		if (!inter) {
			ret = -ENOMEM;
			goto err;
		}

		/* NOTE(review): append_internal() can return NULL on OOM;
		 * the result is not checked here - confirm and handle. */
		temp_int = append_internal(inter);
		inter->filts_used = 1;
		inter->dev = config->dev;
		inter->fpga_rw = config->fpga_rw;
		mutex_init(&inter->fpga_mutex);
		inter->strt_wrk = 1;
		ci_dbg_print("%s: Create New Internal Structure!\n", __func__);
	}

	ci_dbg_print("%s: setting hw pid filter = %p for ci = %d\n", __func__,
		     pid_filt, hw_filt_nr - 1);
	inter->pid_filt[hw_filt_nr - 1] = pid_filt;
	pid_filt->demux = config->demux;
	pid_filt->internal = inter;
	pid_filt->nr = hw_filt_nr - 1;
	/* store old feed controls */
	pid_filt->start_feed = config->demux->start_feed;
	pid_filt->stop_feed = config->demux->stop_feed;
	/* replace with new feed controls */
	if (hw_filt_nr == 1) {
		pid_filt->demux->start_feed = altera_ci_start_feed_1;
		pid_filt->demux->stop_feed = altera_ci_stop_feed_1;
	} else if (hw_filt_nr == 2) {
		pid_filt->demux->start_feed = altera_ci_start_feed_2;
		pid_filt->demux->stop_feed = altera_ci_stop_feed_2;
	}

	/* start with everything dropped (onoff == 1 means "off") */
	altera_toggle_fullts_streaming(pid_filt, 0, 1);

	return 0;
err:
	ci_dbg_print("%s: Can't init hardware filter: Error %d\n",
		     __func__, ret);

	kfree(pid_filt);

	return ret;
}
/*
 * Register CI slot 'ci_nr' (1-based): reuse or create the shared
 * fpga_internal, register with the EN50221 core, hook the hardware
 * PID filter, then program the TS dividers/outputs and the CAM IRQ,
 * and kick an initial status poll.  Returns 0 or a negative errno.
 */
int altera_ci_init(struct altera_ci_config *config, int ci_nr)
{
	struct altera_ci_state *state;
	struct fpga_inode *temp_int = find_inode(config->dev);
	struct fpga_internal *inter = NULL;
	int ret = 0;
	u8 store = 0;

	state = kzalloc(sizeof(struct altera_ci_state), GFP_KERNEL);

	ci_dbg_print("%s\n", __func__);

	if (!state) {
		ret = -ENOMEM;
		goto err;
	}

	if (temp_int != NULL) {
		inter = temp_int->internal;
		(inter->cis_used)++;
		ci_dbg_print("%s: Find Internal Structure!\n", __func__);
	} else {
		inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
		if (!inter) {
			ret = -ENOMEM;
			goto err;
		}

		/* NOTE(review): append_internal() result unchecked (OOM). */
		temp_int = append_internal(inter);
		inter->cis_used = 1;
		inter->dev = config->dev;
		inter->fpga_rw = config->fpga_rw;
		mutex_init(&inter->fpga_mutex);
		inter->strt_wrk = 1;
		ci_dbg_print("%s: Create New Internal Structure!\n", __func__);
	}

	ci_dbg_print("%s: setting state = %p for ci = %d\n", __func__,
		     state, ci_nr - 1);
	inter->state[ci_nr - 1] = state;
	state->internal = inter;
	state->nr = ci_nr - 1;

	/* EN50221 slot callbacks; the slot exposes a single CAM (slot 0) */
	state->ca.owner = THIS_MODULE;
	state->ca.read_attribute_mem = altera_ci_read_attribute_mem;
	state->ca.write_attribute_mem = altera_ci_write_attribute_mem;
	state->ca.read_cam_control = altera_ci_read_cam_ctl;
	state->ca.write_cam_control = altera_ci_write_cam_ctl;
	state->ca.slot_reset = altera_ci_slot_reset;
	state->ca.slot_shutdown = altera_ci_slot_shutdown;
	state->ca.slot_ts_enable = altera_ci_slot_ts_ctl;
	state->ca.poll_slot_status = altera_poll_ci_slot_status;
	state->ca.data = state;

	ret = dvb_ca_en50221_init(config->adapter,
				  &state->ca,
				  /* flags */ 0,
				  /* n_slots */ 1);
	if (0 != ret)
		goto err;

	altera_hw_filt_init(config, ci_nr);

	/* init the shared status-poll work item only once per device */
	if (inter->strt_wrk) {
		INIT_WORK(&inter->work, netup_read_ci_status);
		inter->strt_wrk = 0;
	}

	ci_dbg_print("%s: CI initialized!\n", __func__);

	mutex_lock(&inter->fpga_mutex);

	/* Enable div */
	netup_fpga_op_rw(inter, NETUP_CI_TSA_DIV, 0x0, 0);
	netup_fpga_op_rw(inter, NETUP_CI_TSB_DIV, 0x0, 0);
	/* enable TS out */
	store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD);
	store |= (3 << 4);
	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);

	ret = netup_fpga_op_rw(inter, NETUP_CI_REVISION, 0, NETUP_CI_FLG_RD);
	/* enable irq */
	netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0x44, 0);

	mutex_unlock(&inter->fpga_mutex);

	ci_dbg_print("%s: NetUP CI Revision = 0x%x\n", __func__, ret);

	schedule_work(&inter->work);

	return 0;
err:
	ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);

	kfree(state);

	return ret;
}
/*
 * Pulse the per-slot tuner reset line: clear the reset bit in
 * NETUP_CI_BUSCTRL2 for 100 ms, then set it again.  Returns 0, or -1
 * when no internal state is registered for 'dev'.
 */
int altera_ci_tuner_reset(void *dev, int ci_nr)
{
	struct fpga_inode *temp_int = find_inode(dev);
	struct fpga_internal *inter = NULL;
	u8 store;

	ci_dbg_print("%s\n", __func__);

	if (temp_int == NULL)
		return -1;

	if (temp_int->internal == NULL)
		return -1;

	inter = temp_int->internal;

	mutex_lock(&inter->fpga_mutex);

	store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD);
	/* ci_nr is 1-based: bit 3 for CI 1, bit 2 for CI 2 */
	store &= ~(4 << (2 - ci_nr));
	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);
	msleep(100);
	store |= (4 << (2 - ci_nr));
	netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0);

	mutex_unlock(&inter->fpga_mutex);

	return 0;
}
| gpl-2.0 |
TEAM-RAZOR-DEVICES/kernel_lge_g3 | drivers/net/ethernet/xscale/ixp2000/pm3386.c | 9948 | 8256 | /*
* Helper functions for the PM3386s on the Radisys ENP2611
* Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
* Dedicated to Marija Kulikova.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <asm/io.h>
#include "pm3386.h"
/*
* Read from register 'reg' of PM3386 device 'pm'.
*/
/*
 * Read 16-bit register 'reg' of PM3386 chip 'pm' (0 or 1) through its
 * memory-mapped register window.
 */
static u16 pm3386_reg_read(int pm, int reg)
{
	void *base;

	if (pm == 1)
		base = (void *)ENP2611_PM3386_1_VIRT_BASE;
	else
		base = (void *)ENP2611_PM3386_0_VIRT_BASE;

	/* Registers are 16 bits wide, hence the << 1 byte offset. */
	return *((volatile u16 *)(base + (reg << 1)));
}
/*
* Write to register 'reg' of PM3386 device 'pm', and perform
* a readback from the identification register.
*/
/*
 * Write 16-bit register 'reg' of PM3386 chip 'pm', then read back the
 * identification register to make sure the write has been posted
 * before returning.
 */
static void pm3386_reg_write(int pm, int reg, u16 value)
{
	void *base;
	u16 dummy;

	if (pm == 1)
		base = (void *)ENP2611_PM3386_1_VIRT_BASE;
	else
		base = (void *)ENP2611_PM3386_0_VIRT_BASE;

	*((volatile u16 *)(base + (reg << 1))) = value;

	/* Dummy readback; the asm keeps the compiler from dropping it. */
	dummy = *((volatile u16 *)base);
	__asm__ __volatile__("mov %0, %0" : "+r" (dummy));
}
/*
* Read from port 'port' register 'reg', where the registers
* for the different ports are 'spacing' registers apart.
*/
/*
 * Read a per-port register: odd ports use the same register block
 * offset by 'spacing'; the chip index is port >> 1.
 */
static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
{
	int reg = _reg + ((port & 1) ? spacing : 0);

	return pm3386_reg_read(port >> 1, reg);
}
/*
* Write to port 'port' register 'reg', where the registers
* for the different ports are 'spacing' registers apart.
*/
/*
 * Write a per-port register: odd ports use the same register block
 * offset by 'spacing'; the chip index is port >> 1.
 */
static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
{
	int reg = _reg + ((port & 1) ? spacing : 0);

	pm3386_reg_write(port >> 1, reg, value);
}
/*
 * The second PM3386 is optional; detect it by reading its device
 * identification register (0x000) and checking for the 0x3386 ID.
 */
int pm3386_secondary_present(void)
{
	return pm3386_reg_read(1, 0) == 0x3386;
}
/*
 * Hard-reset both PM3386 chips (assert analog+digital reset, then
 * deassert analog, then digital, with settle delays), preserving the
 * MAC addresses already programmed and leaving carrier disabled on
 * every port.
 */
void pm3386_reset(void)
{
	u8 mac[3][6];
	int secondary;

	secondary = pm3386_secondary_present();

	/* Save programmed MAC addresses. */
	pm3386_get_mac(0, mac[0]);
	pm3386_get_mac(1, mac[1]);
	if (secondary)
		pm3386_get_mac(2, mac[2]);

	/* Assert analog and digital reset. */
	pm3386_reg_write(0, 0x002, 0x0060);
	if (secondary)
		pm3386_reg_write(1, 0x002, 0x0060);
	mdelay(1);

	/* Deassert analog reset. */
	pm3386_reg_write(0, 0x002, 0x0062);
	if (secondary)
		pm3386_reg_write(1, 0x002, 0x0062);
	mdelay(10);

	/* Deassert digital reset. */
	pm3386_reg_write(0, 0x002, 0x0063);
	if (secondary)
		pm3386_reg_write(1, 0x002, 0x0063);
	mdelay(10);

	/* Restore programmed MAC addresses. */
	pm3386_set_mac(0, mac[0]);
	pm3386_set_mac(1, mac[1]);
	if (secondary)
		pm3386_set_mac(2, mac[2]);

	/* Disable carrier on all ports. */
	pm3386_set_carrier(0, 0);
	pm3386_set_carrier(1, 0);
	if (secondary)
		pm3386_set_carrier(2, 0);
}
/* Swap the two bytes of a 16-bit value. */
static u16 swaph(u16 x)
{
	return (u16)(((x & 0x00ff) << 8) | ((x & 0xff00) >> 8));
}
/*
 * Number of usable ports: two on the primary chip, plus one more when
 * the secondary PM3386 responds.
 */
int pm3386_port_count(void)
{
	return 2 + pm3386_secondary_present();
}
/*
 * Bring one port to a usable post-reset state: un-swap MAC addresses
 * left reversed by the bootloader, configure the SPI-3 ingress/egress
 * and EGMAC blocks, allow jumbo frames and restart autonegotiation.
 *
 * NOTE(review): the two soft-reset poll loops below spin with no
 * timeout; a wedged chip would hang here - confirm this is acceptable.
 */
void pm3386_init_port(int port)
{
	int pm = port >> 1;

	/*
	 * Work around ENP2611 bootloader programming MAC address
	 * in reverse.
	 */
	if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
	    (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
		u16 temp[3];

		temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
		temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
		temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
		pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
		pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
		pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
	}

	/*
	 * Initialise narrowbanding mode.  See application note 2010486
	 * for more information.  (@@@ We also need to issue a reset
	 * when ROOL or DOOL are detected.)
	 */
	pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
	udelay(500);
	pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);

	/*
	 * SPI-3 ingress block.  Set 64 bytes SPI-3 burst size
	 * towards SPI-3 bridge.
	 */
	pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);

	/*
	 * Enable ingress protocol checking, and soft reset the
	 * SPI-3 ingress block.
	 */
	pm3386_reg_write(pm, 0x103, 0x0003);
	while (!(pm3386_reg_read(pm, 0x103) & 0x80))
		;

	/*
	 * SPI-3 egress block.  Gather 12288 bytes of the current
	 * packet in the TX fifo before initiating transmit on the
	 * SERDES interface.  (Prevents TX underflows.)
	 */
	pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);

	/*
	 * Enforce odd parity from the SPI-3 bridge, and soft reset
	 * the SPI-3 egress block.
	 */
	pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
	while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
		;

	/*
	 * EGMAC block.  Set this channels to reject long preambles,
	 * not send or transmit PAUSE frames, enable preamble checking,
	 * disable frame length checking, enable FCS appending, enable
	 * TX frame padding.
	 */
	pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);

	/*
	 * Soft reset the EGMAC block.
	 */
	pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
	pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);

	/*
	 * Auto-sense autonegotiation status.
	 */
	pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);

	/*
	 * Allow reception of jumbo frames.
	 */
	pm3386_port_reg_write(port, 0x310, 0x100, 9018);

	/*
	 * Allow transmission of jumbo frames.
	 */
	pm3386_port_reg_write(port, 0x336, 0x100, 9018);

	/* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */

	/*
	 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
	 */
	pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);

	/*
	 * Enable and restart autonegotiation.
	 */
	pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
	pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
}
/*
 * Read the 6-byte station MAC address of 'port' out of registers
 * 0x308..0x30a (two bytes per 16-bit register, low byte first).
 */
void pm3386_get_mac(int port, u8 *mac)
{
	int i;

	for (i = 0; i < 3; i++) {
		u16 temp = pm3386_port_reg_read(port, 0x308 + i, 0x100);

		mac[2 * i] = temp & 0xff;
		mac[2 * i + 1] = (temp >> 8) & 0xff;
	}
}
/*
 * Program the 6-byte station MAC address of 'port' into registers
 * 0x308..0x30a (two bytes per 16-bit register, low byte first).
 */
void pm3386_set_mac(int port, u8 *mac)
{
	int i;

	for (i = 0; i < 3; i++)
		pm3386_port_reg_write(port, 0x308 + i, 0x100,
				      (mac[2 * i + 1] << 8) | mac[2 * i]);
}
/*
 * Read one 32-bit statistics counter exposed as two consecutive
 * 16-bit registers (low word at 'base', high word at 'base'+1).
 */
static u32 pm3386_get_stat(int port, u16 base)
{
	u32 value;

	value = pm3386_port_reg_read(port, base, 0x100);
	value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;

	return value;
}
/*
 * Fill 'stats' with the port's packet/byte counters.  The chip first
 * snapshots its counters (register 0x500 self-clears when the
 * snapshot is done); unpopulated fields are zeroed.
 */
void pm3386_get_stats(int port, struct net_device_stats *stats)
{
	/*
	 * Snapshot statistics counters.
	 */
	pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
	while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
		;

	memset(stats, 0, sizeof(*stats));

	stats->rx_packets = pm3386_get_stat(port, 0x510);
	stats->tx_packets = pm3386_get_stat(port, 0x590);
	stats->rx_bytes = pm3386_get_stat(port, 0x514);
	stats->tx_bytes = pm3386_get_stat(port, 0x594);
	/* @@@ Add other stats. */
}
/* Assert (state != 0) or drop the port's carrier via register 0x703. */
void pm3386_set_carrier(int port, int state)
{
	pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
}
/*
 * Report link state from EGMAC status register 0x31a (bit 1).
 * NOTE(review): the register is read twice, presumably because the
 * status bit is latched and the first read returns the sticky value -
 * confirm against the PM3386 datasheet.
 */
int pm3386_is_link_up(int port)
{
	u16 temp;

	temp = pm3386_port_reg_read(port, 0x31a, 0x100);
	temp = pm3386_port_reg_read(port, 0x31a, 0x100);

	return !!(temp & 0x0002);
}
/* Set the EGMAC RX-enable bit (0x1000) in control register 0x303. */
void pm3386_enable_rx(int port)
{
	u16 temp;

	temp = pm3386_port_reg_read(port, 0x303, 0x100);
	temp |= 0x1000;
	pm3386_port_reg_write(port, 0x303, 0x100, temp);
}

/* Clear the EGMAC RX-enable bit in control register 0x303. */
void pm3386_disable_rx(int port)
{
	u16 temp;

	temp = pm3386_port_reg_read(port, 0x303, 0x100);
	temp &= 0xefff;
	pm3386_port_reg_write(port, 0x303, 0x100, temp);
}

/* Set the EGMAC TX-enable bit (0x4000) in control register 0x303. */
void pm3386_enable_tx(int port)
{
	u16 temp;

	temp = pm3386_port_reg_read(port, 0x303, 0x100);
	temp |= 0x4000;
	pm3386_port_reg_write(port, 0x303, 0x100, temp);
}

/* Clear the EGMAC TX-enable bit in control register 0x303. */
void pm3386_disable_tx(int port)
{
	u16 temp;

	temp = pm3386_port_reg_read(port, 0x303, 0x100);
	temp &= 0xbfff;
	pm3386_port_reg_write(port, 0x303, 0x100, temp);
}
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamExodus/kernel_samsung_smdk4412 | drivers/scsi/fnic/vnic_wq.c | 14044 | 4356 | /*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
/*
 * Allocate the buffer-tracking blocks for a work queue and link every
 * vnic_wq_buf into a circular list mirroring the descriptor ring:
 * buf->desc points at the matching descriptor, and the entry for the
 * last descriptor wraps back to bufs[0].  Returns 0 or -ENOMEM.
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	vdev = wq->vdev;

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc wq_bufs\n");
			/* partially allocated blocks are freed by the
			 * caller via vnic_wq_free() */
			return -ENOMEM;
		}
	}

	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* last entry: close the ring */
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
				/* end of block: chain to the next block */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}
/*
 * Release everything vnic_wq_alloc() set up: the descriptor ring and
 * all buffer-tracking blocks.  Safe on a partially allocated queue
 * (kfree(NULL) is a no-op).
 */
void vnic_wq_free(struct vnic_wq *wq)
{
	unsigned int i;

	vnic_dev_free_desc_ring(wq->vdev, &wq->ring);

	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
		kfree(wq->bufs[i]);
		wq->bufs[i] = NULL;
	}

	wq->ctrl = NULL;
}
/*
 * Allocate a work queue: locate its control registers, make sure the
 * queue is disabled, then allocate the descriptor ring and the buffer
 * bookkeeping.  Returns 0, -EINVAL when the WQ resource is missing,
 * or the ring/buffer allocation error (resources freed on failure).
 */
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = index;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
	if (!wq->ctrl) {
		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_wq_disable(wq);

	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_wq_alloc_bufs(wq);
	if (err) {
		vnic_wq_free(wq);
		return err;
	}

	return 0;
}
/*
 * Program the hardware WQ control block: ring base/size, zeroed fetch
 * and posted indices, completion-queue binding, and error-interrupt
 * settings.
 */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
}
/* Return the work queue's hardware error-status register. */
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}

/* Start the work queue. */
void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
/*
 * Request the hardware to stop the work queue, then busy-wait up to
 * ~100 us for the 'running' bit to clear.  Returns 0 on success or
 * -ETIMEDOUT when the hardware fails to acknowledge.
 */
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 100; wait++) {
		if (!(ioread32(&wq->ctrl->running)))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}
/*
 * Drain a disabled work queue: invoke 'buf_clean' on every in-flight
 * buffer, reset the software ring state, and zero the hardware
 * indices.  Must only be called with the queue disabled (the BUG_ON
 * enforces this).
 */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
| gpl-2.0 |
xhp/git | builtin/mv.c | 221 | 8196 | /*
* "git mv" builtin command
*
* Copyright (C) 2006 Johannes Schindelin
*/
#include "builtin.h"
#include "lockfile.h"
#include "dir.h"
#include "cache-tree.h"
#include "string-list.h"
#include "parse-options.h"
#include "submodule.h"
static const char * const builtin_mv_usage[] = {
N_("git mv [<options>] <source>... <destination>"),
NULL
};
#define DUP_BASENAME 1
#define KEEP_TRAILING_SLASH 2
/*
 * Copy 'count' pathspecs into a fresh NULL-terminated array and
 * normalize them against 'prefix'.  Unless KEEP_TRAILING_SLASH is
 * set, trailing directory separators are stripped; with DUP_BASENAME
 * each entry is replaced by a copy of its basename.
 */
static const char **internal_copy_pathspec(const char *prefix,
					   const char **pathspec,
					   int count, unsigned flags)
{
	int i;
	const char **result = xmalloc((count + 1) * sizeof(const char *));
	memcpy(result, pathspec, count * sizeof(const char *));
	result[count] = NULL;
	for (i = 0; i < count; i++) {
		int length = strlen(result[i]);
		int to_copy = length;
		/* drop trailing '/' (or '\\' on Windows) unless asked not to */
		while (!(flags & KEEP_TRAILING_SLASH) &&
		       to_copy > 0 && is_dir_sep(result[i][to_copy - 1]))
			to_copy--;
		if (to_copy != length || flags & DUP_BASENAME) {
			char *it = xmemdupz(result[i], to_copy);
			if (flags & DUP_BASENAME) {
				result[i] = xstrdup(basename(it));
				free(it);
			} else
				result[i] = it;
		}
	}
	return get_pathspec(prefix, result);
}
/*
 * Return 'path' guaranteed to end in '/': the original pointer when it
 * already does, otherwise a freshly allocated copy with '/' appended.
 */
static const char *add_slash(const char *path)
{
	int len = strlen(path);
	char *with_slash;

	if (path[len - 1] == '/')
		return path;

	with_slash = xmalloc(len + 2);
	memcpy(with_slash, path, len);
	with_slash[len] = '/';
	with_slash[len + 1] = '\0';
	return with_slash;
}
static struct lock_file lock_file;
#define SUBMODULE_WITH_GITDIR ((const char *)1)
/*
 * Validate that index entry 'first' at path 'src' is a movable
 * submodule and record its gitfile path in *submodule_gitfile (or the
 * SUBMODULE_WITH_GITDIR sentinel when .git is a real directory).
 * Dies when the entry is not a gitlink or .gitmodules has unstaged
 * changes.
 */
static void prepare_move_submodule(const char *src, int first,
				   const char **submodule_gitfile)
{
	struct strbuf submodule_dotgit = STRBUF_INIT;
	if (!S_ISGITLINK(active_cache[first]->ce_mode))
		die(_("Directory %s is in index and no submodule?"), src);
	if (!is_staging_gitmodules_ok())
		die(_("Please stage your changes to .gitmodules or stash them to proceed"));
	strbuf_addf(&submodule_dotgit, "%s/.git", src);
	*submodule_gitfile = read_gitfile(submodule_dotgit.buf);
	if (*submodule_gitfile)
		*submodule_gitfile = xstrdup(*submodule_gitfile);
	else
		*submodule_gitfile = SUBMODULE_WITH_GITDIR;
	strbuf_release(&submodule_dotgit);
}
/*
 * Find the contiguous range of index entries under directory 'src'
 * (of 'length' bytes, no trailing slash).  On return *first_p/*last_p
 * delimit the half-open range and the entry count is returned.  Dies
 * if "src/" itself exists as an index entry.
 */
static int index_range_of_same_dir(const char *src, int length,
				   int *first_p, int *last_p)
{
	const char *src_w_slash = add_slash(src);
	int first, last, len_w_slash = length + 1;

	first = cache_name_pos(src_w_slash, len_w_slash);
	if (first >= 0)
		die(_("%.*s is in index"), len_w_slash, src_w_slash);

	/* negative result encodes the insertion point: -pos - 1 */
	first = -1 - first;
	for (last = first; last < active_nr; last++) {
		const char *path = active_cache[last]->name;
		if (strncmp(path, src_w_slash, len_w_slash))
			break;
	}

	/* add_slash() returns 'src' itself when it already ends in '/' */
	if (src_w_slash != src)
		free((char *)src_w_slash);
	*first_p = first;
	*last_p = last;
	return last - first;
}
/*
 * Entry point for "git mv": validate every <source> -> <destination>
 * pair (expanding tracked directories into their index entries), then
 * perform the renames in the working tree and the index, updating
 * .gitmodules when submodules move.
 */
int cmd_mv(int argc, const char **argv, const char *prefix)
{
	int i, gitmodules_modified = 0;
	int verbose = 0, show_only = 0, force = 0, ignore_errors = 0;
	struct option builtin_mv_options[] = {
		OPT__VERBOSE(&verbose, N_("be verbose")),
		OPT__DRY_RUN(&show_only, N_("dry run")),
		OPT__FORCE(&force, N_("force move/rename even if target exists")),
		OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
		OPT_END(),
	};
	const char **source, **destination, **dest_path, **submodule_gitfile;
	/* per-entry plan: update worktree, index, or both */
	enum update_mode { BOTH = 0, WORKING_DIRECTORY, INDEX } *modes;
	struct stat st;
	struct string_list src_for_dst = STRING_LIST_INIT_NODUP;

	gitmodules_config();
	git_config(git_default_config, NULL);

	argc = parse_options(argc, argv, prefix, builtin_mv_options,
			     builtin_mv_usage, 0);
	if (--argc < 1)
		usage_with_options(builtin_mv_usage, builtin_mv_options);

	hold_locked_index(&lock_file, 1);
	if (read_cache() < 0)
		die(_("index file corrupt"));

	source = internal_copy_pathspec(prefix, argv, argc, 0);
	modes = xcalloc(argc, sizeof(enum update_mode));
	/*
	 * Keep trailing slash, needed to let
	 * "git mv file no-such-dir/" error out.
	 */
	dest_path = internal_copy_pathspec(prefix, argv + argc, 1,
					   KEEP_TRAILING_SLASH);
	submodule_gitfile = xcalloc(argc, sizeof(char *));

	if (dest_path[0][0] == '\0')
		/* special case: "." was normalized to "" */
		destination = internal_copy_pathspec(dest_path[0], argv, argc, DUP_BASENAME);
	else if (!lstat(dest_path[0], &st) &&
			S_ISDIR(st.st_mode)) {
		/* moving into an existing directory: dst = dir/basename(src) */
		dest_path[0] = add_slash(dest_path[0]);
		destination = internal_copy_pathspec(dest_path[0], argv, argc, DUP_BASENAME);
	} else {
		if (argc != 1)
			die(_("destination '%s' is not a directory"), dest_path[0]);
		destination = dest_path;
	}

	/* Checking */
	for (i = 0; i < argc; i++) {
		const char *src = source[i], *dst = destination[i];
		int length, src_is_dir;
		const char *bad = NULL;

		if (show_only)
			printf(_("Checking rename of '%s' to '%s'\n"), src, dst);

		length = strlen(src);
		if (lstat(src, &st) < 0)
			bad = _("bad source");
		else if (!strncmp(src, dst, length) &&
				(dst[length] == 0 || dst[length] == '/')) {
			bad = _("can not move directory into itself");
		} else if ((src_is_dir = S_ISDIR(st.st_mode))
				&& lstat(dst, &st) == 0)
			bad = _("cannot move directory over file");
		else if (src_is_dir) {
			int first = cache_name_pos(src, length), last;

			if (first >= 0)
				/* src is a gitlink: move as a submodule */
				prepare_move_submodule(src, first,
						       submodule_gitfile + i);
			else if (index_range_of_same_dir(src, length,
							 &first, &last) < 1)
				bad = _("source directory is empty");
			else { /* last - first >= 1 */
				/*
				 * Expand the directory move into one
				 * (src, dst) pair per index entry; the
				 * arrays grow by last - first entries.
				 */
				int j, dst_len, n;

				modes[i] = WORKING_DIRECTORY;
				n = argc + last - first;
				REALLOC_ARRAY(source, n);
				REALLOC_ARRAY(destination, n);
				REALLOC_ARRAY(modes, n);
				REALLOC_ARRAY(submodule_gitfile, n);

				dst = add_slash(dst);
				dst_len = strlen(dst);

				for (j = 0; j < last - first; j++) {
					const char *path = active_cache[first + j]->name;
					source[argc + j] = path;
					destination[argc + j] =
						prefix_path(dst, dst_len, path + length + 1);
					modes[argc + j] = INDEX;
					submodule_gitfile[argc + j] = NULL;
				}
				argc += last - first;
			}
		} else if (cache_name_pos(src, length) < 0)
			bad = _("not under version control");
		else if (lstat(dst, &st) == 0 &&
			 (!ignore_case || strcasecmp(src, dst))) {
			bad = _("destination exists");
			if (force) {
				/*
				 * only files can overwrite each other:
				 * check both source and destination
				 */
				if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) {
					if (verbose)
						warning(_("overwriting '%s'"), dst);
					bad = NULL;
				} else
					bad = _("Cannot overwrite");
			}
		} else if (string_list_has_string(&src_for_dst, dst))
			bad = _("multiple sources for the same target");
		else if (is_dir_sep(dst[strlen(dst) - 1]))
			bad = _("destination directory does not exist");
		else
			string_list_insert(&src_for_dst, dst);

		if (!bad)
			continue;
		if (!ignore_errors)
			die(_("%s, source=%s, destination=%s"),
			     bad, src, dst);
		/* -k: drop this entry from all four arrays and continue */
		if (--argc > 0) {
			int n = argc - i;
			memmove(source + i, source + i + 1,
				n * sizeof(char *));
			memmove(destination + i, destination + i + 1,
				n * sizeof(char *));
			memmove(modes + i, modes + i + 1,
				n * sizeof(enum update_mode));
			memmove(submodule_gitfile + i, submodule_gitfile + i + 1,
				n * sizeof(char *));
			i--;
		}
	}

	/* All checks passed: perform the renames. */
	for (i = 0; i < argc; i++) {
		const char *src = source[i], *dst = destination[i];
		enum update_mode mode = modes[i];
		int pos;
		if (show_only || verbose)
			printf(_("Renaming %s to %s\n"), src, dst);
		if (!show_only && mode != INDEX) {
			if (rename(src, dst) < 0 && !ignore_errors)
				die_errno(_("renaming '%s' failed"), src);
			if (submodule_gitfile[i]) {
				if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
					connect_work_tree_and_git_dir(dst, submodule_gitfile[i]);
				if (!update_path_in_gitmodules(src, dst))
					gitmodules_modified = 1;
			}
		}

		if (mode == WORKING_DIRECTORY)
			continue;

		pos = cache_name_pos(src, strlen(src));
		assert(pos >= 0);
		if (!show_only)
			rename_cache_entry_at(pos, dst);
	}

	if (gitmodules_modified)
		stage_updated_gitmodules();

	if (active_cache_changed &&
	    write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
		die(_("Unable to write new index file"));

	return 0;
}
| gpl-2.0 |
TeslaProject/android_kernel_moto_shamu | arch/arm/mach-omap2/timer.c | 221 | 19898 | /*
* linux/arch/arm/mach-omap2/timer.c
*
* OMAP2 GP timer support.
*
* Copyright (C) 2009 Nokia Corporation
*
* Update to use new clocksource/clockevent layers
* Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
* Copyright (C) 2007 MontaVista Software, Inc.
*
* Original driver:
* Copyright (C) 2005 Nokia Corporation
* Author: Paul Mundt <paul.mundt@nokia.com>
* Juha Yrjölä <juha.yrjola@nokia.com>
* OMAP Dual-mode timer framework support by Timo Teras
*
* Some parts based off of TI's 24xx code:
*
* Copyright (C) 2004-2009 Texas Instruments, Inc.
*
* Roughly modelled after the OMAP1 MPU timer code.
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <asm/mach/time.h>
#include <asm/smp_twd.h>
#include <asm/sched_clock.h>
#include "omap_hwmod.h"
#include "omap_device.h"
#include <plat/counter-32k.h>
#include <plat/dmtimer.h>
#include "omap-pm.h"
#include "soc.h"
#include "common.h"
#include "powerdomain.h"
#define REALTIME_COUNTER_BASE 0x48243200
#define INCREMENTER_NUMERATOR_OFFSET 0x10
#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET 0x14
#define NUMERATOR_DENUMERATOR_MASK 0xfffff000
/* Clockevent code */

/* GP timer instance backing the system clockevent. */
static struct omap_dm_timer clkev;
/* Forward declaration: the interrupt handler needs it before the
 * full initializer further down the file. */
static struct clock_event_device clockevent_gpt;
static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_gpt;
__omap_dm_timer_write_status(&clkev, OMAP_TIMER_INT_OVERFLOW);
evt->event_handler(evt);
return IRQ_HANDLED;
}
/*
 * IRQ action for the GP timer clockevent.  dev_id is set to &clkev in
 * omap2_gp_clockevent_init() before setup_irq() is called.
 */
static struct irqaction omap2_gp_timer_irq = {
	.name		= "gp_timer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= omap2_gp_timer_interrupt,
};
static int omap2_gp_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
__omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_ST,
0xffffffff - cycles, OMAP_TIMER_POSTED);
return 0;
}
/*
 * Switch the clockevent timer between modes.  The timer is always
 * stopped first; periodic mode reloads it so the overflow interrupt
 * fires HZ times per second.
 */
static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
				    struct clock_event_device *evt)
{
	u32 period;

	__omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		period = clkev.rate / HZ;
		period -= 1;
		/* Looks like we need to first set the load value separately */
		__omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG,
				      0xffffffff - period, OMAP_TIMER_POSTED);
		__omap_dm_timer_load_start(&clkev,
					OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
					0xffffffff - period, OMAP_TIMER_POSTED);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* Nothing to do here: set_next_event() programs each event. */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		/* Timer was already stopped above. */
		break;
	}
}
/* Clockevent device backed by a GP timer; .name is filled in at init. */
static struct clock_event_device clockevent_gpt = {
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= omap2_gp_timer_set_next_event,
	.set_mode	= omap2_gp_timer_set_mode,
};

/*
 * Property attached to a claimed timer DT node so the kernel does not
 * also register it as a platform device.
 */
static struct property device_disabled = {
	.name = "status",
	.length = sizeof("disabled"),
	.value = "disabled",
};

/* DT timer nodes that may serve as the system timer. */
static struct of_device_id omap_timer_match[] __initdata = {
	{ .compatible = "ti,omap2420-timer", },
	{ .compatible = "ti,omap3430-timer", },
	{ .compatible = "ti,omap4430-timer", },
	{ .compatible = "ti,omap5430-timer", },
	{ .compatible = "ti,am335x-timer", },
	{ .compatible = "ti,am335x-timer-1ms", },
	{ }
};
/**
 * omap_get_timer_dt - get a timer using device-tree
 * @match	- device-tree match structure for matching a device type
 * @property	- optional timer property to match
 *
 * Find an available timer node during early boot for use as the kernel
 * system timer.  With @property set, only nodes carrying that property
 * match; without it, nodes claimed for special uses (always-on, DSP,
 * PWM, secure) are skipped.  The chosen node is marked "disabled" so it
 * is not later registered as a platform device.
 */
static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
						     const char *property)
{
	struct device_node *np;

	for_each_matching_node(np, match) {
		int skip;

		if (!of_device_is_available(np))
			continue;

		if (property)
			skip = !of_get_property(np, property, NULL);
		else
			skip = of_get_property(np, "ti,timer-alwon", NULL) ||
			       of_get_property(np, "ti,timer-dsp", NULL) ||
			       of_get_property(np, "ti,timer-pwm", NULL) ||
			       of_get_property(np, "ti,timer-secure", NULL);

		if (skip)
			continue;

		of_add_property(np, &device_disabled);
		return np;
	}

	return NULL;
}
/**
 * omap_dmtimer_init - initialisation function when device tree is used
 *
 * On secure OMAP3 devices the "timer-secure" timers are reserved and
 * must not be registered by the kernel, so their nodes are disabled
 * dynamically at boot.
 */
static void __init omap_dmtimer_init(void)
{
	struct device_node *np;

	if (!cpu_is_omap34xx())
		return;

	/* General-purpose (non-secure) devices keep all their timers. */
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		return;

	/* Secure device: disable any secure timer node found. */
	np = omap_get_timer_dt(omap_timer_match, "ti,timer-secure");
	if (np)
		of_node_put(np);
}
/**
 * omap_dm_timer_get_errata - get errata flags for a timer
 *
 * Return the dmtimer errata flags for the running SoC; OMAP24xx is the
 * only family not affected by errata i103/i767.
 */
static u32 __init omap_dm_timer_get_errata(void)
{
	return cpu_is_omap24xx() ? 0 : OMAP_TIMER_ERRATA_I103_I767;
}
/*
 * Set up one dmtimer for use as system timer: locate it (via DT or
 * hwmod), map its registers, wire its IRQ, select its functional clock
 * parent and enable it.
 *
 * Fixes over the original:
 *  - oh_name is initialized to NULL: of_property_read_string_index()
 *    may fail without writing it, and the old code then tested an
 *    uninitialized pointer.
 *  - the DT node reference is dropped on every early-exit path.
 *  - timer->fclk is released when acquiring/setting the source clock
 *    fails (it used to leak on those paths).
 *
 * Returns 0 on success or a negative errno.
 */
static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
					 const char *fck_source,
					 const char *property,
					 const char **timer_name,
					 int posted)
{
	char name[10]; /* 10 = sizeof("gptXX_Xck0") */
	const char *oh_name = NULL;
	struct device_node *np;
	struct omap_hwmod *oh;
	struct resource irq, mem;
	struct clk *src;
	int r = 0;

	if (of_have_populated_dt()) {
		np = omap_get_timer_dt(omap_timer_match, property);
		if (!np)
			return -ENODEV;

		of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
		if (!oh_name) {
			of_node_put(np);
			return -ENODEV;
		}

		timer->irq = irq_of_parse_and_map(np, 0);
		if (!timer->irq) {
			of_node_put(np);
			return -ENXIO;
		}
		timer->io_base = of_iomap(np, 0);

		of_node_put(np);
	} else {
		if (omap_dm_timer_reserve_systimer(timer->id))
			return -ENODEV;
		sprintf(name, "timer%d", timer->id);
		oh_name = name;
	}

	oh = omap_hwmod_lookup(oh_name);
	if (!oh)
		return -ENODEV;

	*timer_name = oh->name;

	if (!of_have_populated_dt()) {
		r = omap_hwmod_get_resource_byname(oh, IORESOURCE_IRQ, NULL,
						   &irq);
		if (r)
			return -ENXIO;
		timer->irq = irq.start;

		r = omap_hwmod_get_resource_byname(oh, IORESOURCE_MEM, NULL,
						   &mem);
		if (r)
			return -ENXIO;

		/* Static mapping, never released */
		timer->io_base = ioremap(mem.start, mem.end - mem.start);
	}

	if (!timer->io_base)
		return -ENXIO;

	/* After the dmtimer is using hwmod these clocks won't be needed */
	timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
	if (IS_ERR(timer->fclk))
		return PTR_ERR(timer->fclk);

	src = clk_get(NULL, fck_source);
	if (IS_ERR(src)) {
		r = PTR_ERR(src);
		goto err_put_fclk;
	}

	if (clk_get_parent(timer->fclk) != src) {
		r = clk_set_parent(timer->fclk, src);
		if (r < 0) {
			pr_warn("%s: %s cannot set source\n", __func__,
				oh->name);
			clk_put(src);
			goto err_put_fclk;
		}
	}

	clk_put(src);

	omap_hwmod_setup_one(oh_name);
	omap_hwmod_enable(oh);
	__omap_dm_timer_init_regs(timer);

	if (posted)
		__omap_dm_timer_enable_posted(timer);

	/* Check that the intended posted configuration matches the actual */
	if (posted != timer->posted)
		return -EINVAL;

	timer->rate = clk_get_rate(timer->fclk);
	timer->reserved = 1;

	return r;

err_put_fclk:
	clk_put(timer->fclk);
	return r;
}
/*
 * Set up GP timer 'gptimer_id' as the system clockevent.  Failure is
 * fatal (BUG_ON): the kernel cannot boot without a tick source.
 */
static void __init omap2_gp_clockevent_init(int gptimer_id,
					    const char *fck_source,
					    const char *property)
{
	int res;

	clkev.id = gptimer_id;
	clkev.errata = omap_dm_timer_get_errata();

	/*
	 * For clock-event timers we never read the timer counter and
	 * so we are not impacted by errata i103 and i767. Therefore,
	 * we can safely ignore this errata for clock-event timers.
	 */
	__omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767);

	res = omap_dm_timer_init_one(&clkev, fck_source, property,
				     &clockevent_gpt.name, OMAP_TIMER_POSTED);
	BUG_ON(res);

	omap2_gp_timer_irq.dev_id = &clkev;
	setup_irq(clkev.irq, &omap2_gp_timer_irq);

	__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);

	clockevent_gpt.cpumask = cpu_possible_mask;
	clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
	clockevents_config_and_register(&clockevent_gpt, clkev.rate,
					3, /* Timer internal resynch latency */
					0xffffffff);

	pr_info("OMAP clockevent source: %s at %lu Hz\n", clockevent_gpt.name,
		clkev.rate);
}
/* Clocksource code */
/* GP timer used as free-running clocksource (when selected). */
static struct omap_dm_timer clksrc;
/* Set by the "clocksource=gp_timer" early param (see bottom of file). */
static bool use_gptimer_clksrc;

/*
 * clocksource
 */
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
	/* Non-posted read: the raw counter value is needed here. */
	return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
						     OMAP_TIMER_NONPOSTED);
}

static struct clocksource clocksource_gpt = {
	.rating		= 300,
	.read		= clocksource_read_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
/* sched_clock backend: raw counter, or 0 before the timer is reserved. */
static u32 notrace dmtimer_read_sched_clock(void)
{
	if (!clksrc.reserved)
		return 0;

	return __omap_dm_timer_read_counter(&clksrc, OMAP_TIMER_NONPOSTED);
}
/* DT nodes providing the 32 kHz sync counter clocksource. */
static struct of_device_id omap_counter_match[] __initdata = {
	{ .compatible = "ti,omap-counter32k", },
	{ }
};
/* Setup free-running counter for clocksource */
/*
 * Register the 32 kHz sync counter as the system clocksource.
 * Returns 0 on success or a negative errno when the counter is absent
 * or cannot be enabled.
 */
static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
{
	int ret;
	struct device_node *np = NULL;
	struct omap_hwmod *oh;
	void __iomem *vbase;
	const char *oh_name = "counter_32k";

	/*
	 * If device-tree is present, then search the DT blob
	 * to see if the 32kHz counter is supported.
	 */
	if (of_have_populated_dt()) {
		np = omap_get_timer_dt(omap_counter_match, NULL);
		if (!np)
			return -ENODEV;

		of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
		/* NOTE(review): np's reference is not dropped on this
		 * early-exit path - confirm whether that leak matters for
		 * a boot-time-only path. */
		if (!oh_name)
			return -ENODEV;
	}

	/*
	 * First check hwmod data is available for sync32k counter
	 */
	oh = omap_hwmod_lookup(oh_name);
	if (!oh || oh->slaves_cnt == 0)
		return -ENODEV;

	omap_hwmod_setup_one(oh_name);

	if (np) {
		vbase = of_iomap(np, 0);
		of_node_put(np);
	} else {
		vbase = omap_hwmod_get_mpu_rt_va(oh);
	}

	if (!vbase) {
		pr_warn("%s: failed to get counter_32k resource\n", __func__);
		return -ENXIO;
	}

	ret = omap_hwmod_enable(oh);
	if (ret) {
		pr_warn("%s: failed to enable counter_32k module (%d)\n",
			__func__, ret);
		return ret;
	}

	ret = omap_init_clocksource_32k(vbase);
	if (ret) {
		pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
			__func__, ret);
		/* Put the module back to idle if registration failed. */
		omap_hwmod_idle(oh);
	}

	return ret;
}
/*
 * Set up GP timer 'gptimer_id' as a free-running clocksource and as the
 * sched_clock backend.  Failure to acquire the timer is fatal (BUG_ON).
 */
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
						  const char *fck_source,
						  const char *property)
{
	int res;

	clksrc.id = gptimer_id;
	clksrc.errata = omap_dm_timer_get_errata();

	res = omap_dm_timer_init_one(&clksrc, fck_source, property,
				     &clocksource_gpt.name,
				     OMAP_TIMER_NONPOSTED);
	BUG_ON(res);

	/* Start counting from 0 with auto-reload (free-running). */
	__omap_dm_timer_load_start(&clksrc,
				   OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
				   OMAP_TIMER_NONPOSTED);
	setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);

	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
		pr_err("Could not register clocksource %s\n",
		       clocksource_gpt.name);
	else
		pr_info("OMAP clocksource: %s at %lu Hz\n",
			clocksource_gpt.name, clksrc.rate);
}
#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
/*
* The realtime counter also called master counter, is a free-running
* counter, which is related to real time. It produces the count used
* by the CPU local timer peripherals in the MPU cluster. The timer counts
* at a rate of 6.144 MHz. Because the device operates on different clocks
* in different power modes, the master counter shifts operation between
* clocks, adjusting the increment per clock in hardware accordingly to
* maintain a constant count rate.
*/
/*
 * Program the realtime (master) counter incrementer so it ticks at a
 * constant rate regardless of the system clock frequency.  The
 * numerator and denominator-reload registers are looked up from the
 * TRM per supported sys_clk rate.
 *
 * Bug fix: the denominator read-modify-write previously re-read the
 * NUMERATOR register instead of the DENUMERATOR_RELOAD register, so the
 * preserved upper bits and hence the programmed reload value were
 * wrong.  Mainline reads INCREMENTER_DENUMERATOR_RELOAD_OFFSET here.
 */
static void __init realtime_counter_init(void)
{
	void __iomem *base;
	static struct clk *sys_clk;
	unsigned long rate;
	unsigned int reg, num, den;

	base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
	if (!base) {
		pr_err("%s: ioremap failed\n", __func__);
		return;
	}
	sys_clk = clk_get(NULL, "sys_clkin");
	if (IS_ERR(sys_clk)) {
		pr_err("%s: failed to get system clock handle\n", __func__);
		iounmap(base);
		return;
	}

	rate = clk_get_rate(sys_clk);
	/* Numerator/denumerator values refer TRM Realtime Counter section */
	switch (rate) {
	case 12000000:
		num = 64;
		den = 125;
		break;
	case 13000000:
		num = 768;
		den = 1625;
		break;
	case 19200000:
		num = 8;
		den = 25;
		break;
	case 26000000:
		num = 384;
		den = 1625;
		break;
	case 27000000:
		num = 256;
		den = 1125;
		break;
	case 38400000:
	default:
		/* Program it for 38.4 MHz */
		num = 4;
		den = 25;
		break;
	}

	/* Program numerator and denumerator registers */
	reg = __raw_readl(base + INCREMENTER_NUMERATOR_OFFSET) &
			NUMERATOR_DENUMERATOR_MASK;
	reg |= num;
	__raw_writel(reg, base + INCREMENTER_NUMERATOR_OFFSET);

	reg = __raw_readl(base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET) &
			NUMERATOR_DENUMERATOR_MASK;
	reg |= den;
	__raw_writel(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);

	iounmap(base);
}
#else
/* SoC without the realtime counter: nothing to initialize. */
static inline void __init realtime_counter_init(void)
{}
#endif
/*
 * Generate a system-timer init function that uses GP timers for both
 * the clockevent and the clocksource.
 */
#define OMAP_SYS_GP_TIMER_INIT(name, clkev_nr, clkev_src, clkev_prop,	\
			       clksrc_nr, clksrc_src, clksrc_prop)	\
void __init omap##name##_gptimer_timer_init(void)			\
{									\
	if (omap_clk_init)						\
		omap_clk_init();					\
	omap_dmtimer_init();						\
	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\
	omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src,		\
					clksrc_prop);			\
}

/*
 * Generate a system-timer init function that uses a GP timer for the
 * clockevent and, by default, the 32k sync counter for the clocksource;
 * booting with clocksource="gp_timer" selects a GP timer instead.
 */
#define OMAP_SYS_32K_TIMER_INIT(name, clkev_nr, clkev_src, clkev_prop,	\
				clksrc_nr, clksrc_src, clksrc_prop)	\
void __init omap##name##_sync32k_timer_init(void)			\
{									\
	if (omap_clk_init)						\
		omap_clk_init();					\
	omap_dmtimer_init();						\
	omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop);	\
	/* Enable the use of clocksource="gp_timer" kernel parameter */	\
	if (use_gptimer_clksrc)						\
		omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src,	\
						clksrc_prop);		\
	else								\
		omap2_sync32k_clocksource_init();			\
}
#ifdef CONFIG_ARCH_OMAP2
OMAP_SYS_32K_TIMER_INIT(2, 1, "timer_32k_ck", "ti,timer-alwon",
			2, "timer_sys_ck", NULL);
#endif /* CONFIG_ARCH_OMAP2 */

#ifdef CONFIG_ARCH_OMAP3
OMAP_SYS_32K_TIMER_INIT(3, 1, "timer_32k_ck", "ti,timer-alwon",
			2, "timer_sys_ck", NULL);
/* Secure OMAP3: timer 12 runs from the secure 32k clock. */
OMAP_SYS_32K_TIMER_INIT(3_secure, 12, "secure_32k_fck", "ti,timer-secure",
			2, "timer_sys_ck", NULL);
#endif /* CONFIG_ARCH_OMAP3 */

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
OMAP_SYS_GP_TIMER_INIT(3, 2, "timer_sys_ck", NULL,
		       1, "timer_sys_ck", "ti,timer-alwon");
#endif

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
/*
 * NOTE(review): the 'static' in front of the macro makes
 * omap4_sync32k_timer_init() file-local - confirm no header declares
 * it extern.
 */
static OMAP_SYS_32K_TIMER_INIT(4, 1, "timer_32k_ck", "ti,timer-alwon",
			       2, "sys_clkin_ck", NULL);
#endif

#ifdef CONFIG_ARCH_OMAP4
#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29);

/* Register the Cortex-A9 TWD local timers on top of the 32k timer. */
void __init omap4_local_timer_init(void)
{
	omap4_sync32k_timer_init();
	/* Local timers are not supported on OMAP4430 ES1.0 */
	if (omap_rev() != OMAP4430_REV_ES1_0) {
		int err;

		if (of_have_populated_dt()) {
			clocksource_of_init();
			return;
		}

		err = twd_local_timer_register(&twd_local_timer);
		if (err)
			pr_err("twd_local_timer_register failed %d\n", err);
	}
}
#else /* CONFIG_LOCAL_TIMERS */
void __init omap4_local_timer_init(void)
{
	omap4_sync32k_timer_init();
}
#endif /* CONFIG_LOCAL_TIMERS */
#endif /* CONFIG_ARCH_OMAP4 */

#ifdef CONFIG_SOC_OMAP5
/* OMAP5: 32k system timer plus the free-running realtime counter. */
void __init omap5_realtime_timer_init(void)
{
	omap4_sync32k_timer_init();
	realtime_counter_init();

	clocksource_of_init();
}
#endif /* CONFIG_SOC_OMAP5 */
/**
 * omap_timer_init - build and register timer device with an
 * associated timer hwmod
 * @oh:		timer hwmod pointer to be used to build timer device
 * @unused:	parameter that can be passed from calling hwmod API
 *
 * Called by omap_hwmod_for_each_by_class to register each of the timer
 * devices present in the system. The number of timer devices is known
 * by parsing through the hwmod database for a given class name. At the
 * end of function call memory is allocated for timer device and it is
 * registered to the framework ready to be probed by the driver.
 *
 * Fix over the original: the sscanf() return value is now checked, so
 * a hwmod name not matching "timerNN" no longer leads to using an
 * uninitialized 'id' (undefined behavior).
 */
static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
{
	int id;
	int ret = 0;
	char *name = "omap_timer";
	struct dmtimer_platform_data *pdata;
	struct platform_device *pdev;
	struct omap_timer_capability_dev_attr *timer_dev_attr;

	pr_debug("%s: %s\n", __func__, oh->name);

	/* on secure device, do not register secure timer */
	timer_dev_attr = oh->dev_attr;
	if (omap_type() != OMAP2_DEVICE_TYPE_GP && timer_dev_attr)
		if (timer_dev_attr->timer_capability == OMAP_TIMER_SECURE)
			return ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		pr_err("%s: No memory for [%s]\n", __func__, oh->name);
		return -ENOMEM;
	}

	/*
	 * Extract the IDs from name field in hwmod database
	 * and use the same for constructing ids' for the
	 * timer devices. In a way, we are avoiding usage of
	 * static variable witin the function to do the same.
	 * CAUTION: We have to be careful and make sure the
	 * name in hwmod database does not change in which case
	 * we might either make corresponding change here or
	 * switch back static variable mechanism.
	 */
	if (sscanf(oh->name, "timer%2d", &id) != 1) {
		pr_err("%s: unexpected hwmod name [%s]\n", __func__, oh->name);
		kfree(pdata);
		return -EINVAL;
	}

	if (timer_dev_attr)
		pdata->timer_capability = timer_dev_attr->timer_capability;

	pdata->timer_errata = omap_dm_timer_get_errata();
	pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;

	pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata));

	if (IS_ERR(pdev)) {
		pr_err("%s: Can't build omap_device for %s: %s.\n",
			__func__, name, oh->name);
		ret = -EINVAL;
	}

	kfree(pdata);

	return ret;
}
/**
 * omap2_dm_timer_init - top level regular device initialization
 *
 * Walk the hwmod database for the "timer" class and build/register a
 * platform device for each timer found.
 */
static int __init omap2_dm_timer_init(void)
{
	/* With a device tree the timer devices are created dynamically. */
	if (of_have_populated_dt())
		return -ENODEV;

	if (omap_hwmod_for_each_by_class("timer", omap_timer_init, NULL)) {
		pr_err("%s: device registration failed.\n", __func__);
		return -EINVAL;
	}

	return 0;
}
omap_arch_initcall(omap2_dm_timer_init);
/**
 * omap2_override_clocksource - clocksource override with user configuration
 *
 * Allows user to override default clocksource, using kernel parameter
 * clocksource="gp_timer" (For all OMAP2PLUS architectures)
 *
 * Note that, here we are using same standard kernel parameter "clocksource=",
 * and not introducing any OMAP specific interface.
 */
static int __init omap2_override_clocksource(char *str)
{
	/*
	 * Only "gp_timer" selects the sys_clk based source; any other
	 * value (or no value) keeps the default sync_32k clocksource.
	 */
	if (str && !strcmp(str, "gp_timer"))
		use_gptimer_clksrc = true;

	return 0;
}
early_param("clocksource", omap2_override_clocksource);
| gpl-2.0 |
Jackeagle/android_kernel_sm_g800h_kk | drivers/media/tdmb/fc8080/dmbdrv_wrap_fc8080.c | 221 | 17406 | /*
* Copyright(c) 2008 SEC Corp. All Rights Reserved
*
* File name : DMBDrv_wrap_FC8080.c
*
* Description : fc8080 tuner control driver
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* History :
* ----------------------------------------------------------------------
* 2009/01/19 changsul.park initial
* 2009/09/23 jason porting QSC6270
*/
#include "dmbdrv_wrap_fc8080.h"
#include "fci_types.h"
#include "bbm.h"
#include "fci_oal.h"
#include "fc8080_demux.h"
#include "fic.h"
#include "fci_tun.h"
#include "fc8080_regs.h"
#include "tdmb.h"
/* Caches filled by the dmb_drv_get_fic_*() helpers (one per TMId type). */
struct sub_channel_info_type dmb_subchannel_info;
struct sub_channel_info_type dab_subchannel_info;
struct sub_channel_info_type dat_subchannel_info;

/* Last measured BER; 3000 (worst case) until the first real measurement. */
static u32 saved_ber = 3000;
/* Non-zero once dmb_drv_init() completed successfully. */
static u32 dmb_initialize;

unsigned char current_service_type = 0x18;	/* DSCTy 0x18 = DMB video */
unsigned char current_subchannel_id;
/* FIC callback: hand a raw FIC buffer to the FIC decoder.  Always 0. */
int tdmb_interrupt_fic_callback(u32 userdata, u8 *data, int length)
{
	fic_decoder_put((struct fic *)data, length);

	return 0;
}
#ifdef FEATURE_FC8080_DEBUG
/* Buffer enable/overrun bit masks for each service path. */
#define FC8080_DMB	0x01
#define FC8080_DATA	0x08
#define FC8080_DAB	0x04
/* Path(s) monitored by dmb_drv_check_overrun(). */
u16 dmb_mode = FC8080_DMB;
#endif
/* MSC callback: push received service data up to the TDMB core.  Always 0. */
int tdmb_interrupt_msc_callback(
	u32 userdata, u8 subchannel_id, u8 *data, int length)
{
	tdmb_store_data(&data[0], length);

	return 0;
}
/*
 * Read the Viterbi real-time BER counters and store the bit error rate,
 * scaled to errors per 10^4 bits, in *ber.  When no bits have been
 * received yet, report the worst-case value 3000.
 */
static int viterbi_rt_ber_read(unsigned int *ber)
{
	u32 dmp_ber_rxd_bits;
	u32 dmp_ber_err_bits;

	/* NOTE(review): the 0xe01 write presumably latches the BER
	 * counters before the reads below - confirm with the FC8080
	 * datasheet. */
	bbm_com_write(NULL, 0xe01, 0x0f);
	bbm_com_long_read(NULL, 0xe40, &dmp_ber_rxd_bits);
	bbm_com_long_read(NULL, 0xe44, &dmp_ber_err_bits);

	if (dmp_ber_rxd_bits)
		*ber = (dmp_ber_err_bits * 10000) / dmp_ber_rxd_bits;
	else
		*ber = 3000;

	print_log(NULL, "BER : %d \n", *ber);

	return BBM_OK;
}
/*
 * Map a BER value (errors per 10^4 bits) to a signal level 0..6:
 * level 0 for ber >= 900, one level better per 100 below that, and
 * level 6 (best) for ber < 400.
 */
static int get_signal_level(u32 ber, u8 *level)
{
	static const u32 limits[] = { 900, 800, 700, 600, 500, 400 };
	u8 lvl = 6;	/* default: ber < 400, best signal */
	int i;

	for (i = 0; i < 6; i++) {
		if (ber >= limits[i]) {
			lvl = (u8)i;
			break;
		}
	}

	*level = lvl;

	return BBM_OK;
}
/* Deselect every video/audio/data channel and, on TSIF, the demux paths. */
void dmb_drv_channel_deselect_all(void)
{
	bbm_com_video_deselect(NULL, 0, 0, 0);
	bbm_com_audio_deselect(NULL, 0, 1);
	bbm_com_data_deselect(NULL, 0, 2);

	/* Let in-flight data drain before tearing down the demux paths. */
	ms_wait(100);

#ifdef CONFIG_TDMB_TSIF
	fc8080_demux_deselect_video(current_subchannel_id, 0);
	fc8080_demux_deselect_channel(current_subchannel_id, 0);
#endif
}
#ifdef CONFIG_TDMB_TSIF
/* TSIF path: raw TS data is pushed in by the transport and demuxed here. */
void dmb_drv_isr(u8 *data, u32 length)
{
	fc8080_demux(data, length);
}
#else
/*
 * Interrupt path: let the BBM layer service the chip interrupt.
 * Fix: declared (void) instead of the old-style empty parameter list,
 * which in C declares a function with unspecified arguments.
 */
void dmb_drv_isr(void)
{
	bbm_com_isr(NULL);
}
#endif
/*
 * Bring up the FC8080: select the host interface (param is the
 * bus-specific handle), probe the chip, register FIC/MSC callbacks and
 * select the tuner.  Returns TDMB_SUCCESS or TDMB_FAIL.
 */
unsigned char dmb_drv_init(unsigned long param)
{
#ifdef FEATURE_INTERFACE_TEST_MODE
	int i;
	u8 data;
	u16 wdata;
	u32 ldata;
	u8 temp = 0x1e;
#endif

#ifdef CONFIG_TDMB_SPI
	if (bbm_com_hostif_select(NULL, BBM_SPI, param))
		return TDMB_FAIL;
#elif defined(CONFIG_TDMB_EBI)
	if (bbm_com_hostif_select(NULL, BBM_PPI, param))
		return TDMB_FAIL;
#elif defined(CONFIG_TDMB_TSIF)
	if (bbm_com_hostif_select(NULL, BBM_I2C, param))
		return TDMB_FAIL;
#endif

	/* check for factory chip interface test */
	if (bbm_com_probe(NULL) != BBM_OK) {
		DPRINTK("%s : BBM_PROBE fail\n", __func__);
		return TDMB_FAIL;
	}

#ifdef CONFIG_TDMB_TSIF
	fc8080_demux_fic_callback_register(
		(u32)NULL, tdmb_interrupt_fic_callback);
	fc8080_demux_msc_callback_register(
		(u32)NULL, tdmb_interrupt_msc_callback);
#else
	bbm_com_fic_callback_register(0, tdmb_interrupt_fic_callback);
	bbm_com_msc_callback_register(0, tdmb_interrupt_msc_callback);
#endif

	bbm_com_init(NULL);
	bbm_com_tuner_select(NULL, FC8080_TUNER, BAND3_TYPE);

#ifdef FEATURE_INTERFACE_TEST_MODE
	/* Factory test: exercise byte/word/long/tuner register access. */
	for (i = 0; i < 1000; i++) {
		bbm_com_write(NULL, 0xa4, i & 0xff);
		bbm_com_read(NULL, 0xa4, &data);
		if ((i & 0xff) != data)
			DPRINTK("FC8080 byte test (0x%x,0x%x)\r\n"
				, i & 0xff, data);
	}
	for (i = 0; i < 1000; i++) {
		bbm_com_word_write(NULL, 0xa4, i & 0xffff);
		bbm_com_word_read(NULL, 0xa4, &wdata);
		if ((i & 0xffff) != wdata)
			DPRINTK("FC8080 word test (0x%x,0x%x)\r\n"
				, i & 0xffff, wdata);
	}
	for (i = 0; i < 1000; i++) {
		bbm_com_long_write(NULL, 0xa4, i & 0xffffffff);
		bbm_com_long_read(NULL, 0xa4, &ldata);
		if ((i & 0xffffffff) != ldata)
			DPRINTK("FC8080 long test (0x%x,0x%x)\r\n"
				, i & 0xffffffff, ldata);
	}
	for (i = 0; i < 1000; i++) {
		temp = i&0xff;
		bbm_com_tuner_write(NULL, 0x13, 0x01, &temp, 0x01);
		bbm_com_tuner_read(NULL, 0x13, 0x01, &data, 0x01);
		if ((i & 0xff) != data)
			DPRINTK("FC8080 tuner test (0x%x,0x%x)\r\n"
				, i & 0xff, data);
	}
#endif

	/* Worst-case BER until the first real measurement. */
	saved_ber = 3000;
	dmb_initialize = 1;

	return TDMB_SUCCESS;
}
/* Tear down in reverse order of dmb_drv_init().  Always TDMB_SUCCESS. */
unsigned char dmb_drv_deinit(void)
{
	dmb_initialize = 0;

	dmb_drv_channel_deselect_all();

	bbm_com_deinit(NULL);
	bbm_com_fic_callback_deregister(NULL);
	bbm_com_msc_callback_deregister(NULL);
	bbm_com_hostif_deselect(NULL);

	return TDMB_SUCCESS;
}
#ifdef FIC_USE_I2C
/*
 * Poll FIC data over the byte interface: route FIC away from the TS
 * output, poll the buffer-status register up to 24 times feeding each
 * received half-buffer to the FIC decoder, then restore the routing.
 *
 * Fix: the original wrote "lmode &= ~0x40", destroying the saved
 * register value, so the final "restore" write put the cleared value
 * back instead of the original.  Use a non-destructive mask instead.
 */
void dmb_drv_get_fic(void)
{
	u8 i;
	u8 lmode;
	u16 mfIntStatus = 0;
	u8 buf[FIC_BUF_LENGTH / 2];

	bbm_com_read(NULL, BBM_TSO_SELREG, &lmode);
	/* Clear the FIC-to-TS routing bit, keeping lmode for the restore. */
	bbm_com_write(NULL, BBM_TSO_SELREG, lmode & ~0x40);

	bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0100);

	for (i = 0; i < 24; i++) {
		bbm_com_word_read(NULL, BBM_BUF_STATUS, &mfIntStatus);
		if (mfIntStatus & 0x0100) {
			/* Ack the status and pull one FIC half-buffer. */
			bbm_com_word_write(NULL, BBM_BUF_STATUS, mfIntStatus);
			bbm_com_data(NULL, BBM_FIC_I2C_RD
				, &buf[0], FIC_BUF_LENGTH / 2);
			fic_decoder_put((struct fic *)&buf[0]
				, FIC_BUF_LENGTH / 2);
			print_log(NULL, "fic_decoder_put 0x%x \n", buf[0]);
		}
		ms_wait(50);
	}

	bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0000);
	/* Restore the original TS output routing. */
	bbm_com_write(NULL, BBM_TSO_SELREG, lmode);
}
#endif
/*
 * Tune to 'frequency' and gather FIC data to validate the ensemble.
 * Returns TDMB_SUCCESS when a labelled, fully-decoded ensemble was
 * found, TDMB_FAIL otherwise.
 */
unsigned char dmb_drv_scan_ch(unsigned long frequency)
{
	struct esbinfo_t *esb;

	if (!dmb_initialize)
		return TDMB_FAIL;

	if (bbm_com_tuner_set_freq(NULL, frequency))
		return TDMB_FAIL;

	fic_decoder_subchannel_info_clean();

	if (bbm_com_scan_status(NULL)) {
		bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0000);
		return TDMB_FAIL;
	}

#ifdef FIC_USE_I2C
	dmb_drv_get_fic();
#else
	bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0100);

	/* wait 1.2 sec for gathering fic information */
	ms_wait(1200);

	bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0000);
#endif

	/* flag == 99 marks a fully-decoded, valid ensemble. */
	esb = fic_decoder_get_ensemble_info(0);
	if (esb->flag != 99) {
		print_log(NULL, "ESB ERROR \n");
		fic_decoder_subchannel_info_clean();
		return TDMB_FAIL;
	}

	if (strnlen(esb->label, sizeof(esb->label)) <= 0) {
		fic_decoder_subchannel_info_clean();
		print_log(NULL, "label ERROR \n");
		return TDMB_FAIL;
	}

	return TDMB_SUCCESS;
}
/*
 * Count decoded DMB video services (TMId 0x01, DSCTy 0x18).
 * Returns 0 when the driver is not initialized.
 * Fix: proper (void) prototype instead of an empty parameter list,
 * matching dmb_drv_get_dat_sub_ch_cnt(void).
 */
int dmb_drv_get_dmb_sub_ch_cnt(void)
{
	struct service_info_t *svc_info;
	int i, n;

	if (!dmb_initialize)
		return 0;

	n = 0;
	for (i = 0; i < MAX_SVC_NUM; i++) {
		svc_info = fic_decoder_get_service_info_list(i);
		/* flag bits 0-2 set == entry fully decoded */
		if ((svc_info->flag & 0x07) == 0x07) {
			if ((svc_info->tmid == 0x01)
				&& (svc_info->dscty == 0x18))
				n++;
		}
	}

	return n;
}
/*
 * Count decoded DAB audio services (TMId 0x00, ASCTy 0x00).
 * Returns 0 when the driver is not initialized.
 * Fix: proper (void) prototype instead of an empty parameter list.
 */
int dmb_drv_get_dab_sub_ch_cnt(void)
{
	struct service_info_t *svc_info;
	int i, n;

	if (!dmb_initialize)
		return 0;

	n = 0;
	for (i = 0; i < MAX_SVC_NUM; i++) {
		svc_info = fic_decoder_get_service_info_list(i);
		if ((svc_info->flag & 0x07) == 0x07) {
			if ((svc_info->tmid == 0x00)
				&& (svc_info->ascty == 0x00))
				n++;
		}
	}

	return n;
}
/* Count decoded data services (TMId 0x03); 0 when not initialized. */
int dmb_drv_get_dat_sub_ch_cnt(void)
{
	struct service_info_t *svc;
	int idx, count = 0;

	if (!dmb_initialize)
		return 0;

	for (idx = 0; idx < MAX_SVC_NUM; idx++) {
		svc = fic_decoder_get_service_info_list(idx);
		if ((svc->flag & 0x07) != 0x07)
			continue;
		if (svc->tmid == 0x03)
			count++;
	}

	return count;
}
/*
 * Return the decoded ensemble label, or NULL when the driver is not
 * initialized or no valid ensemble (flag == 99) has been decoded.
 * Fix: proper (void) prototype instead of an empty parameter list.
 */
char *dmb_drv_get_ensemble_label(void)
{
	struct esbinfo_t *esb;

	if (!dmb_initialize)
		return NULL;

	esb = fic_decoder_get_ensemble_info(0);
	if (esb->flag == 99)
		return (char *)esb->label;

	return NULL;
}
/*
 * Return the label of the subchannel_count-th decoded DMB video service
 * (TMId 0x01, DSCTy 0x18), or NULL when not found / not initialized.
 */
char *dmb_drv_get_sub_ch_dmb_label(int subchannel_count)
{
	struct service_info_t *svc;
	int idx, found = 0;

	if (!dmb_initialize)
		return NULL;

	for (idx = 0; idx < MAX_SVC_NUM; idx++) {
		svc = fic_decoder_get_service_info_list(idx);
		if ((svc->flag & 0x07) != 0x07)
			continue;
		if (svc->tmid != 0x01 || svc->dscty != 0x18)
			continue;
		if (found == subchannel_count)
			return (char *)svc->label;
		found++;
	}

	return NULL;
}
/*
 * Return the label of the subchannel_count-th decoded DAB audio service
 * (TMId 0x00, ASCTy 0x00), or NULL when not found / not initialized.
 */
char *dmb_drv_get_sub_ch_dab_label(int subchannel_count)
{
	struct service_info_t *svc;
	int idx, found = 0;

	if (!dmb_initialize)
		return NULL;

	for (idx = 0; idx < MAX_SVC_NUM; idx++) {
		svc = fic_decoder_get_service_info_list(idx);
		if ((svc->flag & 0x07) != 0x07)
			continue;
		if (svc->tmid != 0x00 || svc->ascty != 0x00)
			continue;
		if (found == subchannel_count)
			return (char *)svc->label;
		found++;
	}

	return NULL;
}
/*
 * Return the label of the subchannel_count-th decoded data service
 * (TMId 0x03), or NULL when not found / not initialized.
 */
char *dmb_drv_get_sub_ch_dat_label(int subchannel_count)
{
	struct service_info_t *svc;
	int idx, found = 0;

	if (!dmb_initialize)
		return NULL;

	for (idx = 0; idx < MAX_SVC_NUM; idx++) {
		svc = fic_decoder_get_service_info_list(idx);
		if ((svc->flag & 0x07) != 0x07)
			continue;
		if (svc->tmid != 0x03)
			continue;
		if (found == subchannel_count)
			return (char *)svc->label;
		found++;
	}

	return NULL;
}
/*
 * Fill dmb_subchannel_info from the decoded FIC for the
 * subchannel_count-th DMB video service (TMId 0x01, DSCTy 0x18),
 * including the per-application data blobs and the ensemble ID/ECC.
 * Returns NULL when the driver is not initialized; otherwise a pointer
 * to the (possibly zeroed, if no match) static structure.
 */
struct sub_channel_info_type *dmb_drv_get_fic_dmb(int subchannel_count)
{
	int i, n, j;
	struct esbinfo_t *esb;
	struct service_info_t *svc_info;
	u8 num_of_user_appl;

	if (!dmb_initialize)
		return NULL;

	memset((void *)&dmb_subchannel_info, 0, sizeof(dmb_subchannel_info));

	n = 0;
	for (i = 0; i < MAX_SVC_NUM; i++) {
		svc_info = fic_decoder_get_service_info_list(i);
		/* flag bits 0-2 set == entry fully decoded */
		if ((svc_info->flag & 0x07) == 0x07) {
			if ((svc_info->tmid == 0x01)
				&& (svc_info->dscty == 0x18)) {
				if (n == subchannel_count) {
					dmb_subchannel_info.ucSubchID
						= svc_info->sub_channel_id;
					dmb_subchannel_info.uiStartAddress
						= 0;
					dmb_subchannel_info.ucTMId
						= svc_info->tmid;
					dmb_subchannel_info.ucServiceType
						= svc_info->dscty;
					dmb_subchannel_info.ulServiceID
						= svc_info->sid;
					dmb_subchannel_info.scids
						= svc_info->scids;
					num_of_user_appl =
						svc_info->num_of_user_appl;
					dmb_subchannel_info.num_of_user_appl
						= num_of_user_appl;
					/* Copy per-application type/data. */
					for (j = 0; j < num_of_user_appl; j++) {
						dmb_subchannel_info.
						user_appl_type[j]
						= svc_info->user_appl_type[j];
						dmb_subchannel_info.
						user_appl_length[j]
						= svc_info->user_appl_length[j];
						memcpy(
						&dmb_subchannel_info.
						user_appl_data[j][0]
						, &svc_info->
						user_appl_data[j][0]
						, dmb_subchannel_info.
						user_appl_length[j]);
					}
					/* flag == 99 marks valid ensemble */
					esb = fic_decoder_get_ensemble_info(0);
					if (esb->flag == 99)
						dmb_subchannel_info.uiEnsembleID
							= esb->eid;
					else
						dmb_subchannel_info.uiEnsembleID
							= 0;
					dmb_subchannel_info.ecc = esb->ecc;
					break;
				}
				n++;
			}
		}
	}

	return &dmb_subchannel_info;
}
/*
 * Fill dab_subchannel_info from the decoded FIC for the
 * subchannel_count-th DAB audio service (TMId 0x00, ASCTy 0x00).
 * Returns NULL when the driver is not initialized.
 *
 * Bug fix: the ensemble ID used to be stored into dmb_subchannel_info
 * (copy/paste from the DMB variant), so the returned structure's
 * uiEnsembleID stayed zero.  It is now stored in dab_subchannel_info,
 * matching the (correct) data-service variant below.
 */
struct sub_channel_info_type *dmb_drv_get_fic_dab(int subchannel_count)
{
	int i, n;
	struct esbinfo_t *esb;
	struct service_info_t *svc_info;

	if (!dmb_initialize)
		return NULL;

	memset((void *)&dab_subchannel_info, 0, sizeof(dab_subchannel_info));

	n = 0;
	for (i = 0; i < MAX_SVC_NUM; i++) {
		svc_info = fic_decoder_get_service_info_list(i);
		/* flag bits 0-2 set == entry fully decoded */
		if ((svc_info->flag & 0x07) != 0x07)
			continue;
		if (svc_info->tmid != 0x00 || svc_info->ascty != 0x00)
			continue;
		if (n != subchannel_count) {
			n++;
			continue;
		}

		dab_subchannel_info.ucSubchID = svc_info->sub_channel_id;
		dab_subchannel_info.uiStartAddress = 0;
		dab_subchannel_info.ucTMId = svc_info->tmid;
		dab_subchannel_info.ucServiceType = svc_info->ascty;
		dab_subchannel_info.ulServiceID = svc_info->sid;
		dab_subchannel_info.scids = svc_info->scids;

		/* flag == 99 marks a valid ensemble */
		esb = fic_decoder_get_ensemble_info(0);
		if (esb->flag == 99)
			dab_subchannel_info.uiEnsembleID = esb->eid;
		else
			dab_subchannel_info.uiEnsembleID = 0;
		dab_subchannel_info.ecc = esb->ecc;
		break;
	}

	return &dab_subchannel_info;
}
/*
 * Fill dat_subchannel_info from the decoded FIC for the
 * subchannel_count-th data service (TMId 0x03), resolving the service
 * type through the service-component info (get_sc_info) and copying
 * the per-application data blobs and ensemble ID/ECC.
 * Returns NULL when the driver is not initialized.
 */
struct sub_channel_info_type *dmb_drv_get_fic_dat(int subchannel_count)
{
	int i, n, j;
	struct esbinfo_t *esb;
	struct service_info_t *svc_info;
	u8 num_of_user_appl;
	struct scInfo_t *pScInfo;

	if (!dmb_initialize)
		return NULL;

	memset((void *)&dat_subchannel_info, 0, sizeof(dat_subchannel_info));

	n = 0;
	for (i = 0; i < MAX_SVC_NUM; i++) {
		svc_info = fic_decoder_get_service_info_list(i);
		/* flag bits 0-2 set == entry fully decoded */
		if ((svc_info->flag & 0x07) == 0x07) {
			if (svc_info->tmid == 0x03) {
				if (n == subchannel_count) {
					dat_subchannel_info.ucSubchID =
						svc_info->sub_channel_id;
					dat_subchannel_info.uiStartAddress = 0;
					dat_subchannel_info.ucTMId
						= svc_info->tmid;
					/* DSCTy comes from the component. */
					pScInfo = get_sc_info(svc_info->scid);
					dat_subchannel_info.ucServiceType =
						pScInfo->dscty;
					dat_subchannel_info.ulServiceID =
						svc_info->sid;
					dat_subchannel_info.scids =
						svc_info->scids;
					num_of_user_appl =
						svc_info->num_of_user_appl;
					dat_subchannel_info.num_of_user_appl
						= num_of_user_appl;
					/* Copy per-application type/data. */
					for (j = 0; j < num_of_user_appl; j++) {
						dat_subchannel_info.
						user_appl_type[j]
						= svc_info->user_appl_type[j];
						dat_subchannel_info.
						user_appl_length[j]
						= svc_info->user_appl_length[j];
						memcpy(
						&dat_subchannel_info.
						user_appl_data[j][0]
						, &svc_info->
						user_appl_data[j][0]
						, dat_subchannel_info.
						user_appl_length[j]);
					}
					/* flag == 99 marks valid ensemble */
					esb = fic_decoder_get_ensemble_info(0);
					if (esb->flag == 99)
						dat_subchannel_info.uiEnsembleID
							= esb->eid;
					else
						dat_subchannel_info.uiEnsembleID
							= 0;
					dat_subchannel_info.ecc = esb->ecc;
					break;
				}
				n++;
			}
		}
	}

	return &dat_subchannel_info;
}
#ifdef FEATURE_FC8080_DEBUG
/*
 * Check the FC8080 buffer-overrun status register.  On an overrun for
 * the currently active mode, clear the status and, when @reset is
 * nonzero, toggle the buffer enable bit off and on to restore it.
 */
void dmb_drv_check_overrun(u8 reset)
{
	u16 overrun;
	u16 temp = 0;

	bbm_com_word_read(NULL, BBM_BUF_OVERRUN, &overrun);
	if (overrun & dmb_mode) {
		/* overrun clear */
		bbm_com_word_write(NULL, BBM_BUF_OVERRUN, overrun);
		if (reset) {
			/* buffer restore: disable then re-enable our
			 * mode's buffer bit */
			bbm_com_word_read(NULL, BBM_BUF_ENABLE, &temp);
			temp &= ~dmb_mode;
			bbm_com_word_write(NULL, BBM_BUF_ENABLE, temp);
			temp |= dmb_mode;
			bbm_com_word_write(NULL, BBM_BUF_ENABLE, temp);
		}
		DPRINTK("fc8080 Overrun occured\n");
	}
}
#endif
/*
 * Tune to @frequency and select @subchannel for the given service type.
 * Service type 0x18 appears to mean DMB video, 0x00 DAB audio, and
 * anything else a data channel -- TODO confirm against callers.
 * Returns TDMB_SUCCESS or TDMB_FAIL.
 */
unsigned char dmb_drv_set_ch(
	unsigned long frequency
	, unsigned char subchannel
	, unsigned char sevice_type)
{
	if (!dmb_initialize)
		return TDMB_FAIL;

	current_service_type = sevice_type;
	current_subchannel_id = subchannel;

	dmb_drv_channel_deselect_all();

	if (bbm_com_tuner_set_freq(NULL, frequency) != BBM_OK)
		return TDMB_FAIL;

	/* route the sub-channel to the matching demod buffer */
	if (sevice_type == 0x18)
		bbm_com_video_select(NULL, subchannel, 0, 0);
	else if (sevice_type == 0x00)
		bbm_com_audio_select(NULL, subchannel, 1);
	else
		bbm_com_data_select(NULL, subchannel, 2);

#ifdef CONFIG_TDMB_TSIF
	/* mirror the selection in the TSIF demux */
	if (sevice_type == 0x18)
		fc8080_demux_select_video(subchannel, 0);
	else if (sevice_type == 0x00)
		fc8080_demux_select_channel(subchannel, 1);
	else
		fc8080_demux_select_channel(subchannel, 2);
#endif

#ifdef FEATURE_FC8080_DEBUG
	/* remember the active mode for overrun checking */
	if (sevice_type == 0x18)
		dmb_mode = FC8080_DMB;
	else if (sevice_type == 0x00)
		dmb_mode = FC8080_DAB;
	else
		dmb_mode = FC8080_DATA;
#endif

	return TDMB_SUCCESS;
}
/*
 * Factory-test variant of dmb_drv_set_ch(): additionally verifies the
 * scan status after tuning and fails when no channel is detected.
 * Unlike dmb_drv_set_ch(), no TSIF demux selection is performed here.
 * Returns TDMB_SUCCESS or TDMB_FAIL.
 */
unsigned char dmb_drv_set_ch_factory(
	unsigned long frequency
	, unsigned char subchannel
	, unsigned char sevice_type)
{
	if (!dmb_initialize)
		return TDMB_FAIL;

	current_service_type = sevice_type;
	current_subchannel_id = subchannel;

	dmb_drv_channel_deselect_all();

	if (bbm_com_tuner_set_freq(NULL, frequency) != BBM_OK)
		return TDMB_FAIL;

	/* factory check: ensure the tuner actually locked a channel */
	if (bbm_com_scan_status(NULL)) {
		DPRINTK("%s scan fail\n", __func__);
		return TDMB_FAIL;
	}

	if (sevice_type == 0x18)
		bbm_com_video_select(NULL, subchannel, 0, 0);
	else if (sevice_type == 0x00)
		bbm_com_audio_select(NULL, subchannel, 1);
	else
		bbm_com_data_select(NULL, subchannel, 2);

#ifdef FEATURE_FC8080_DEBUG
	if (sevice_type == 0x18)
		dmb_mode = FC8080_DMB;
	else if (sevice_type == 0x00)
		dmb_mode = FC8080_DAB;
	else
		dmb_mode = FC8080_DATA;
#endif

	return TDMB_SUCCESS;
}
/*
 * Return the BER value cached by the most recent dmb_drv_get_ant() call.
 * Prototype fixed to (void): in C an empty parameter list declares an
 * unspecified argument list, not "takes no arguments".
 */
unsigned short dmb_drv_get_ber(void)
{
	return saved_ber;
}
/*
 * Read the real-time Viterbi BER, cache it in saved_ber (3000 on read
 * failure or when uninitialized), and map it to an antenna-level value.
 * Returns the antenna level, or 0 on any failure.
 */
unsigned char dmb_drv_get_ant(void)
{
	u8 level = 0;
	unsigned int ber;

	if (!dmb_initialize) {
		saved_ber = 3000;
		return 0;
	}

	if (viterbi_rt_ber_read(&ber)) {
		saved_ber = 3000;
		return 0;
	}

	/* treat a very small BER as clean reception */
	if (ber <= 20)
		ber = 0;

	saved_ber = ber;

	if (get_signal_level(ber, &level))
		return 0;

#ifdef FEATURE_FC8080_DEBUG
	/* piggy-back an overrun check (with buffer reset) on each poll */
	dmb_drv_check_overrun(1);
#endif

	return level;
}
/*
 * Return the tuner RSSI in dBm, or -110 (floor value) when the driver
 * is not initialized.  Prototype fixed to (void): an empty parameter
 * list in C declares an unspecified argument list.
 */
signed short dmb_drv_get_rssi(void)
{
	s32 rssi;

	if (!dmb_initialize) {
		rssi = -110;
		return rssi;
	}

	bbm_com_tuner_get_rssi(NULL, &rssi);

	return (signed short)rssi;
}
| gpl-2.0 |
threader/gcc-5666.3-darwin8 | libgomp/testsuite/libgomp.c/pr30494.c | 221 | 1095 | /* PR middle-end/30494 */
/* { dg-do run } */
#include <omp.h>
int errors;
/*
 * Verify that the VLAs filled by foo()/bar() contain the expected
 * thread-dependent values; bump the global error counter atomically on
 * any mismatch.  Fix: the function is declared int but had no return
 * statement (UB if the value were ever used); it now returns 0.
 */
int
check (int m, int i, int *v, int *w)
{
  int j;
  int n = omp_get_thread_num ();
  for (j = 0; j < m; j++)
    if (v[j] != j + n)
#pragma omp atomic
      errors += 1;
  for (j = 0; j < m * 3 + i; j++)
    if (w[j] != j + 10 + n)
#pragma omp atomic
      errors += 1;
  return 0;
}
/*
 * Orphaned "#pragma omp for" over variable-length arrays v[n] and
 * w[n*3+i]: each thread fills the VLAs of its iterations with
 * thread-dependent values and verifies them via check().  Part of the
 * PR middle-end/30494 regression test; the code shape (VLAs inside an
 * OpenMP loop body) is what is being tested.
 */
int
foo (int n, int m)
{
  int i;
#pragma omp for
  for (i = 0; i < 6; i++)
    {
      int v[n], w[n * 3 + i], j;
      for (j = 0; j < n; j++)
	v[j] = j + omp_get_thread_num ();
      for (j = 0; j < n * 3 + i; j++)
	w[j] = j + 10 + omp_get_thread_num ();
      check (m, i, v, w);
    }
  return 0;
}
/*
 * Same as foo(), but with a combined "#pragma omp parallel for" so the
 * VLAs live inside an implicitly-created parallel region.
 */
int
bar (int n, int m)
{
  int i;
#pragma omp parallel for num_threads (4)
  for (i = 0; i < 6; i++)
    {
      int v[n], w[n * 3 + i], j;
      for (j = 0; j < n; j++)
	v[j] = j + omp_get_thread_num ();
      for (j = 0; j < n * 3 + i; j++)
	w[j] = j + 10 + omp_get_thread_num ();
      check (m, i, v, w);
    }
  return 0;
}
/*
 * Run foo() inside an explicit parallel region, then bar() with its own
 * parallel loop.  NOTE(review): the global 'errors' counter is never
 * inspected here, so main always exits 0 -- confirm whether the test
 * relies on a crash/ICE rather than the error count.
 */
int
main (void)
{
#pragma omp parallel num_threads (3)
  foo (128, 128);
  bar (256, 256);
  return 0;
}
| gpl-2.0 |
HostZero/android_kernel_zuk_msm8996 | drivers/usb/phy/phy-tahvo.c | 733 | 11392 | /*
* Tahvo USB transceiver driver
*
* Copyright (C) 2005-2006 Nokia Corporation
*
* Parts copied from isp1301_omap.c.
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
*
* Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
* Modified for Retu/Tahvo MFD by Aaro Koskinen.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/extcon.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
#include <linux/mfd/retu.h>
#include <linux/usb/gadget.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "tahvo-usb"
#define TAHVO_REG_IDSR 0x02
#define TAHVO_REG_USBR 0x06
#define USBR_SLAVE_CONTROL (1 << 8)
#define USBR_VPPVIO_SW (1 << 7)
#define USBR_SPEED (1 << 6)
#define USBR_REGOUT (1 << 5)
#define USBR_MASTER_SW2 (1 << 4)
#define USBR_MASTER_SW1 (1 << 3)
#define USBR_SLAVE_SW (1 << 2)
#define USBR_NSUSPEND (1 << 1)
#define USBR_SEMODE (1 << 0)
#define TAHVO_MODE_HOST 0
#define TAHVO_MODE_PERIPHERAL 1
struct tahvo_usb {
struct platform_device *pt_dev;
struct usb_phy phy;
int vbus_state;
struct mutex serialize;
struct clk *ick;
int irq;
int tahvo_mode;
struct extcon_dev extcon;
};
static const char *tahvo_cable[] = {
"USB-HOST",
"USB",
NULL,
};
/* sysfs "vbus" read handler: report the cached VBUS state as on/off. */
static ssize_t vbus_state_show(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct tahvo_usb *tu = dev_get_drvdata(device);
	const char *state = tu->vbus_state ? "on" : "off";

	return sprintf(buf, "%s\n", state);
}
static DEVICE_ATTR(vbus, 0444, vbus_state_show, NULL);
/*
 * Sample the VBUS bit from Tahvo's IDSR register and advance the OTG
 * state machine: in peripheral mode connect/disconnect the gadget, in
 * host mode move between A_IDLE and A_HOST.  extcon and sysfs are only
 * notified on an actual VBUS state change.  Caller holds tu->serialize
 * (the IRQ handler and the mode-switch paths both take it).
 */
static void check_vbus_state(struct tahvo_usb *tu)
{
	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
	int reg, prev_state;

	reg = retu_read(rdev, TAHVO_REG_IDSR);
	if (reg & TAHVO_STAT_VBUS) {
		switch (tu->phy.state) {
		case OTG_STATE_B_IDLE:
			/* Enable the gadget driver */
			if (tu->phy.otg->gadget)
				usb_gadget_vbus_connect(tu->phy.otg->gadget);
			tu->phy.state = OTG_STATE_B_PERIPHERAL;
			break;
		case OTG_STATE_A_IDLE:
			/*
			 * Session is now valid assuming the USB hub is driving
			 * Vbus.
			 */
			tu->phy.state = OTG_STATE_A_HOST;
			break;
		default:
			break;
		}
		dev_info(&tu->pt_dev->dev, "USB cable connected\n");
	} else {
		switch (tu->phy.state) {
		case OTG_STATE_B_PERIPHERAL:
			if (tu->phy.otg->gadget)
				usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
			tu->phy.state = OTG_STATE_B_IDLE;
			break;
		case OTG_STATE_A_HOST:
			tu->phy.state = OTG_STATE_A_IDLE;
			break;
		default:
			break;
		}
		dev_info(&tu->pt_dev->dev, "USB cable disconnected\n");
	}

	prev_state = tu->vbus_state;
	tu->vbus_state = reg & TAHVO_STAT_VBUS;
	if (prev_state != tu->vbus_state) {
		extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
		/*
		 * NOTE(review): the attribute is created as "vbus" (see
		 * DEVICE_ATTR above) but the notification names
		 * "vbus_state" -- confirm the intended sysfs file name.
		 */
		sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
	}
}
/*
 * Switch the transceiver into host mode: announce USB-HOST on extcon,
 * power up the PHY with the host-side switch routing, then re-evaluate
 * VBUS.  Caller holds tu->serialize.
 */
static void tahvo_usb_become_host(struct tahvo_usb *tu)
{
	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);

	extcon_set_cable_state(&tu->extcon, "USB-HOST", true);

	/* Power up the transceiver in USB host mode */
	retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
		   USBR_MASTER_SW2 | USBR_MASTER_SW1);
	tu->phy.state = OTG_STATE_A_IDLE;

	check_vbus_state(tu);
}
/*
 * Leave host mode.  Only the OTG state is reset here; powering the
 * transceiver off is done separately via tahvo_usb_power_off().
 */
static void tahvo_usb_stop_host(struct tahvo_usb *tu)
{
	tu->phy.state = OTG_STATE_A_IDLE;
}
/*
 * Switch the transceiver into peripheral mode: drop USB-HOST on extcon,
 * power up the PHY with the slave-side switch routing, then re-evaluate
 * VBUS (which may immediately connect the gadget).  Caller holds
 * tu->serialize.
 */
static void tahvo_usb_become_peripheral(struct tahvo_usb *tu)
{
	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);

	extcon_set_cable_state(&tu->extcon, "USB-HOST", false);

	/* Power up transceiver and set it in USB peripheral mode */
	retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
		   USBR_NSUSPEND | USBR_SLAVE_SW);
	tu->phy.state = OTG_STATE_B_IDLE;

	check_vbus_state(tu);
}
/* Leave peripheral mode: disconnect the gadget (if any), back to B_IDLE. */
static void tahvo_usb_stop_peripheral(struct tahvo_usb *tu)
{
	if (tu->phy.otg->gadget)
		usb_gadget_vbus_disconnect(tu->phy.otg->gadget);
	tu->phy.state = OTG_STATE_B_IDLE;
}
/*
 * Power the transceiver down completely, first detaching the gadget so
 * it observes a VBUS loss.  The OTG state becomes UNDEFINED until a
 * mode is selected again.
 */
static void tahvo_usb_power_off(struct tahvo_usb *tu)
{
	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);

	/* Disable gadget controller if any */
	if (tu->phy.otg->gadget)
		usb_gadget_vbus_disconnect(tu->phy.otg->gadget);

	/* Power off transceiver */
	retu_write(rdev, TAHVO_REG_USBR, 0);
	tu->phy.state = OTG_STATE_UNDEFINED;
}
/*
 * usb_phy .set_suspend hook: gate the NSUSPEND bit in Tahvo's USB
 * register (bit set means "not suspended").  Always returns 0.
 */
static int tahvo_usb_set_suspend(struct usb_phy *dev, int suspend)
{
	struct tahvo_usb *tu = container_of(dev, struct tahvo_usb, phy);
	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
	u16 usbr;

	dev_dbg(&tu->pt_dev->dev, "%s\n", __func__);

	usbr = retu_read(rdev, TAHVO_REG_USBR);
	usbr &= ~USBR_NSUSPEND;
	if (!suspend)
		usbr |= USBR_NSUSPEND;
	retu_write(rdev, TAHVO_REG_USBR, usbr);

	return 0;
}
/*
 * usb_otg .set_host hook.  Registering a host controller powers the PHY
 * up in host mode when the driver is configured for host; unregistering
 * (host == NULL) powers it off.  The host pointer is kept NULL while
 * tahvo_usb_become_host() runs and only assigned afterwards --
 * presumably so check_vbus_state() doesn't see a half-registered host;
 * TODO confirm that ordering rationale.
 */
static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
	struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);

	dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host);

	mutex_lock(&tu->serialize);

	if (host == NULL) {
		if (tu->tahvo_mode == TAHVO_MODE_HOST)
			tahvo_usb_power_off(tu);
		otg->host = NULL;
		mutex_unlock(&tu->serialize);
		return 0;
	}

	if (tu->tahvo_mode == TAHVO_MODE_HOST) {
		otg->host = NULL;
		tahvo_usb_become_host(tu);
	}

	otg->host = host;

	mutex_unlock(&tu->serialize);

	return 0;
}
/*
 * usb_otg .set_peripheral hook.  Registering a gadget powers the PHY up
 * in peripheral mode when the driver is configured for it;
 * unregistering (gadget == NULL) powers it off.  Always returns 0.
 */
static int tahvo_usb_set_peripheral(struct usb_otg *otg,
				    struct usb_gadget *gadget)
{
	struct tahvo_usb *tu = container_of(otg->phy, struct tahvo_usb, phy);

	dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget);

	mutex_lock(&tu->serialize);

	if (!gadget) {
		if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
			tahvo_usb_power_off(tu);
		tu->phy.otg->gadget = NULL;
		mutex_unlock(&tu->serialize);
		return 0;
	}

	tu->phy.otg->gadget = gadget;
	if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
		tahvo_usb_become_peripheral(tu);

	mutex_unlock(&tu->serialize);

	return 0;
}
/* Threaded IRQ handler for VBUS changes: re-check state under the lock. */
static irqreturn_t tahvo_usb_vbus_interrupt(int irq, void *_tu)
{
	struct tahvo_usb *tu = _tu;

	mutex_lock(&tu->serialize);
	check_vbus_state(tu);
	mutex_unlock(&tu->serialize);

	return IRQ_HANDLED;
}
/* sysfs "otg_mode" read handler: print the currently configured role. */
static ssize_t otg_mode_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct tahvo_usb *tu = dev_get_drvdata(device);

	if (tu->tahvo_mode == TAHVO_MODE_HOST)
		return sprintf(buf, "host\n");
	if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
		return sprintf(buf, "peripheral\n");

	return -EINVAL;
}
/*
 * sysfs "otg_mode" write handler: accept "host" or "peripheral" (prefix
 * match), tear down the previous role and bring up the new one if the
 * matching host controller / gadget is already registered, otherwise
 * power the transceiver off.  Returns strlen(buf) on success (not
 * @count), -EINVAL on unrecognized input.
 */
static ssize_t otg_mode_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tahvo_usb *tu = dev_get_drvdata(device);
	int r;

	mutex_lock(&tu->serialize);
	if (count >= 4 && strncmp(buf, "host", 4) == 0) {
		if (tu->tahvo_mode == TAHVO_MODE_PERIPHERAL)
			tahvo_usb_stop_peripheral(tu);
		tu->tahvo_mode = TAHVO_MODE_HOST;
		if (tu->phy.otg->host) {
			dev_info(device, "HOST mode: host controller present\n");
			tahvo_usb_become_host(tu);
		} else {
			dev_info(device, "HOST mode: no host controller, powering off\n");
			tahvo_usb_power_off(tu);
		}
		r = strlen(buf);
	} else if (count >= 10 && strncmp(buf, "peripheral", 10) == 0) {
		if (tu->tahvo_mode == TAHVO_MODE_HOST)
			tahvo_usb_stop_host(tu);
		tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
		if (tu->phy.otg->gadget) {
			dev_info(device, "PERIPHERAL mode: gadget driver present\n");
			tahvo_usb_become_peripheral(tu);
		} else {
			dev_info(device, "PERIPHERAL mode: no gadget driver, powering off\n");
			tahvo_usb_power_off(tu);
		}
		r = strlen(buf);
	} else {
		r = -EINVAL;
	}
	mutex_unlock(&tu->serialize);

	return r;
}
static DEVICE_ATTR(otg_mode, 0644, otg_mode_show, otg_mode_store);
static struct attribute *tahvo_attributes[] = {
&dev_attr_vbus.attr,
&dev_attr_otg_mode.attr,
NULL
};
static struct attribute_group tahvo_attr_group = {
.attrs = tahvo_attributes,
};
/*
 * Bind the Tahvo USB platform device: allocate phy/otg state, enable
 * the (optional) interface clock, register the extcon device and USB
 * PHY, request the VBUS interrupt and create the sysfs attributes.
 * Each error path unwinds everything acquired so far in reverse order.
 */
static int tahvo_usb_probe(struct platform_device *pdev)
{
	struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
	struct tahvo_usb *tu;
	int ret;

	tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL);
	if (!tu)
		return -ENOMEM;

	tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg),
				   GFP_KERNEL);
	if (!tu->phy.otg)
		return -ENOMEM;

	tu->pt_dev = pdev;

	/* Default mode */
#ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT
	tu->tahvo_mode = TAHVO_MODE_HOST;
#else
	tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
#endif

	mutex_init(&tu->serialize);

	/* the clock is optional: probe continues without it */
	tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick");
	if (!IS_ERR(tu->ick))
		clk_enable(tu->ick);

	/*
	 * Set initial state, so that we generate kevents only on state changes.
	 */
	tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;

	tu->extcon.name = DRIVER_NAME;
	tu->extcon.supported_cable = tahvo_cable;
	tu->extcon.dev.parent = &pdev->dev;

	ret = extcon_dev_register(&tu->extcon);
	if (ret) {
		dev_err(&pdev->dev, "could not register extcon device: %d\n",
			ret);
		goto err_disable_clk;
	}

	/* Set the initial cable state. */
	extcon_set_cable_state(&tu->extcon, "USB-HOST",
			       tu->tahvo_mode == TAHVO_MODE_HOST);
	extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);

	/* Create OTG interface */
	tahvo_usb_power_off(tu);
	tu->phy.dev = &pdev->dev;
	tu->phy.state = OTG_STATE_UNDEFINED;
	tu->phy.label = DRIVER_NAME;
	tu->phy.set_suspend = tahvo_usb_set_suspend;

	tu->phy.otg->phy = &tu->phy;
	tu->phy.otg->set_host = tahvo_usb_set_host;
	tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;

	ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
			ret);
		goto err_extcon_unreg;
	}

	dev_set_drvdata(&pdev->dev, tu);

	tu->irq = platform_get_irq(pdev, 0);
	/* threaded handler: the check path sleeps (mutex + MFD access) */
	ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
				   "tahvo-vbus", tu);
	if (ret) {
		dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
			ret);
		goto err_remove_phy;
	}

	/* Attributes */
	ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "cannot create sysfs group: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(tu->irq, tu);
err_remove_phy:
	usb_remove_phy(&tu->phy);
err_extcon_unreg:
	extcon_dev_unregister(&tu->extcon);
err_disable_clk:
	if (!IS_ERR(tu->ick))
		clk_disable(tu->ick);
	return ret;
}
/*
 * Unbind: tear down in reverse order of probe (sysfs group, IRQ, PHY,
 * extcon, clock).  The allocations themselves are devm-managed.
 */
static int tahvo_usb_remove(struct platform_device *pdev)
{
	struct tahvo_usb *tu = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
	free_irq(tu->irq, tu);
	usb_remove_phy(&tu->phy);
	extcon_dev_unregister(&tu->extcon);
	if (!IS_ERR(tu->ick))
		clk_disable(tu->ick);

	return 0;
}
static struct platform_driver tahvo_usb_driver = {
.probe = tahvo_usb_probe,
.remove = tahvo_usb_remove,
.driver = {
.name = "tahvo-usb",
.owner = THIS_MODULE,
},
};
module_platform_driver(tahvo_usb_driver);
MODULE_DESCRIPTION("Tahvo USB transceiver driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Juha Yrjölä, Tony Lindgren, and Timo Teräs");
MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
| gpl-2.0 |
GoinsWithTheWind/drm-prime-sync | drivers/staging/lustre/lustre/lclient/lcommon_misc.c | 1245 | 5732 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
* cl code shared between vvp and liblustre (and other Lustre clients in the
* future).
*
*/
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/cl_object.h"
#include "../include/lclient.h"
#include "../include/lustre_lite.h"
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
* maximum-sized (= maximum striped) EA and cookie without having to
* calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
{
	struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
	__u32 valsize = sizeof(struct lov_desc);
	int rc, easize, def_easize, cookiesize;
	struct lov_desc desc;
	__u16 stripes, def_stripes;

	/* ask the LOV layer for its target / striping configuration */
	rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
			  &valsize, &desc, NULL);
	if (rc)
		return rc;

	/* maximum EA size: size the EA for a maximally striped file */
	stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT);
	lsm.lsm_stripe_count = stripes;
	easize = obd_size_diskmd(dt_exp, &lsm);

	/* default EA size: based on the default stripe count */
	def_stripes = min_t(__u32, desc.ld_default_stripe_count,
			    LOV_MAX_STRIPE_COUNT);
	lsm.lsm_stripe_count = def_stripes;
	def_easize = obd_size_diskmd(dt_exp, &lsm);

	/* one llog cookie per stripe at most */
	cookiesize = stripes * sizeof(struct llog_cookie);

	/* default cookiesize is 0 because from 2.4 server doesn't send
	 * llog cookies to client. */
	CDEBUG(D_HA,
	       "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
	       def_easize, easize, cookiesize);

	rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0);
	return rc;
}
/**
* This function is used as an upcall-callback hooked by liblustre and llite
* clients into obd_notify() listeners chain to handle notifications about
* change of import connect_flags. See llu_fsswop_mount() and
* lustre_common_fill_super().
*/
int cl_ocd_update(struct obd_device *host,
		  struct obd_device *watched,
		  enum obd_notify_event ev, void *owner, void *data)
{
	struct lustre_client_ocd *lco;
	struct client_obd *cli;
	__u64 flags;
	int result;

	/* only OSC devices are expected on this notification chain */
	if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
		cli = &watched->u.cli;
		lco = owner;
		flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
		CDEBUG(D_SUPER, "Changing connect_flags: %#llx -> %#llx\n",
		       lco->lco_flags, flags);
		mutex_lock(&lco->lco_lock);
		/* keep only the flags common to all connected OSCs */
		lco->lco_flags &= flags;
		/* for each osc event update ea size */
		if (lco->lco_dt_exp)
			cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
		mutex_unlock(&lco->lco_lock);
		result = 0;
	} else {
		CERROR("unexpected notification from %s %s!\n",
		       watched->obd_type->typ_name,
		       watched->obd_name);
		result = -EINVAL;
	}
	return result;
}
#define GROUPLOCK_SCOPE "grouplock"
/*
 * Take a CLM_GROUP lock with group id @gid over the whole file @obj
 * (range 0..CL_PAGE_EOF).  On success the cl environment, io and lock
 * are stashed in @cg for the matching cl_put_grouplock(); the env is
 * "unplanted" so the group lock can be released from a different
 * thread.  @nonblock makes the enqueue non-blocking (CEF_NONBLOCK).
 * Returns 0 or a negative errno.
 */
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
		     struct ccc_grouplock *cg)
{
	struct lu_env *env;
	struct cl_io *io;
	struct cl_lock *lock;
	struct cl_lock_descr *descr;
	__u32 enqflags;
	int refcheck;
	int rc;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);

	io = ccc_env_thread_io(env);
	io->ci_obj = obj;
	io->ci_ignore_layout = 1;

	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc) {
		/* Does not make sense to take GL for released layout */
		if (rc > 0)
			rc = -ENOTSUPP;
		cl_env_put(env, &refcheck);
		return rc;
	}

	/* describe a group-mode lock over the whole file */
	descr = &ccc_env_info(env)->cti_descr;
	descr->cld_obj = obj;
	descr->cld_start = 0;
	descr->cld_end = CL_PAGE_EOF;
	descr->cld_gid = gid;
	descr->cld_mode = CLM_GROUP;

	enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
	descr->cld_enq_flags = enqflags;

	lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
	if (IS_ERR(lock)) {
		cl_io_fini(env, io);
		cl_env_put(env, &refcheck);
		return PTR_ERR(lock);
	}

	/* take an extra env reference for @cg, then detach the env from
	 * this thread so the lock can be dropped elsewhere */
	cg->cg_env = cl_env_get(&refcheck);
	cg->cg_io = io;
	cg->cg_lock = lock;
	cg->cg_gid = gid;
	LASSERT(cg->cg_env == env);

	cl_env_unplant(env, &refcheck);
	return 0;
}
/*
 * Release a group lock taken by cl_get_grouplock(): re-implant the
 * stashed env into the current thread, unuse and release the lock,
 * finish the io and drop the env reference held by @cg.
 */
void cl_put_grouplock(struct ccc_grouplock *cg)
{
	struct lu_env *env = cg->cg_env;
	struct cl_io *io = cg->cg_io;
	struct cl_lock *lock = cg->cg_lock;
	int refcheck;

	LASSERT(cg->cg_env);
	LASSERT(cg->cg_gid);

	/* re-attach the env to this thread; the extra get/put pair keeps
	 * the refcount balanced across the implant */
	cl_env_implant(env, &refcheck);
	cl_env_put(env, &refcheck);

	cl_unuse(env, lock);
	cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
	cl_io_fini(env, io);
	cl_env_put(env, NULL);
}
| gpl-2.0 |
SamYaple/bcache-dev | drivers/mfd/cs5535-mfd.c | 1501 | 4811 | /*
* cs5535-mfd.c - core MFD driver for CS5535/CS5536 southbridges
*
* The CS5535 and CS5536 has an ISA bridge on the PCI bus that is
* used for accessing GPIOs, MFGPTs, ACPI, etc. Each subdevice has
* an IO range that's specified in a single BAR. The BAR order is
* hardcoded in the CS553x specifications.
*
* Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/olpc.h>
#define DRV_NAME "cs5535-mfd"
enum cs5535_mfd_bars {
SMB_BAR = 0,
GPIO_BAR = 1,
MFGPT_BAR = 2,
PMS_BAR = 4,
ACPI_BAR = 5,
NR_BARS,
};
static int cs5535_mfd_res_enable(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
return -EIO;
}
if (!request_region(res->start, resource_size(res), DRV_NAME)) {
dev_err(&pdev->dev, "can't request region\n");
return -EIO;
}
return 0;
}
static int cs5535_mfd_res_disable(struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
return -EIO;
}
release_region(res->start, resource_size(res));
return 0;
}
static struct resource cs5535_mfd_resources[NR_BARS];
static struct mfd_cell cs5535_mfd_cells[] = {
{
.id = SMB_BAR,
.name = "cs5535-smb",
.num_resources = 1,
.resources = &cs5535_mfd_resources[SMB_BAR],
},
{
.id = GPIO_BAR,
.name = "cs5535-gpio",
.num_resources = 1,
.resources = &cs5535_mfd_resources[GPIO_BAR],
},
{
.id = MFGPT_BAR,
.name = "cs5535-mfgpt",
.num_resources = 1,
.resources = &cs5535_mfd_resources[MFGPT_BAR],
},
{
.id = PMS_BAR,
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
.enable = cs5535_mfd_res_enable,
.disable = cs5535_mfd_res_disable,
},
{
.id = ACPI_BAR,
.name = "cs5535-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
.enable = cs5535_mfd_res_enable,
.disable = cs5535_mfd_res_disable,
},
};
#ifdef CONFIG_OLPC
/* On OLPC machines, clone the ACPI cell for the XO-1 PM and SCI drivers. */
static void cs5535_clone_olpc_cells(void)
{
	const char *acpi_clones[] = { "olpc-xo1-pm-acpi", "olpc-xo1-sci-acpi" };

	if (!machine_is_olpc())
		return;

	mfd_clone_cell("cs5535-acpi", acpi_clones, ARRAY_SIZE(acpi_clones));
}
#else
/* No-op when OLPC support is not built in. */
static void cs5535_clone_olpc_cells(void) { }
#endif
/*
 * PCI probe: fill each MFD cell's I/O resource from the BAR recorded in
 * its .id field (the BAR order is hardcoded by the CS553x spec), then
 * register all subdevices and the OLPC clones.
 */
static int cs5535_mfd_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	int err, i;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* fill in IO range for each cell; subdrivers handle the region */
	for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
		int bar = cs5535_mfd_cells[i].id;
		struct resource *r = &cs5535_mfd_resources[bar];

		r->flags = IORESOURCE_IO;
		r->start = pci_resource_start(pdev, bar);
		r->end = pci_resource_end(pdev, bar);

		/* id is used for temporarily storing BAR; unset it now */
		cs5535_mfd_cells[i].id = 0;
	}

	err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
			      ARRAY_SIZE(cs5535_mfd_cells), NULL, 0, NULL);
	if (err) {
		dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
		goto err_disable;
	}

	cs5535_clone_olpc_cells();

	dev_info(&pdev->dev, "%zu devices registered.\n",
		 ARRAY_SIZE(cs5535_mfd_cells));

	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
/* PCI remove: unregister the MFD subdevices and disable the PCI device. */
static void cs5535_mfd_remove(struct pci_dev *pdev)
{
	mfd_remove_devices(&pdev->dev);
	pci_disable_device(pdev);
}
static const struct pci_device_id cs5535_mfd_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
static struct pci_driver cs5535_mfd_driver = {
.name = DRV_NAME,
.id_table = cs5535_mfd_pci_tbl,
.probe = cs5535_mfd_probe,
.remove = cs5535_mfd_remove,
};
module_pci_driver(cs5535_mfd_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Barracuda09/linux | drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 2269 | 2649 | /*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <subdev/fb.h>
struct nv1a_fb_priv {
struct nouveau_fb base;
};
/*
 * Determine the amount of stolen system memory used as VRAM on
 * nv1a/nv1f IGPs by reading the host bridge's memory-size config
 * register (offset and encoding differ between the two chipsets).
 */
static int
nv1a_fb_vram_init(struct nouveau_fb *pfb)
{
	struct pci_dev *bridge;
	u32 mem, mib;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		nv_fatal(pfb, "no bridge device\n");
		return -ENODEV;
	}

	if (nv_device(pfb)->chipset == 0x1a) {
		pci_read_config_dword(bridge, 0x7c, &mem);
		mib = ((mem >> 6) & 31) + 1;
	} else {
		pci_read_config_dword(bridge, 0x84, &mem);
		mib = ((mem >> 4) & 127) + 1;
	}

	/* Fix: drop the reference taken by pci_get_bus_and_slot(),
	 * which was previously leaked on every call. */
	pci_dev_put(bridge);

	pfb->ram.type = NV_MEM_TYPE_STOLEN;
	pfb->ram.size = mib * 1024 * 1024;
	return 0;
}
/*
 * Constructor for the nv1a FB subdev: create the base fb object, wire
 * up the nv04 memtype check, the stolen-memory RAM init above and the
 * nv10 tiling helpers (8 tile regions), then run common preinit.
 */
static int
nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv1a_fb_priv *priv;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.memtype_valid = nv04_fb_memtype_valid;
	priv->base.ram.init = nv1a_fb_vram_init;
	priv->base.tile.regions = 8;
	priv->base.tile.init = nv10_fb_tile_init;
	priv->base.tile.fini = nv10_fb_tile_fini;
	priv->base.tile.prog = nv10_fb_tile_prog;
	return nouveau_fb_preinit(&priv->base);
}
struct nouveau_oclass
nv1a_fb_oclass = {
.handle = NV_SUBDEV(FB, 0x1a),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv1a_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = _nouveau_fb_init,
.fini = _nouveau_fb_fini,
},
};
| gpl-2.0 |
RadiumBot/Radium_tomato | block/blk-flush.c | 2269 | 13391 | /*
* Functions to sequence FLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
* REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
*
* If a request doesn't have data, only REQ_FLUSH makes sense, which
* indicates a simple flush request. If there is data, REQ_FLUSH indicates
* that the device cache should be flushed before the data is executed, and
* REQ_FUA means that the data must be on non-volatile media on request
* completion.
*
* If the device doesn't have writeback cache, FLUSH and FUA don't make any
* difference. The requests are either completed immediately if there's no
* data or executed as normal requests otherwise.
*
* If the device has writeback cache and supports FUA, REQ_FLUSH is
* translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
*
* If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
* translated to PREFLUSH and REQ_FUA to POSTFLUSH.
*
* The actual execution of flush is double buffered. Whenever a request
* needs to execute PRE or POSTFLUSH, it queues at
* q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
* flush is issued and the pending_idx is toggled. When the flush
* completes, all the requests which were pending are proceeded to the next
* step. This allows arbitrary merging of different types of FLUSH/FUA
* requests.
*
* Currently, the following conditions are used to determine when to issue
* flush.
*
* C1. At any given time, only one flush shall be in progress. This makes
* double buffering sufficient.
*
* C2. Flush is deferred if any request is executing DATA of its sequence.
* This avoids issuing separate POSTFLUSHes for requests which shared
* PREFLUSH.
*
* C3. The second condition is ignored if there is a request which has
* waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
* starvation in the unlikely case where there are continuous stream of
* FUA (without FLUSH) requests.
*
* For devices which support FUA, it isn't clear whether C2 (and thus C3)
* is beneficial.
*
* Note that a sequenced FLUSH/FUA request with DATA is completed twice.
* Once while executing DATA and again after the whole sequence is
* complete. The first completion updates the contained bio but doesn't
* finish it so that the bio submitter is notified only after the whole
* sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
* req_bio_endio().
*
* The above peculiarity requires that each FLUSH/FUA request has only one
* bio attached to it, which is guaranteed as they aren't allowed to be
* merged in the usual way.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include "blk.h"
/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE = (1 << 3), /* terminal step: request is ended */

	/* mask of the real action steps (DONE excluded) */
	REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
			   REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
static bool blk_kick_flush(struct request_queue *q);
/*
 * Work out which REQ_FSEQ_* steps @rq needs, given the queue's flush
 * capability flags @fflags (REQ_FLUSH/REQ_FUA support).
 */
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	/* A request carrying payload always has a DATA step. */
	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	/* Without a writeback cache (no REQ_FLUSH), no flush steps at all. */
	if (!(fflags & REQ_FLUSH))
		return policy;

	if (rq->cmd_flags & REQ_FLUSH)
		policy |= REQ_FSEQ_PREFLUSH;

	/* FUA is emulated with a post-flush when the device lacks it. */
	if ((rq->cmd_flags & REQ_FUA) && !(fflags & REQ_FUA))
		policy |= REQ_FSEQ_POSTFLUSH;

	return policy;
}
/*
 * Return the next pending sequence step for @rq: the lowest REQ_FSEQ_*
 * bit not yet recorded in rq->flush.seq (ffz() = first zero bit).
 */
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
/*
 * Turn @rq back into a normal request after it has been through the
 * flush machinery, so the regular completion path can end it.
 */
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}
/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false;

	/* @seq must not contain steps already recorded as complete */
	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	/* on error skip the remaining steps and finish the request */
	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;
	case REQ_FSEQ_DATA:
		/* dispatch the data portion for normal execution */
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		list_add(&rq->queuelist, &q->queue_head);
		queued = true;
		break;
	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		__blk_end_request_all(rq, error);
		break;
	default:
		BUG();
	}

	/* bitwise | (not ||): blk_kick_flush() must run unconditionally */
	return blk_kick_flush(q) | queued;
}
/*
 * Completion handler for the internal flush request issued by
 * blk_kick_flush().  Advances every request that was waiting on this
 * flush to its next sequence step.
 */
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running = &q->flush_queue[q->flush_running_idx];
	bool queued = false;
	struct request *rq, *n;

	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;
	elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		/* only PRE/POSTFLUSH waiters can be parked on a flush queue */
		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to empty queue_head may stall the
	 *    queue.
	 * 2. When flush request is running in non-queueable queue, the
	 *    queue is held.  Restart the queue after flush request is
	 *    finished to avoid stall.
	 * This function is called from request completion path and calling
	 * directly into request_fn may confuse the driver.  Always use
	 * kblockd.
	 */
	if (queued || q->flush_queue_delayed)
		blk_run_queue_async(q);
	q->flush_queue_delayed = 0;
}
/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	/*
	 * Note: computing list_first_entry() on a possibly-empty list is
	 * safe here because first_rq is only dereferenced after the
	 * list_empty() check below has passed.
	 */
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}
/*
 * Completion handler for the DATA step of a sequenced FLUSH/FUA request
 * (installed as rq->end_io by blk_insert_flush()).
 */
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}
/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	rq->end_io = flush_data_end_io;

	/* start the sequence by "completing" the steps @rq does not need */
	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}
/*
 * Bio completion for flushes issued by blkdev_issue_flush(): record
 * failure, signal the waiter (if any) and drop the submission reference.
 */
static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	/*
	 * NOTE(review): bio_alloc() result is not checked; presumably
	 * callers always pass a waiting gfp_mask for which allocation
	 * cannot fail - confirm.
	 */
	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	/* extra ref so the bio survives until the flag checks below */
	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion_io(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
| gpl-2.0 |
Snuzzo/PLUS_kernel | drivers/staging/vme/bridges/vme_tsi148.c | 2525 | 71479 | /*
* Support for the Tundra TSI148 VME-PCI Bridge Chip
*
* Author: Martyn Welch <martyn.welch@ge.com>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_tsi148.h"
/* Driver lifecycle entry points (defined later in this file) */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);

/* Module parameter */
/* NOTE(review): set via module params; consumed elsewhere in this file */
static int err_chk;
static int geoid;

static char driver_name[] = "vme_tsi148";

/* Bind only to the Tundra TSI148 PCI-VME bridge */
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
/*
 * Combine two 32-bit register halves into a single 64-bit value and
 * store it in *variable.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	unsigned long long joined;

	joined = (unsigned long long)high << 32;
	joined |= (unsigned long long)low;
	*variable = joined;
}
/*
 * Split a 64-bit value into its high and low 32-bit register halves.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFF);
}
/*
 * Wakes up DMA queue.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
	int channel_mask)
{
	u32 serviced = 0;

	/* DMA channel 0 finished: wake any waiter, ack the interrupt */
	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
		wake_up(&bridge->dma_queue[0]);
		serviced |= TSI148_LCSR_INTC_DMA0C;
	}
	/* DMA channel 1 finished */
	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
		wake_up(&bridge->dma_queue[1]);
		serviced |= TSI148_LCSR_INTC_DMA1C;
	}

	/* caller writes the returned bits to INTC to clear them */
	return serviced;
}
/*
 * Wake up location monitor queue
 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	/* Invoke the callback for each triggered location monitor */
	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_LMS[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= TSI148_LCSR_INTC_LMC[i];
		}
	}

	return serviced;
}
/*
 * Wake up mail box queue.
 *
 * XXX This functionality is not exposed up though API.
 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
	int i;
	u32 val;
	u32 serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Read and log each mailbox whose interrupt status bit is set */
	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_MBS[i]) {
			val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
				": 0x%x\n", i, val);
			serviced |= TSI148_LCSR_INTC_MBC[i];
		}
	}

	return serviced;
}
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Report the latched PCI error address and attributes */
	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	/* Clear the latched exception so the next one can be captured */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
/*
 * Save address and status when VME error interrupt occurs.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Latch faulting address (two halves) and attributes */
	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	/* GFP_ATOMIC: running in interrupt context, must not sleep */
	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		/* Can't record the error - at least log it */
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
/*
 * Wake up IACK queue.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
	/* Wake tsi148_irq_generate(), which sleeps until the IRQ is acked */
	wake_up(&bridge->iack_queue);

	return TSI148_LCSR_INTC_IACKC;
}
/*
 * Calling VME bus interrupt callback if provided.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
	u32 stat)
{
	int vec, i, serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Service pending VME IRQ levels, highest priority (7) first */
	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/*
			 * Note: Even though the registers are defined as
			 * 32-bits in the spec, we only want to issue 8-bit
			 * IACK cycles on the bus, read from offset 3.
			 */
			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);

			vme_irq_handler(tsi148_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
/*
 * Top level interrupt handler.  Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Shared IRQ line: nothing of ours is pending */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
/*
 * Hook up the shared PCI interrupt and enable the baseline set of
 * TSI148 interrupt sources.  Returns 0 on success or the negative
 * error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	/* INTEO = out enable, INTEN = enable: both needed to raise an IRQ */
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
/*
 * Tear down interrupt handling: mask and clear all bridge interrupts
 * before releasing the PCI interrupt line.
 */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Turn off interrupts */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, tsi148_bridge);
}
/*
 * Check to see if an IACK has been received, return true (1) or false (0).
 */
static int tsi148_iack_received(struct tsi148_driver *bridge)
{
	u32 vicr = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* IRQS still asserted means the interrupt has not been acked yet */
	return (vicr & TSI148_LCSR_VICR_IRQS) ? 0 : 1;
}
/*
 * Configure VME interrupt
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		/* Disable: mask INTEN first, then INTEO */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		/* optionally wait for any in-flight handler to finish */
		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
					    struct pci_dev, dev);

			synchronize_irq(pdev->irq);
		}
	} else {
		/* Enable: unmask INTEO first, then INTEN */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
/*
 * Generate a VME bus interrupt at the requested level & vector.  Wait for
 * interrupt to be acked.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* serialise interrupt generation: one outstanding IRQ at a time */
	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	/* woken by tsi148_IACK_irqhandler() once the IRQ is acknowledged */
	wait_event_interruptible(bridge->iack_queue,
				 tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
/*
 * Find the first error in this address range
 */
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
	vme_address_t aspace, unsigned long long address, size_t count)
{
	struct vme_bus_error *err;
	unsigned long long limit = address + count;

	/*
	 * XXX The address space (@aspace) is deliberately ignored: parsing
	 * the Address Modifier Codes would be quite resource intensive to
	 * do properly.  Matching on addresses alone is still certainly
	 * much better than what we had before.
	 */

	/* Return the first logged error inside [address, limit) */
	list_for_each_entry(err, &tsi148_bridge->vme_errors, list) {
		if ((err->address >= address) && (err->address < limit))
			return err;
	}

	return NULL;
}
/*
 * Clear errors in the provided address range.
 */
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
	vme_address_t aspace, unsigned long long address, size_t count)
{
	struct vme_bus_error *err, *next;
	unsigned long long limit = address + count;

	/*
	 * XXX The address space (@aspace) is deliberately ignored: parsing
	 * the Address Modifier Codes would be quite resource intensive to
	 * do properly.  Matching on addresses alone is still certainly
	 * much better than what we had before.
	 */

	/* Drop every logged error that falls inside [address, limit) */
	list_for_each_entry_safe(err, next, &tsi148_bridge->vme_errors, list) {
		if ((err->address >= address) && (err->address < limit)) {
			list_del(&err->list);
			kfree(err);
		}
	}
}
/*
 * Initialize a slave window with the requested attributes.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	i = image->number;

	/* Window granularity and ITAT address-space bits per aspace */
	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		addr |= TSI148_LCSR_ITAT_AS_A16;
		break;
	case VME_A24:
		granularity = 0x1000;
		addr |= TSI148_LCSR_ITAT_AS_A24;
		break;
	case VME_A32:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A32;
		break;
	case VME_A64:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A64;
		break;
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(vme_base, &vme_base_high, &vme_base_low);

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
	pci_offset = (unsigned long long)pci_base - vme_base;
	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);

	/* All three values must be aligned to the window granularity */
	if (vme_base_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
			      TSI148_LCSR_OFFSET_ITAT);
	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITAT);

	/* Setup mapping */
	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITSAU);
	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITSAL);
	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITEAU);
	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITEAL);
	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITOFU);
	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITOFL);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	temp_ctl &= ~(0x1F << 7);
	if (cycle & VME_BLT)
		temp_ctl |= TSI148_LCSR_ITAT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
	if (cycle & VME_2eVME)
		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
	if (cycle & VME_2eSST)
		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
	if (cycle & VME_2eSSTB)
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
	temp_ctl |= addr;

	/* Setup privilege and program/data response bits */
	temp_ctl &= ~0xF;
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_ITAT_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= TSI148_LCSR_ITAT_DATA;

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_ITAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		    TSI148_LCSR_OFFSET_ITAT);

	return 0;
}
/*
 * Get slave window configuration.
 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Read registers */
	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
			 TSI148_LCSR_OFFSET_ITAT);

	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				   TSI148_LCSR_OFFSET_ITSAU);
	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				  TSI148_LCSR_OFFSET_ITSAL);
	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				    TSI148_LCSR_OFFSET_ITEAU);
	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				   TSI148_LCSR_OFFSET_ITEAL);
	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				     TSI148_LCSR_OFFSET_ITOFU);
	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
				    TSI148_LCSR_OFFSET_ITOFL);

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_join(vme_base_high, vme_base_low, vme_base);
	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
	reg_join(pci_offset_high, pci_offset_low, &pci_offset);

	*pci_base = (dma_addr_t)vme_base + pci_offset;

	/* Decode the control register back into API attributes */
	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & TSI148_LCSR_ITAT_EN)
		*enabled = 1;

	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
		granularity = 0x10;
		*aspace |= VME_A16;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
		granularity = 0x1000;
		*aspace |= VME_A24;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
		granularity = 0x10000;
		*aspace |= VME_A32;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
		granularity = 0x10000;
		*aspace |= VME_A64;
	}

	/* Need granularity before we set the size */
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	if (ctl & TSI148_LCSR_ITAT_BLT)
		*cycle |= VME_BLT;
	if (ctl & TSI148_LCSR_ITAT_MBLT)
		*cycle |= VME_MBLT;
	if (ctl & TSI148_LCSR_ITAT_2eVME)
		*cycle |= VME_2eVME;
	if (ctl & TSI148_LCSR_ITAT_2eSST)
		*cycle |= VME_2eSST;
	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
		*cycle |= VME_2eSSTB;

	if (ctl & TSI148_LCSR_ITAT_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & TSI148_LCSR_ITAT_NPRIV)
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_ITAT_PGM)
		*cycle |= VME_PROG;
	if (ctl & TSI148_LCSR_ITAT_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if ((size != 0) && (existing_size == (size - 1)))
		return 0;

	/* Release any previously held mapping and resource */
	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	/* Exit here if size is zero */
	if (size == 0)
		return 0;

	if (image->bus_resource.name == NULL) {
		/* NOTE(review): +3 presumably covers '.', window number and
		 * NUL - confirm against VMENAMSIZ */
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(tsi148_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
		image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	/* goto-based unwinding of the partially acquired resources */
err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
	/* Undo tsi148_alloc_resource(): unmap, release and reset */
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
/*
 * Set the attributes of an outbound (master) window.
 *
 * Programs outbound translation window image->number: the backing PCI
 * resource is (re)allocated via tsi148_alloc_resource() (size == 0 frees
 * it), the window is disabled while the OTSAx/OTEAx/OTOFx address
 * registers and the OTAT attribute register are rewritten, and the enable
 * bit is set again only when @enabled is non-zero.
 *
 * Returns 0 on success, or a negative errno for bad alignment/size, an
 * unsupported data width or address space, or resource allocation failure.
 */
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	/* Verify input data: window granularity is 64kB (low 16 bits clear) */
	if (vme_base & 0xFFFF) {
		dev_err(tsi148_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	if ((size == 0) && (enabled != 0)) {
		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
			"enabled windows\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/* Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack. If size
	 * is zero, any existing resource will be freed.
	 */
	retval = tsi148_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
			"resource\n");
		goto err_res;
	}

	if (size == 0) {
		/* Window being torn down - clear the translation registers */
		pci_base = 0;
		pci_bound = 0;
		vme_offset = 0;
	} else {
		pci_base = (unsigned long long)image->bus_resource.start;
		/*
		 * Bound address is a valid address for the window, adjust
		 * according to window granularity.
		 */
		pci_bound = pci_base + (size - 0x10000);
		/* Hardware translates PCI address + offset -> VME address */
		vme_offset = vme_base - pci_base;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(pci_base, &pci_base_high, &pci_base_low);
	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);

	/* All three low words must respect the 64kB granularity */
	if (pci_base_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (pci_bound_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (vme_offset_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
			"alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);
	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	/* Setup 2eSST speeds (no speed bit set if zero or several requested) */
	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types - the TM field holds one mode, so when several
	 * flags are requested the last matching branch wins.
	 */
	if (cycle & VME_BLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
	}
	if (cycle & VME_MBLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
	}
	if (cycle & VME_2eVME) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
	}
	if (cycle & VME_2eSST) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
	}
	if (cycle & VME_2eSSTB) {
		dev_warn(tsi148_bridge->parent, "Currently not setting "
			"Broadcast Select Registers\n");
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
	}

	/* Setup data width */
	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
	switch (dwidth) {
	case VME_D16:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
		break;
	case VME_D32:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
		break;
	case VME_A24:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
		break;
	case VME_A32:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
		break;
	case VME_A64:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
		break;
	case VME_CRCSR:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
		break;
	case VME_USER2:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
		break;
	case VME_USER3:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
		break;
	case VME_USER4:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	/* Clear then set the supervisor/program access bits; presumably
	 * (3<<4) covers the OTAT SUP and PGM bit positions - TODO confirm
	 * against the register definition.
	 */
	temp_ctl &= ~(3<<4);
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_OTAT_SUP;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_OTAT_PGM;

	/* Setup mapping: PCI start/end and PCI->VME offset, high word first */
	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_OTAT_EN;

	/* Final write re-enables the window if requested */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	spin_unlock(&image->lock);
	return 0;

	/* Attribute errors after allocation must free the PCI resource */
err_aspace:
err_dwidth:
err_gran:
	tsi148_free_resource(image);
err_res:
err_window:
	return retval;
}
/*
 * Read back the configuration of an outbound window.
 *
 * Decodes the OTAT attribute register and the start/end/offset register
 * pairs for window image->number.  *aspace and *cycle are reset to zero
 * before the decoded flags are ORed in.  Caller must hold image->lock.
 *
 * XXX Not parsing prefetch information.
 */
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int win, ctl;
	unsigned int start_hi, start_lo;
	unsigned int end_hi, end_lo;
	unsigned int off_hi, off_lo;
	unsigned long long pci_start, pci_end, translation;
	struct tsi148_driver *bridge = image->parent->driver_priv;
	void __iomem *ot_base;

	win = image->number;
	ot_base = bridge->base + TSI148_LCSR_OT[win];

	ctl = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTAT);
	start_hi = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTSAU);
	start_lo = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTSAL);
	end_hi = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTEAU);
	end_lo = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTEAL);
	off_hi = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTOFU);
	off_lo = ioread32be(ot_base + TSI148_LCSR_OFFSET_OTOFL);

	/* Combine the 2x 32-bit register halves into 64-bit values */
	reg_join(start_hi, start_lo, &pci_start);
	reg_join(end_hi, end_lo, &pci_end);
	reg_join(off_hi, off_lo, &translation);

	*vme_base = pci_start + translation;
	/* End address is inclusive and 64kB-granular, hence the + 0x10000 */
	*size = (unsigned long long)(pci_end - pci_start) + 0x10000;

	*enabled = (ctl & TSI148_LCSR_OTAT_EN) ? 1 : 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	/* Decode address space */
	switch (ctl & TSI148_LCSR_OTAT_AMODE_M) {
	case TSI148_LCSR_OTAT_AMODE_A16:
		*aspace |= VME_A16;
		break;
	case TSI148_LCSR_OTAT_AMODE_A24:
		*aspace |= VME_A24;
		break;
	case TSI148_LCSR_OTAT_AMODE_A32:
		*aspace |= VME_A32;
		break;
	case TSI148_LCSR_OTAT_AMODE_A64:
		*aspace |= VME_A64;
		break;
	case TSI148_LCSR_OTAT_AMODE_CRCSR:
		*aspace |= VME_CRCSR;
		break;
	case TSI148_LCSR_OTAT_AMODE_USER1:
		*aspace |= VME_USER1;
		break;
	case TSI148_LCSR_OTAT_AMODE_USER2:
		*aspace |= VME_USER2;
		break;
	case TSI148_LCSR_OTAT_AMODE_USER3:
		*aspace |= VME_USER3;
		break;
	case TSI148_LCSR_OTAT_AMODE_USER4:
		*aspace |= VME_USER4;
		break;
	}

	/* Decode 2eSST speed */
	switch (ctl & TSI148_LCSR_OTAT_2eSSTM_M) {
	case TSI148_LCSR_OTAT_2eSSTM_160:
		*cycle |= VME_2eSST160;
		break;
	case TSI148_LCSR_OTAT_2eSSTM_267:
		*cycle |= VME_2eSST267;
		break;
	case TSI148_LCSR_OTAT_2eSSTM_320:
		*cycle |= VME_2eSST320;
		break;
	}

	/* Decode cycle (transfer mode) type */
	switch (ctl & TSI148_LCSR_OTAT_TM_M) {
	case TSI148_LCSR_OTAT_TM_SCT:
		*cycle |= VME_SCT;
		break;
	case TSI148_LCSR_OTAT_TM_BLT:
		*cycle |= VME_BLT;
		break;
	case TSI148_LCSR_OTAT_TM_MBLT:
		*cycle |= VME_MBLT;
		break;
	case TSI148_LCSR_OTAT_TM_2eVME:
		*cycle |= VME_2eVME;
		break;
	case TSI148_LCSR_OTAT_TM_2eSST:
		*cycle |= VME_2eSST;
		break;
	case TSI148_LCSR_OTAT_TM_2eSSTB:
		*cycle |= VME_2eSSTB;
		break;
	}

	*cycle |= (ctl & TSI148_LCSR_OTAT_SUP) ? VME_SUPER : VME_USER;
	*cycle |= (ctl & TSI148_LCSR_OTAT_PGM) ? VME_PROG : VME_DATA;

	/* Decode data width */
	switch (ctl & TSI148_LCSR_OTAT_DBW_M) {
	case TSI148_LCSR_OTAT_DBW_16:
		*dwidth = VME_D16;
		break;
	case TSI148_LCSR_OTAT_DBW_32:
		*dwidth = VME_D32;
		break;
	}

	return 0;
}
/*
 * Locked wrapper around __tsi148_master_get() - see that function for the
 * meaning of the output parameters.
 */
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int ret;

	spin_lock(&image->lock);
	ret = __tsi148_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
	spin_unlock(&image->lock);

	return ret;
}
/*
 * Read @count bytes at @offset from a master window into @buf.
 *
 * When the err_chk module option is set, latched VME bus errors in the
 * read range are checked: the return value is truncated to the number of
 * bytes transferred before the first faulting address and the saved
 * errors for the range are cleared.
 *
 * Returns the number of bytes read.
 */
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval, enabled;
	unsigned long long vme_base, size;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;
	struct vme_bus_error *vme_err = NULL;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	spin_lock(&image->lock);

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	if (!err_chk)
		goto skip_chk;

	/* Window attributes are needed to locate saved errors by aspace */
	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
		&dwidth);

	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
		count);
	if (vme_err != NULL) {
		/* Fixed log message (was "detected an at address"); use
		 * tsi148_bridge->parent consistently with the rest of file.
		 */
		dev_err(tsi148_bridge->parent, "First VME read error detected "
			"at address 0x%llx\n", vme_err->address);
		/* Report only the bytes that preceded the faulting address */
		retval = vme_err->address - (vme_base + offset);
		/* Clear down saved errors in this address range */
		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
			count);
	}

skip_chk:
	spin_unlock(&image->lock);

	return retval;
}
/*
 * Write @count bytes from @buf to a master window at @offset.
 *
 * When the err_chk module option is set, posted writes are flushed with a
 * dummy VME read through the CR/CSR flush window and latched bus errors in
 * the written range are checked; on error the return value is truncated to
 * the bytes before the first faulting address and the saved errors cleared.
 *
 * Returns the number of bytes written.
 */
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0, enabled;
	unsigned long long vme_base, size;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;
	struct vme_bus_error *vme_err = NULL;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	spin_lock(&image->lock);

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	/*
	 * Writes are posted. We need to do a read on the VME bus to flush out
	 * all of the writes before we check for errors. We can't guarantee
	 * that reading the data we have just written is safe. It is believed
	 * that there isn't any read, write re-ordering, so we can read any
	 * location in VME space, so lets read the Device ID from the tsi148's
	 * own registers as mapped into CR/CSR space.
	 *
	 * We check for saved errors in the written address range/space.
	 */
	if (!err_chk)
		goto skip_chk;

	/*
	 * Get window info first, to maximise the time that the buffers may
	 * flush on their own
	 */
	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
		&dwidth);

	/* Dummy read through the flush window forces posted writes out */
	ioread16(bridge->flush_image->kern_base + 0x7F000);

	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
		count);
	if (vme_err != NULL) {
		/* Fixed log message (was "detected an at address") */
		dev_warn(tsi148_bridge->parent, "First VME write error "
			"detected at address 0x%llx\n", vme_err->address);
		/* Report only the bytes that preceded the faulting address */
		retval = vme_err->address - (vme_base + offset);
		/* Clear down saved errors in this address range */
		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
			count);
	}

skip_chk:
	spin_unlock(&image->lock);

	return retval;
}
/*
 * Perform an RMW cycle on the VME bus.
 *
 * Requires a previously configured master window, returns final value.
 *
 * The compare/mask/swap values are loaded into the RMW registers, RMW mode
 * is enabled in VMCTRL, and the cycle is triggered by a read of the mapped
 * address.  The chip supports one RMW at a time, hence the vme_rmw mutex.
 */
static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	unsigned long long pci_addr;
	unsigned int pci_addr_high, pci_addr_low;
	u32 tmp, result;
	int i;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	/* Read the window's PCI start address and add the caller's offset */
	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);

	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);

	/* Configure registers */
	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);

	/* Enable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	/* Kick process off with a read to the required address. */
	result = ioread32be(image->kern_base + offset);

	/* Disable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
/*
 * Translate generic VME source attributes into tsi148 DSAT register bits.
 *
 * ORs the encoded 2eSST speed, cycle types, data width and address mode
 * into *attr.  Returns 0 on success or -EINVAL for an unsupported data
 * width or address space (logged via @dev).
 */
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	u32 dbw, amode;

	/* 2eSST speed: nothing is set if zero or several speeds requested */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_320;
		break;
	}

	/* Requested cycle (transfer mode) types */
	*attr |= (cycle & VME_SCT) ? TSI148_LCSR_DSAT_TM_SCT : 0;
	*attr |= (cycle & VME_BLT) ? TSI148_LCSR_DSAT_TM_BLT : 0;
	*attr |= (cycle & VME_MBLT) ? TSI148_LCSR_DSAT_TM_MBLT : 0;
	*attr |= (cycle & VME_2eVME) ? TSI148_LCSR_DSAT_TM_2eVME : 0;
	*attr |= (cycle & VME_2eSST) ? TSI148_LCSR_DSAT_TM_2eSST : 0;
	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		*attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
	}

	/* Data width */
	switch (dwidth) {
	case VME_D16:
		dbw = TSI148_LCSR_DSAT_DBW_16;
		break;
	case VME_D32:
		dbw = TSI148_LCSR_DSAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}
	*attr |= dbw;

	/* Address space */
	switch (aspace) {
	case VME_A16:
		amode = TSI148_LCSR_DSAT_AMODE_A16;
		break;
	case VME_A24:
		amode = TSI148_LCSR_DSAT_AMODE_A24;
		break;
	case VME_A32:
		amode = TSI148_LCSR_DSAT_AMODE_A32;
		break;
	case VME_A64:
		amode = TSI148_LCSR_DSAT_AMODE_A64;
		break;
	case VME_CRCSR:
		amode = TSI148_LCSR_DSAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		amode = TSI148_LCSR_DSAT_AMODE_USER1;
		break;
	case VME_USER2:
		amode = TSI148_LCSR_DSAT_AMODE_USER2;
		break;
	case VME_USER3:
		amode = TSI148_LCSR_DSAT_AMODE_USER3;
		break;
	case VME_USER4:
		amode = TSI148_LCSR_DSAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}
	*attr |= amode;

	/* Access privilege */
	*attr |= (cycle & VME_SUPER) ? TSI148_LCSR_DSAT_SUP : 0;
	*attr |= (cycle & VME_PROG) ? TSI148_LCSR_DSAT_PGM : 0;

	return 0;
}
/*
 * Translate generic VME destination attributes into tsi148 DDAT register
 * bits.
 *
 * ORs the encoded 2eSST speed, cycle types, data width and address mode
 * into *attr.  Returns 0 on success or -EINVAL for an unsupported data
 * width or address space (logged via @dev).
 */
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	u32 dbw, amode;

	/* 2eSST speed: nothing is set if zero or several speeds requested */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_320;
		break;
	}

	/* Requested cycle (transfer mode) types */
	*attr |= (cycle & VME_SCT) ? TSI148_LCSR_DDAT_TM_SCT : 0;
	*attr |= (cycle & VME_BLT) ? TSI148_LCSR_DDAT_TM_BLT : 0;
	*attr |= (cycle & VME_MBLT) ? TSI148_LCSR_DDAT_TM_MBLT : 0;
	*attr |= (cycle & VME_2eVME) ? TSI148_LCSR_DDAT_TM_2eVME : 0;
	*attr |= (cycle & VME_2eSST) ? TSI148_LCSR_DDAT_TM_2eSST : 0;
	if (cycle & VME_2eSSTB) {
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		*attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
	}

	/* Data width */
	switch (dwidth) {
	case VME_D16:
		dbw = TSI148_LCSR_DDAT_DBW_16;
		break;
	case VME_D32:
		dbw = TSI148_LCSR_DDAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}
	*attr |= dbw;

	/* Address space */
	switch (aspace) {
	case VME_A16:
		amode = TSI148_LCSR_DDAT_AMODE_A16;
		break;
	case VME_A24:
		amode = TSI148_LCSR_DDAT_AMODE_A24;
		break;
	case VME_A32:
		amode = TSI148_LCSR_DDAT_AMODE_A32;
		break;
	case VME_A64:
		amode = TSI148_LCSR_DDAT_AMODE_A64;
		break;
	case VME_CRCSR:
		amode = TSI148_LCSR_DDAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		amode = TSI148_LCSR_DDAT_AMODE_USER1;
		break;
	case VME_USER2:
		amode = TSI148_LCSR_DDAT_AMODE_USER2;
		break;
	case VME_USER3:
		amode = TSI148_LCSR_DDAT_AMODE_USER3;
		break;
	case VME_USER4:
		amode = TSI148_LCSR_DDAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}
	*attr |= amode;

	/* Access privilege */
	*attr |= (cycle & VME_SUPER) ? TSI148_LCSR_DDAT_SUP : 0;
	*attr |= (cycle & VME_PROG) ? TSI148_LCSR_DDAT_PGM : 0;

	return 0;
}
/*
 * Add a link list descriptor to the list
 *
 * Builds one tsi148 DMA descriptor from the given source and destination
 * attributes, appends it to the software list and, if there is a previous
 * descriptor, links that descriptor's "next address" to this one.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or -EINVAL on bad
 * attributes/alignment.
 *
 * NOTE(review): the descriptor fields are written here in CPU byte order;
 * verify whether the tsi148 expects big-endian descriptors on
 * little-endian hosts.
 */
static int tsi148_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct tsi148_dma_entry *entry, *prev;
	u32 address_high, address_low;
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = list->parent->parent;

	/* Descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
			"dma resource structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment - the hardware requires the descriptor
	 * to sit on an 8-byte boundary.
	 */
	if ((unsigned long)&entry->descriptor & 0x7) {
		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
			"byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	/* Given we are going to fill out the structure, we probably don't
	 * need to zero it, but better safe than sorry for now.
	 */
	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));

	/* Fill out source part */
	switch (src->type) {
	case VME_DMA_PATTERN:
		pattern_attr = src->private;
		entry->descriptor.dsal = pattern_attr->pattern;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
		/* Default behaviour is 32 bit pattern */
		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
		/* It seems that the default behaviour is to increment */
		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
		break;
	case VME_DMA_PCI:
		pci_attr = src->private;
		/* Split the 64-bit PCI address across the two address regs */
		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = src->private;
		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
		/* Encode aspace/cycle/dwidth into the source attribute reg */
		retval = tsi148_dma_set_vme_src_attributes(
			tsi148_bridge->parent, &entry->descriptor.dsat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_source;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid source type\n");
		retval = -EINVAL;
		goto err_source;
		break;
	}

	/* Assume last link - this will be over-written by adding another */
	entry->descriptor.dnlau = 0;
	entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;

	/* Fill out destination part */
	switch (dest->type) {
	case VME_DMA_PCI:
		pci_attr = dest->private;
		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = dest->private;
		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
		retval = tsi148_dma_set_vme_dest_attributes(
			tsi148_bridge->parent, &entry->descriptor.ddat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_dest;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
		retval = -EINVAL;
		goto err_dest;
		break;
	}

	/* Fill out count */
	entry->descriptor.dcnt = (u32)count;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&entry->descriptor);
		reg_split(desc_ptr, &prev->descriptor.dnlau,
			&prev->descriptor.dnlal);
	}

	return 0;

err_dest:
err_source:
err_align:
	kfree(entry);
err_mem:
	return retval;
}
/*
 * Check whether the given DMA channel has finished.
 *
 * Despite the name, this returns 1 when the channel is idle (busy flag
 * clear) and 0 while a transfer is still in progress - it is used as the
 * wake-up condition for wait_event_interruptible() in
 * tsi148_dma_list_exec().
 */
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
	u32 status;
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	status = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	return (status & TSI148_LCSR_DSTA_BSY) ? 0 : 1;
}
/*
 * Execute a previously generated link list
 *
 * Points the channel's "next link address" registers at the first
 * descriptor, sets DGO to start the transfer, then sleeps until the
 * channel's busy flag clears.  Only one list may run per channel; a second
 * caller gets -EBUSY.
 *
 * XXX Need to provide control register configuration.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval = 0;
	struct tsi148_dma_entry *entry;
	dma_addr_t bus_addr;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	reg_split(bus_addr, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* NOTE(review): the return value of wait_event_interruptible() is
	 * not checked - an interrupted sleep falls through and reads DSTA
	 * while the transfer may still be running.
	 */
	wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
/*
 * Clean up a previously generated link list
 *
 * We have a separate function, don't assume that the chain can't be reused.
 *
 * Detaches and frees every descriptor entry on the list.  Always returns 0.
 */
static int tsi148_dma_list_empty(struct vme_dma_list *list)
{
	struct tsi148_dma_entry *entry, *tmp;

	/* Idiomatic safe iterator: entries may be freed while walking */
	list_for_each_entry_safe(entry, tmp, &list->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 *
 * Returns 0 on success, -EBUSY if any callback is currently attached, or
 * -EINVAL for an unsupported address space.
 */
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
	vme_address_t aspace, vme_cycle_t cycle)
{
	u32 base_hi, base_lo, ctl = 0;
	int mon;
	struct vme_bridge *tsi148_bridge = lm->parent;
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (mon = 0; mon < lm->monitors; mon++) {
		if (bridge->lm_callback[mon] == NULL)
			continue;
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor "
			"callback attached, can't reset\n");
		return -EBUSY;
	}

	/* Encode the monitored address space */
	switch (aspace) {
	case VME_A16:
		ctl |= TSI148_LCSR_LMAT_AS_A16;
		break;
	case VME_A24:
		ctl |= TSI148_LCSR_LMAT_AS_A24;
		break;
	case VME_A32:
		ctl |= TSI148_LCSR_LMAT_AS_A32;
		break;
	case VME_A64:
		ctl |= TSI148_LCSR_LMAT_AS_A64;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/* Encode the cycle types to respond to */
	ctl |= (cycle & VME_SUPER) ? TSI148_LCSR_LMAT_SUPR : 0;
	ctl |= (cycle & VME_USER) ? TSI148_LCSR_LMAT_NPRIV : 0;
	ctl |= (cycle & VME_PROG) ? TSI148_LCSR_LMAT_PGM : 0;
	ctl |= (cycle & VME_DATA) ? TSI148_LCSR_LMAT_DATA : 0;

	reg_split(lm_base, &base_hi, &base_lo);

	/* Program the base address registers, then the attributes */
	iowrite32be(base_hi, bridge->base + TSI148_LCSR_LMBAU);
	iowrite32be(base_lo, bridge->base + TSI148_LCSR_LMBAL);
	iowrite32be(ctl, bridge->base + TSI148_LCSR_LMAT);

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Get configuration of the location monitor and return whether it is
 * enabled or disabled.
 *
 * Decoded aspace/cycle flags are ORed into the caller's *aspace and
 * *cycle (they are not cleared first).  Returns 1 if the monitor is
 * enabled, 0 otherwise.
 */
static int tsi148_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	u32 base_hi, base_lo, ctl, enabled = 0;
	struct tsi148_driver *bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	base_hi = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
	base_lo = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
	ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);

	reg_join(base_hi, base_lo, lm_base);

	if (ctl & TSI148_LCSR_LMAT_EN)
		enabled = 1;

	/* Decode the monitored address space */
	switch (ctl & TSI148_LCSR_LMAT_AS_M) {
	case TSI148_LCSR_LMAT_AS_A16:
		*aspace |= VME_A16;
		break;
	case TSI148_LCSR_LMAT_AS_A24:
		*aspace |= VME_A24;
		break;
	case TSI148_LCSR_LMAT_AS_A32:
		*aspace |= VME_A32;
		break;
	case TSI148_LCSR_LMAT_AS_A64:
		*aspace |= VME_A64;
		break;
	}

	/* Decode the cycle types responded to */
	if (ctl & TSI148_LCSR_LMAT_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & TSI148_LCSR_LMAT_NPRIV)
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_LMAT_PGM)
		*cycle |= VME_PROG;
	if (ctl & TSI148_LCSR_LMAT_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}
/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 *
 * Requires the monitor to have been configured first (tsi148_lm_set()).
 * Enables the per-monitor interrupt and, if this is the first attachment,
 * the global location monitor enable bit.
 *
 * Returns 0 on success, -EINVAL if the monitor is unconfigured or -EBUSY
 * if a callback is already attached.
 */
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly "
			"configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt: INTEN (enable) then INTEO
	 * (route out) for this monitor.
	 */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Detach a callback function from a specific location monitor.
 *
 * Disables the monitor's interrupt, clears any pending interrupt, removes
 * the callback and, if no monitors remain enabled, clears the global
 * location monitor enable bit.  Always returns 0.
 */
static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 lm_en, tmp;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Write-one-to-clear any latched interrupt for this monitor */
	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
		bridge->base + TSI148_LCSR_INTC);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor.
	 * NOTE(review): lm_en holds an INTEN value but is tested against
	 * INTS_LMxS masks - this assumes the LM bit positions coincide in
	 * both registers; confirm against the register definitions.
	 */
	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
		tmp &= ~TSI148_LCSR_LMAT_EN;
		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Determine Geographical Addressing
 *
 * Returns the geoid module parameter when set, otherwise the geographical
 * address field from the VSTAT register.
 */
static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Module parameter overrides the geographical address pins */
	if (geoid)
		return geoid;

	return (int)(ioread32be(bridge->base + TSI148_LCSR_VSTAT) &
		TSI148_LCSR_VSTAT_GA_M);
}
/* Module entry point: register the tsi148 PCI driver with the PCI core. */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the boards
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the boards geographic address.
 *
 * Each board has a 512kB window, with the highest 4kB being used for the
 * boards registers, this means there is a fixed-length 508kB window which
 * must be mapped onto PCI memory.
 */
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	u32 cbar, crat, vstat;
	u32 crcsr_bus_high, crcsr_bus_low;
	int retval;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
			"CR/CSR image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);

	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);

	/* Ensure that the CR/CSR is configured at the correct offset */
	cbar = ioread32be(bridge->base + TSI148_CBAR);
	cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;

	vstat = tsi148_slot_get(tsi148_bridge);

	if (cbar != vstat) {
		cbar = vstat;
		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
		iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
	}
	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);

	/* Bug fix: the enable check was inverted - the old code re-wrote the
	 * enable bit only when CR/CSR was already enabled and did nothing
	 * when it was disabled, so a powered-up-disabled CR/CSR space was
	 * never actually enabled.
	 */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	if (crat & TSI148_LCSR_CRAT_EN) {
		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
	} else {
		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
			bridge->base + TSI148_LCSR_CRAT);
	}

	/* If we want flushed, error-checked writes, set up a window
	 * over the CR/CSR registers. We read from here to safely flush
	 * through VME writes.
	 */
	if (err_chk) {
		retval = tsi148_master_set(bridge->flush_image, 1,
			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
			VME_D16);
		if (retval)
			dev_err(tsi148_bridge->parent, "Configuring flush image"
				" failed\n");
	}

	return 0;
}
/* Tear down the CR/CSR window set up by tsi148_crcsr_init(). */
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 crat;

	/* Disable the CR/CSR inbound window */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
		bridge->base + TSI148_LCSR_CRAT);

	/* Clear the window's PCI base and release the backing buffer */
	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}
/*
 * PCI probe: map and initialise a TSI148 bridge, build the master/slave/DMA/
 * location-monitor resource lists and register the bridge with the VME core.
 *
 * Fixes relative to the original:
 *  - a failure of tsi148_crcsr_init() previously left retval at 0, so the
 *    probe reported success after jumping to the error path; retval is now
 *    taken from tsi148_crcsr_init() itself.
 *  - the err_chk flush_image allocation was leaked on every subsequent error
 *    path (it is not on the master resource list); it is now freed explicitly.
 *    tsi148_device is kzalloc'd, so flush_image is NULL (and kfree-safe) when
 *    it was never allocated.
 */
static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i, master_num;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *tsi148_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* If we want to support more than one of each bridge, we need to
	 * dynamically generate this so we get one per device
	 */
	tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
	if (tsi148_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
	if (tsi148_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	tsi148_bridge->driver_priv = tsi148_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!tsi148_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "CRG region check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&tsi148_device->dma_queue[0]);
	init_waitqueue_head(&tsi148_device->dma_queue[1]);
	init_waitqueue_head(&tsi148_device->iack_queue);
	mutex_init(&tsi148_device->vme_int);
	mutex_init(&tsi148_device->vme_rmw);

	tsi148_bridge->parent = &pdev->dev;
	strcpy(tsi148_bridge->name, driver_name);

	/* Setup IRQ */
	retval = tsi148_irq_init(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* If we are going to flush writes, we need to read from the VME bus.
	 * We need to do this safely, thus we read the devices own CR/CSR
	 * register. To do this we must set up a window in CR/CSR space and
	 * hence have one less master window resource available.
	 */
	master_num = TSI148_MAX_MASTER;
	if (err_chk) {
		master_num--;

		tsi148_device->flush_image =
			kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
		if (tsi148_device->flush_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"flush resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		tsi148_device->flush_image->parent = tsi148_bridge;
		spin_lock_init(&tsi148_device->flush_image->lock);
		tsi148_device->flush_image->locked = 1;
		tsi148_device->flush_image->number = master_num;
		tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
			VME_A32 | VME_A64;
		tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
			VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
			VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
			VME_USER | VME_PROG | VME_DATA;
		tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
		memset(&tsi148_device->flush_image->bus_resource, 0,
			sizeof(struct resource));
		tsi148_device->flush_image->kern_base = NULL;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&tsi148_bridge->master_resources);
	for (i = 0; i < master_num; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = tsi148_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		master_image->width_attr = VME_D16 | VME_D32;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			&tsi148_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = tsi148_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
			VME_USER3 | VME_USER4;
		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&tsi148_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
	for (i = 0; i < TSI148_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = tsi148_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
			VME_DMA_PATTERN_TO_MEM;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&tsi148_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = tsi148_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);

	/* Hook the bridge operations into the VME core */
	tsi148_bridge->slave_get = tsi148_slave_get;
	tsi148_bridge->slave_set = tsi148_slave_set;
	tsi148_bridge->master_get = tsi148_master_get;
	tsi148_bridge->master_set = tsi148_master_set;
	tsi148_bridge->master_read = tsi148_master_read;
	tsi148_bridge->master_write = tsi148_master_write;
	tsi148_bridge->master_rmw = tsi148_master_rmw;
	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
	tsi148_bridge->irq_set = tsi148_irq_set;
	tsi148_bridge->irq_generate = tsi148_irq_generate;
	tsi148_bridge->lm_set = tsi148_lm_set;
	tsi148_bridge->lm_get = tsi148_lm_get;
	tsi148_bridge->lm_attach = tsi148_lm_attach;
	tsi148_bridge->lm_detach = tsi148_lm_detach;
	tsi148_bridge->slot_get = tsi148_slot_get;

	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
	if (!geoid)
		dev_info(&pdev->dev, "VME geographical address is %d\n",
			data & TSI148_LCSR_VSTAT_GA_M);
	else
		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
			geoid);

	dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
		err_chk ? "enabled" : "disabled");

	/* Was: if (tsi148_crcsr_init(...)) — which left retval at 0 */
	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
	if (retval) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		goto err_crcsr;
	}

	retval = vme_register_bridge(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, tsi148_bridge);

	/* Clear VME bus "board fail", and "power-up reset" lines */
	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	data &= ~TSI148_LCSR_VSTAT_BRDFL;
	data |= TSI148_LCSR_VSTAT_CPURST;
	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);

	return 0;

err_reg:
	tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &tsi148_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}
	/* flush_image is not on the master list; NULL (kzalloc) if unused */
	kfree(tsi148_device->flush_image);

	tsi148_irq_exit(tsi148_bridge, pdev);
err_irq:
err_test:
	iounmap(tsi148_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(tsi148_device);
err_driver:
	kfree(tsi148_bridge);
err_struct:
	return retval;
}
/*
 * PCI remove: quiesce the bridge hardware, unregister from the VME core and
 * release every resource acquired by tsi148_probe().  The hardware shutdown
 * (windows, monitors, interrupts) deliberately happens before the software
 * teardown so no IRQ or bus cycle can arrive on freed state.
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;

	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	/* NOTE(review): the lm_resources list and (with err_chk) the
	 * flush_image allocation do not appear to be freed here — confirm
	 * against the probe path. */
	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
/* Module exit point: unregister the PCI driver (triggers tsi148_remove). */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);
}
/*
 * Module parameters: "err_chk" enables flushed, error-checked VME accesses
 * (see tsi148_crcsr_init); "geoid" overrides the hardware-reported
 * geographical address (see tsi148_slot_get).
 */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
/* NOTE(review): err_chk is declared "static int" but registered as a bool
 * parameter — newer kernels reject this type mismatch; confirm against the
 * target kernel version. */
module_param(err_chk, bool, 0);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
| gpl-2.0 |
Toni5830/kernel_janice | drivers/staging/vme/bridges/vme_tsi148.c | 2525 | 71479 | /*
* Support for the Tundra TSI148 VME-PCI Bridge Chip
*
* Author: Martyn Welch <martyn.welch@ge.com>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_tsi148.h"
/* Forward declarations for the PCI driver entry points defined below. */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);

/* Module parameters (registered at the bottom of this file) */
static int err_chk;
static int geoid;

static char driver_name[] = "vme_tsi148";

/* The TSI148 is the only device this driver binds to */
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
/* Combine two 32-bit register halves into one 64-bit value. */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = ((unsigned long long)high << 32) |
		(unsigned long long)low;
}
/* Split a 64-bit value into the two 32-bit halves a register pair needs. */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFFULL);
}
/*
 * Wakes up DMA queue.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
	int channel_mask)
{
	u32 done = 0;

	/* Channel 0 complete */
	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
		wake_up(&bridge->dma_queue[0]);
		done |= TSI148_LCSR_INTC_DMA0C;
	}

	/* Channel 1 complete */
	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
		wake_up(&bridge->dma_queue[1]);
		done |= TSI148_LCSR_INTC_DMA1C;
	}

	return done;
}
/*
 * Wake up location monitor queue
 *
 * Calls the attached callback for each asserted location monitor and returns
 * the mask of interrupt-clear bits to write back to INTC.
 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_LMS[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= TSI148_LCSR_INTC_LMC[i];
		}
	}

	return serviced;
}
/*
 * Wake up mail box queue.
 *
 * XXX This functionality is not exposed up though API.
 *
 * Reads and logs each asserted mailbox register; returns the mask of
 * interrupt-clear bits to write back to INTC.
 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
	int i;
	u32 val;
	u32 serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_MBS[i]) {
			val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
				": 0x%x\n", i, val);
			serviced |= TSI148_LCSR_INTC_MBC[i];
		}
	}

	return serviced;
}
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 *
 * Logs the exception address/attributes, clears the exception latch and
 * returns the PERR interrupt-clear bit.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	/* Clear the exception so the next one can be latched */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
/*
 * Save address and status when VME error interrupt occurs.
 *
 * Records the faulting address/attributes on the bridge's vme_errors list so
 * later reads/writes can detect them (see tsi148_find_error), then clears the
 * exception latch.  GFP_ATOMIC is required: this runs in IRQ context.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		/* Can't record it — at least get it into the log */
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
/*
 * Wake up IACK queue.
 *
 * Wakes any waiter in tsi148_irq_generate() and returns the IACK
 * interrupt-clear bit.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return TSI148_LCSR_INTC_IACKC;
}
/*
 * Calling VME bus interrupt callback if provided.
 *
 * Services asserted VME IRQ levels from highest (7) to lowest priority,
 * issuing an IACK cycle for each to fetch the status/ID vector.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
	u32 stat)
{
	int vec, i, serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/*
			 * Note: Even though the registers are defined as
			 * 32-bits in the spec, we only want to issue 8-bit
			 * IACK cycles on the bus, read from offset 3.
			 */
			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);

			vme_irq_handler(tsi148_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Each sub-handler returns the bits it serviced; their union is written back
 * to INTC at the end to acknowledge exactly what was handled.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Shared IRQ line: nothing of ours pending */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
/*
 * Set up interrupt handling for the bridge: register the (shared) PCI IRQ
 * handler and enable the baseline interrupt sources.  Returns 0 on success
 * or the error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
/* Quiesce and release the bridge interrupt: mask, clear, then free the IRQ. */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Mask every interrupt source at the bridge */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Acknowledge anything still latched */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach our handler from the (shared) PCI line */
	free_irq(pdev->irq, tsi148_bridge);
}
/*
 * Check to see if an IACk has been received, return true (1) or false (0).
 */
static int tsi148_iack_received(struct tsi148_driver *bridge)
{
	u32 vicr = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* IRQS still set means the interrupt is pending, i.e. no IACK yet */
	return (vicr & TSI148_LCSR_VICR_IRQS) ? 0 : 1;
}
/*
 * Configure VME interrupt
 *
 * Enables or disables the given VME IRQ level (1-7).  The INTEN/INTEO write
 * order deliberately differs between the two cases; when disabling with
 * "sync" set, waits for any in-flight handler to finish.
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
				struct pci_dev, dev);

			synchronize_irq(pdev->irq);
		}
	} else {
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * Serialised via bridge->vme_int; the waiter is woken by
 * tsi148_IACK_irqhandler().  Always returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	wait_event_interruptible(bridge->iack_queue,
		tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
/*
 * Find the first error in this address range
 */
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
	vme_address_t aspace, unsigned long long address, size_t count)
{
	struct list_head *pos;
	struct vme_bus_error *err;
	unsigned long long end = address + count;

	/*
	 * XXX We are currently not looking at the address space when parsing
	 *     for errors. This is because parsing the Address Modifier Codes
	 *     is going to be quite resource intensive to do properly. We
	 *     should be OK just looking at the addresses and this is certainly
	 *     much better than what we had before.
	 */

	/* Return the first recorded error that falls within [address, end) */
	list_for_each(pos, &tsi148_bridge->vme_errors) {
		err = list_entry(pos, struct vme_bus_error, list);
		if (err->address >= address && err->address < end)
			return err;
	}

	return NULL;
}
/*
 * Clear errors in the provided address range.
 *
 * Removes and frees every recorded vme_bus_error whose address falls within
 * [address, address + count).  Uses the _safe iterator because entries are
 * deleted while walking the list.
 */
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
	vme_address_t aspace, unsigned long long address, size_t count)
{
	struct list_head *err_pos, *temp;
	struct vme_bus_error *vme_err;
	unsigned long long bound;

	bound = address + count;

	/*
	 * XXX We are currently not looking at the address space when parsing
	 *     for errors. This is because parsing the Address Modifier Codes
	 *     is going to be quite resource intensive to do properly. We
	 *     should be OK just looking at the addresses and this is certainly
	 *     much better than what we had before.
	 */
	err_pos = NULL;
	/* Iterate through errors */
	list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
		vme_err = list_entry(err_pos, struct vme_bus_error, list);

		if ((vme_err->address >= address) &&
			(vme_err->address < bound)) {

			list_del(err_pos);
			kfree(vme_err);
		}
	}
}
/*
 * Initialize a slave window with the requested attributes.
 *
 * Programs inbound translation window image->number: address range,
 * VME-to-PCI offset, 2eSST speed, allowed cycle types and access privileges.
 * The window is disabled while being reprogrammed and only re-enabled (if
 * requested) once fully configured.  Returns 0 on success, -EINVAL for an
 * unsupported address space or misaligned base/bound/offset.
 *
 * Fix: removed the unreachable "break" after "return -EINVAL" in the switch
 * default case.
 */
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;
	bridge = tsi148_bridge->driver_priv;

	i = image->number;

	/* Each address space has its own window granularity */
	switch (aspace) {
	case VME_A16:
		granularity = 0x10;
		addr |= TSI148_LCSR_ITAT_AS_A16;
		break;
	case VME_A24:
		granularity = 0x1000;
		addr |= TSI148_LCSR_ITAT_AS_A24;
		break;
	case VME_A32:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A32;
		break;
	case VME_A64:
		granularity = 0x10000;
		addr |= TSI148_LCSR_ITAT_AS_A64;
		break;
	case VME_CRCSR:
	case VME_USER1:
	case VME_USER2:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	/* Convert 64-bit variables to 2x 32-bit variables */
	reg_split(vme_base, &vme_base_high, &vme_base_low);

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size - granularity;
	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
	pci_offset = (unsigned long long)pci_base - vme_base;
	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);

	if (vme_base_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset_low & (granularity - 1)) {
		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);
	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	/* Setup mapping */
	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	temp_ctl &= ~(0x1F << 7);
	if (cycle & VME_BLT)
		temp_ctl |= TSI148_LCSR_ITAT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
	if (cycle & VME_2eVME)
		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
	if (cycle & VME_2eSST)
		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
	if (cycle & VME_2eSSTB)
		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
	temp_ctl |= addr;

	/* Setup access privileges */
	temp_ctl &= ~0xF;
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_ITAT_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= TSI148_LCSR_ITAT_DATA;

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_ITAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	return 0;
}
/*
 * Get slave window configuration.
 *
 * Read back the inbound translation registers for window @image->number and
 * decode them into the bridge-independent values used by the VME core:
 * enabled state, VME base/size, the PCI base it maps to, and the address
 * space / cycle type flags.  Always returns 0.
 */
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned int vme_base_low, vme_base_high;
	unsigned int vme_bound_low, vme_bound_high;
	unsigned int pci_offset_low, pci_offset_high;
	unsigned long long vme_bound, pci_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	/* Read registers */
	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITAT);

	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAU);
	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITSAL);
	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAU);
	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITEAL);
	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFU);
	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
		TSI148_LCSR_OFFSET_ITOFL);

	/* Combine the 2x 32-bit register halves into 64-bit values */
	reg_join(vme_base_high, vme_base_low, vme_base);
	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
	reg_join(pci_offset_high, pci_offset_low, &pci_offset);

	/*
	 * The offset register holds the VME-to-PCI translation, so the PCI
	 * base is the decoded VME base plus that offset.  The previous code
	 * cast the vme_base out-POINTER to dma_addr_t instead of
	 * dereferencing it, yielding a garbage PCI base.
	 */
	*pci_base = (dma_addr_t)(*vme_base + pci_offset);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & TSI148_LCSR_ITAT_EN)
		*enabled = 1;

	/*
	 * The address-space field also determines the window granularity,
	 * which is needed below to turn the inclusive bound into a size.
	 */
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
		granularity = 0x10;
		*aspace |= VME_A16;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
		granularity = 0x1000;
		*aspace |= VME_A24;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
		granularity = 0x10000;
		*aspace |= VME_A32;
	}
	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
		granularity = 0x10000;
		*aspace |= VME_A64;
	}

	/* Need granularity before we set the size */
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	/* Decode 2eSST speed (a single multi-bit field) */
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	/* Decode cycle type and access-mode bits (independent flags) */
	if (ctl & TSI148_LCSR_ITAT_BLT)
		*cycle |= VME_BLT;
	if (ctl & TSI148_LCSR_ITAT_MBLT)
		*cycle |= VME_MBLT;
	if (ctl & TSI148_LCSR_ITAT_2eVME)
		*cycle |= VME_2eVME;
	if (ctl & TSI148_LCSR_ITAT_2eSST)
		*cycle |= VME_2eSST;
	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
		*cycle |= VME_2eSSTB;

	if (ctl & TSI148_LCSR_ITAT_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & TSI148_LCSR_ITAT_NPRIV)
		*cycle |= VME_USER;
	if (ctl & TSI148_LCSR_ITAT_PGM)
		*cycle |= VME_PROG;
	if (ctl & TSI148_LCSR_ITAT_DATA)
		*cycle |= VME_DATA;

	return 0;
}
/*
 * Allocate and map PCI Resource
 *
 * Allocates a PCI memory window of @size bytes for the master image and
 * ioremaps it into kernel space.  A size of zero frees any existing
 * resource.  If a resource of the requested size already exists it is
 * reused unchanged.
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * acquired state is rolled back via the goto cleanup chain.
 */
static int tsi148_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	/* The bridge's parent device is the underlying PCI device */
	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	/* NOTE(review): resource .end is normally inclusive, yet .end is set
	 * to `size` below while this comparison expects end - start == size-1;
	 * looks like an off-by-one in one of the two places - verify against
	 * the resource API before changing either. */
	if ((size != 0) && (existing_size == (size - 1)))
		return 0;

	/* Tear down any previous, differently-sized mapping first */
	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	/* Exit here if size is zero */
	if (size == 0)
		return 0;

	/* Name buffer is sized for bridge name + "." + window number */
	if (image->bus_resource.name == NULL) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(tsi148_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
		image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	/* Let the PCI core find a free region; .start is updated in place */
	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

	/* Unwind in reverse order of acquisition */
err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
/*
 * Free and unmap PCI Resource
 *
 * Counterpart of tsi148_alloc_resource(): unmaps the kernel mapping and
 * returns the PCI window to the resource tree.  Note the ordering: the
 * name buffer must be freed before the memset wipes the pointer to it.
 */
static void tsi148_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
/*
 * Set the attributes of an outbound window.
 *
 * Programs outbound translation window @image->number: validates the
 * requested geometry (64 KiB granularity), (re)allocates the backing PCI
 * window, then writes the translation and attribute registers.  The
 * window is disabled while being reprogrammed and only re-enabled at the
 * end if @enabled is set.
 *
 * Returns 0 on success or a negative errno; on error after resource
 * allocation the resource is freed again via the goto chain.
 */
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i;
	unsigned int temp_ctl = 0;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;

	bridge = tsi148_bridge->driver_priv;

	/* Verify input data: VME base must sit on a 64 KiB boundary */
	if (vme_base & 0xFFFF) {
		dev_err(tsi148_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	if ((size == 0) && (enabled != 0)) {
		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
			"enabled windows\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/* Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack. If size
	 * is zero, any existing resource will be freed.
	 */
	retval = tsi148_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
			"resource\n");
		goto err_res;
	}

	if (size == 0) {
		pci_base = 0;
		pci_bound = 0;
		vme_offset = 0;
	} else {
		pci_base = (unsigned long long)image->bus_resource.start;

		/*
		 * Bound address is a valid address for the window, adjust
		 * according to window granularity.
		 */
		pci_bound = pci_base + (size - 0x10000);
		/* Hardware translates: VME address = PCI address + offset */
		vme_offset = vme_base - pci_base;
	}

	/* Split 64-bit values into 2x 32-bit register halves */
	reg_split(pci_base, &pci_base_high, &pci_base_low);
	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);

	/* All three low halves must respect the 64 KiB granularity */
	if (pci_base_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (pci_bound_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}
	if (vme_offset_low & 0xFFFF) {
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
			"alignment\n");
		retval = -EINVAL;
		goto err_gran;
	}

	i = image->number;

	/* Disable while we are mucking around */
	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);
	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	/* Setup 2eSST speeds */
	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types: TM is a single field, so later flags win over
	 * earlier ones if several are requested */
	if (cycle & VME_BLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
	}
	if (cycle & VME_MBLT) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
	}
	if (cycle & VME_2eVME) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
	}
	if (cycle & VME_2eSST) {
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
	}
	if (cycle & VME_2eSSTB) {
		dev_warn(tsi148_bridge->parent, "Currently not setting "
			"Broadcast Select Registers\n");
		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
	}

	/* Setup data width */
	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
	switch (dwidth) {
	case VME_D16:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
		break;
	case VME_D32:
		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
		break;
	case VME_A24:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
		break;
	case VME_A32:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
		break;
	case VME_A64:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
		break;
	case VME_CRCSR:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
		break;
	case VME_USER2:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
		break;
	case VME_USER3:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
		break;
	case VME_USER4:
		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	/* Clear SUP/PGM bits before setting the requested access mode */
	temp_ctl &= ~(3<<4);
	if (cycle & VME_SUPER)
		temp_ctl |= TSI148_LCSR_OTAT_SUP;
	if (cycle & VME_PROG)
		temp_ctl |= TSI148_LCSR_OTAT_PGM;

	/* Setup mapping */
	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Write ctl reg without enable */
	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	if (enabled)
		temp_ctl |= TSI148_LCSR_OTAT_EN;

	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
err_gran:
	tsi148_free_resource(image);
err_res:
err_window:
	return retval;
}
/*
 * Get the attributes of an outbound window.
 *
 * Lock-free variant: the caller must hold image->lock (see
 * tsi148_master_get() for the locked wrapper).  Reads the outbound
 * translation registers for window @image->number and decodes them into
 * the generic VME representation.  Always returns 0.
 *
 * XXX Not parsing prefetch information.
 */
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned int pci_base_low, pci_base_high;
	unsigned int pci_bound_low, pci_bound_high;
	unsigned int vme_offset_low, vme_offset_high;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTAT);

	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);
	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAU);
	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTEAL);
	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFU);
	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTOFL);

	/* Combine the 2x 32-bit register halves into 64-bit values */
	reg_join(pci_base_high, pci_base_low, &pci_base);
	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
	reg_join(vme_offset_high, vme_offset_low, &vme_offset);

	/* Invert the VME = PCI + offset translation; bound is inclusive,
	 * hence the 64 KiB granularity added back to get the size */
	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & TSI148_LCSR_OTAT_EN)
		*enabled = 1;

	/* Setup address space */
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
		*aspace |= VME_A16;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
		*aspace |= VME_A24;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
		*aspace |= VME_A32;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
		*aspace |= VME_A64;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
		*aspace |= VME_CRCSR;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
		*aspace |= VME_USER1;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
		*aspace |= VME_USER2;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
		*aspace |= VME_USER3;
	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
		*aspace |= VME_USER4;

	/* Setup 2eSST speeds */
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
		*cycle |= VME_2eSST160;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
		*cycle |= VME_2eSST267;
	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
		*cycle |= VME_2eSST320;

	/* Setup cycle types */
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
		*cycle |= VME_SCT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
		*cycle |= VME_BLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
		*cycle |= VME_MBLT;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
		*cycle |= VME_2eVME;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
		*cycle |= VME_2eSST;
	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
		*cycle |= VME_2eSSTB;

	/* SUP/PGM are single bits, so exactly one of each pair is reported */
	if (ctl & TSI148_LCSR_OTAT_SUP)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & TSI148_LCSR_OTAT_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
		*dwidth = VME_D16;
	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
		*dwidth = VME_D32;

	return 0;
}
/*
 * Locked wrapper around __tsi148_master_get(): serialises the register
 * read-back against concurrent window reconfiguration.
 */
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int ret;

	spin_lock(&image->lock);

	ret = __tsi148_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return ret;
}
/*
 * Read from a previously configured master window.
 *
 * Copies @count bytes from the mapped window at @offset into @buf.  When
 * error checking is enabled (err_chk), any latched VME bus error within
 * the accessed range reduces the return value to the number of bytes
 * before the faulting address, and the saved errors are cleared.
 *
 * Returns the number of bytes known to be good.
 */
static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval, enabled;
	unsigned long long vme_base, size;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;
	struct vme_bus_error *vme_err = NULL;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = image->parent;

	spin_lock(&image->lock);

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	if (!err_chk)
		goto skip_chk;

	/* Use the lock-free getter: image->lock is already held */
	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
		&dwidth);

	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
		count);
	if (vme_err != NULL) {
		/* Fixed garbled message ("detected an at address") and use
		 * the existing tsi148_bridge local for the device pointer,
		 * consistent with the rest of this function. */
		dev_err(tsi148_bridge->parent, "First VME read error detected "
			"at address 0x%llx\n", vme_err->address);
		retval = vme_err->address - (vme_base + offset);
		/* Clear down saved errors in this address range */
		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
			count);
	}

skip_chk:
	spin_unlock(&image->lock);

	return retval;
}
/*
 * Write to a previously configured master window.
 *
 * Copies @count bytes from @buf into the mapped window at @offset.  When
 * error checking is enabled (err_chk), a dummy VME read flushes the posted
 * writes before checking for latched bus errors in the written range;
 * errors trim the return value to the byte count before the fault.
 *
 * Returns the number of bytes known to be good.
 */
static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	int retval = 0, enabled;
	unsigned long long vme_base, size;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;
	struct vme_bus_error *vme_err = NULL;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = image->parent;

	bridge = tsi148_bridge->driver_priv;

	spin_lock(&image->lock);

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	/*
	 * Writes are posted. We need to do a read on the VME bus to flush out
	 * all of the writes before we check for errors. We can't guarantee
	 * that reading the data we have just written is safe. It is believed
	 * that there isn't any read, write re-ordering, so we can read any
	 * location in VME space, so lets read the Device ID from the tsi148's
	 * own registers as mapped into CR/CSR space.
	 *
	 * We check for saved errors in the written address range/space.
	 */
	if (!err_chk)
		goto skip_chk;

	/*
	 * Get window info first, to maximise the time that the buffers may
	 * flush on their own
	 */
	__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
		&dwidth);

	/* Flushing read through the CR/CSR-mapped flush image */
	ioread16(bridge->flush_image->kern_base + 0x7F000);

	vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
		count);
	if (vme_err != NULL) {
		/* Fixed garbled message ("detected an at address") */
		dev_warn(tsi148_bridge->parent, "First VME write error detected"
			" at address 0x%llx\n", vme_err->address);
		retval = vme_err->address - (vme_base + offset);
		/* Clear down saved errors in this address range */
		tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
			count);
	}

skip_chk:
	spin_unlock(&image->lock);

	return retval;
}
/*
 * Perform an RMW cycle on the VME bus.
 *
 * Requires a previously configured master window, returns final value.
 *
 * Programs the bridge's single RMW unit (mask/compare/swap and the PCI
 * address translated by window @image->number) and then triggers the
 * atomic cycle with a read through the window.  The RMW unit is global to
 * the chip, hence the vme_rmw mutex on top of the per-image spinlock.
 */
static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	unsigned long long pci_addr;
	unsigned int pci_addr_high, pci_addr_low;
	u32 tmp, result;
	int i;
	struct tsi148_driver *bridge;

	bridge = image->parent->driver_priv;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAU);
	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
		TSI148_LCSR_OFFSET_OTSAL);

	/* Window start + offset gives the RMW target on the PCI side */
	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);

	/* Configure registers */
	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);

	/* Enable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	/* Kick process off with a read to the required address. */
	result = ioread32be(image->kern_base + offset);

	/* Disable RMW */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);

	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
/*
 * Encode generic VME source attributes into a DMA descriptor DSAT value.
 *
 * @attr is OR-updated in place; existing bits are preserved.
 * Returns 0 on success, -EINVAL for an unsupported data width or address
 * space.  (Removed an unreachable `break` that followed the default-case
 * return.)
 */
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	/* Setup 2eSST speeds */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		*attr |= TSI148_LCSR_DSAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	if (cycle & VME_SCT)
		*attr |= TSI148_LCSR_DSAT_TM_SCT;
	if (cycle & VME_BLT)
		*attr |= TSI148_LCSR_DSAT_TM_BLT;
	if (cycle & VME_MBLT)
		*attr |= TSI148_LCSR_DSAT_TM_MBLT;
	if (cycle & VME_2eVME)
		*attr |= TSI148_LCSR_DSAT_TM_2eVME;
	if (cycle & VME_2eSST)
		*attr |= TSI148_LCSR_DSAT_TM_2eSST;
	if (cycle & VME_2eSSTB) {
		/* Informational only - the transfer still proceeds */
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		*attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
	}

	/* Setup data width */
	switch (dwidth) {
	case VME_D16:
		*attr |= TSI148_LCSR_DSAT_DBW_16;
		break;
	case VME_D32:
		*attr |= TSI148_LCSR_DSAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Setup address space */
	switch (aspace) {
	case VME_A16:
		*attr |= TSI148_LCSR_DSAT_AMODE_A16;
		break;
	case VME_A24:
		*attr |= TSI148_LCSR_DSAT_AMODE_A24;
		break;
	case VME_A32:
		*attr |= TSI148_LCSR_DSAT_AMODE_A32;
		break;
	case VME_A64:
		*attr |= TSI148_LCSR_DSAT_AMODE_A64;
		break;
	case VME_CRCSR:
		*attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		*attr |= TSI148_LCSR_DSAT_AMODE_USER1;
		break;
	case VME_USER2:
		*attr |= TSI148_LCSR_DSAT_AMODE_USER2;
		break;
	case VME_USER3:
		*attr |= TSI148_LCSR_DSAT_AMODE_USER3;
		break;
	case VME_USER4:
		*attr |= TSI148_LCSR_DSAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		*attr |= TSI148_LCSR_DSAT_SUP;
	if (cycle & VME_PROG)
		*attr |= TSI148_LCSR_DSAT_PGM;

	return 0;
}
/*
 * Encode generic VME destination attributes into a DMA descriptor DDAT
 * value.  Mirrors tsi148_dma_set_vme_src_attributes() for the destination
 * side.
 *
 * @attr is OR-updated in place; existing bits are preserved.
 * Returns 0 on success, -EINVAL for an unsupported data width or address
 * space.  (Removed an unreachable `break` that followed the default-case
 * return.)
 */
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	/* Setup 2eSST speeds */
	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
	case VME_2eSST160:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_160;
		break;
	case VME_2eSST267:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_267;
		break;
	case VME_2eSST320:
		*attr |= TSI148_LCSR_DDAT_2eSSTM_320;
		break;
	}

	/* Setup cycle types */
	if (cycle & VME_SCT)
		*attr |= TSI148_LCSR_DDAT_TM_SCT;
	if (cycle & VME_BLT)
		*attr |= TSI148_LCSR_DDAT_TM_BLT;
	if (cycle & VME_MBLT)
		*attr |= TSI148_LCSR_DDAT_TM_MBLT;
	if (cycle & VME_2eVME)
		*attr |= TSI148_LCSR_DDAT_TM_2eVME;
	if (cycle & VME_2eSST)
		*attr |= TSI148_LCSR_DDAT_TM_2eSST;
	if (cycle & VME_2eSSTB) {
		/* Informational only - the transfer still proceeds */
		dev_err(dev, "Currently not setting Broadcast Select "
			"Registers\n");
		*attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
	}

	/* Setup data width */
	switch (dwidth) {
	case VME_D16:
		*attr |= TSI148_LCSR_DDAT_DBW_16;
		break;
	case VME_D32:
		*attr |= TSI148_LCSR_DDAT_DBW_32;
		break;
	default:
		dev_err(dev, "Invalid data width\n");
		return -EINVAL;
	}

	/* Setup address space */
	switch (aspace) {
	case VME_A16:
		*attr |= TSI148_LCSR_DDAT_AMODE_A16;
		break;
	case VME_A24:
		*attr |= TSI148_LCSR_DDAT_AMODE_A24;
		break;
	case VME_A32:
		*attr |= TSI148_LCSR_DDAT_AMODE_A32;
		break;
	case VME_A64:
		*attr |= TSI148_LCSR_DDAT_AMODE_A64;
		break;
	case VME_CRCSR:
		*attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
		break;
	case VME_USER1:
		*attr |= TSI148_LCSR_DDAT_AMODE_USER1;
		break;
	case VME_USER2:
		*attr |= TSI148_LCSR_DDAT_AMODE_USER2;
		break;
	case VME_USER3:
		*attr |= TSI148_LCSR_DDAT_AMODE_USER3;
		break;
	case VME_USER4:
		*attr |= TSI148_LCSR_DDAT_AMODE_USER4;
		break;
	default:
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		*attr |= TSI148_LCSR_DDAT_SUP;
	if (cycle & VME_PROG)
		*attr |= TSI148_LCSR_DDAT_PGM;

	return 0;
}
/*
 * Add a link list descriptor to the list
 *
 * Allocates a hardware DMA descriptor, fills in the source and destination
 * halves from the generic attribute structures, appends it to @list, and
 * patches the previous descriptor's "next" pointer so the hardware chain
 * stays linked.  Returns 0 on success or a negative errno.
 */
static int tsi148_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct tsi148_dma_entry *entry, *prev;
	u32 address_high, address_low;
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = list->parent->parent;

	/* Descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
			"dma resource structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment: kmalloc should give at least 8-byte
	 * alignment, but the hardware requirement is hard, so verify */
	if ((unsigned long)&entry->descriptor & 0x7) {
		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
			"byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	/* Given we are going to fill out the structure, we probably don't
	 * need to zero it, but better safe than sorry for now.
	 */
	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));

	/* Fill out source part */
	switch (src->type) {
	case VME_DMA_PATTERN:
		pattern_attr = src->private;

		/* Pattern source: the 32-bit pattern lives in the low
		 * source-address register */
		entry->descriptor.dsal = pattern_attr->pattern;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
		/* Default behaviour is 32 bit pattern */
		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
		/* It seems that the default behaviour is to increment */
		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;

		break;
	case VME_DMA_PCI:
		pci_attr = src->private;

		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = src->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;

		retval = tsi148_dma_set_vme_src_attributes(
			tsi148_bridge->parent, &entry->descriptor.dsat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_source;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid source type\n");
		retval = -EINVAL;
		goto err_source;
		break;
	}

	/* Assume last link - this will be over-written by adding another */
	entry->descriptor.dnlau = 0;
	entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;

	/* Fill out destination part */
	switch (dest->type) {
	case VME_DMA_PCI:
		pci_attr = dest->private;

		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = dest->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;

		retval = tsi148_dma_set_vme_dest_attributes(
			tsi148_bridge->parent, &entry->descriptor.ddat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_dest;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
		retval = -EINVAL;
		goto err_dest;
		break;
	}

	/* Fill out count */
	entry->descriptor.dcnt = (u32)count;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
			list);
		/* We need the bus address for the pointer */
		/* NOTE(review): virt_to_bus is deprecated and not valid with
		 * an IOMMU; the DMA-mapping API would be the proper fix. */
		desc_ptr = virt_to_bus(&entry->descriptor);
		reg_split(desc_ptr, &prev->descriptor.dnlau,
			&prev->descriptor.dnlal);
	}

	return 0;

err_dest:
err_source:
err_align:
	kfree(entry);
err_mem:
	return retval;
}
/*
 * Check to see if the provided DMA channel is busy.
 *
 * Note the inverted sense: returns 1 when the channel is idle and 0 while
 * a transfer is still active, which is the condition shape wait_event_*
 * callers expect.
 */
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 status;

	status = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	return (status & TSI148_LCSR_DSTA_BSY) ? 0 : 1;
}
/*
 * Execute a previously generated link list
 *
 * Points the channel's next-link registers at the first descriptor in
 * @list, kicks off the transfer, sleeps until the channel goes idle, then
 * checks the status register for a VME bus error.  Only one list may run
 * per channel at a time (no pending queue yet).
 *
 * XXX Need to provide control register configuration.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval = 0;
	struct tsi148_dma_entry *entry;
	dma_addr_t bus_addr;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	reg_split(bus_addr, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* NOTE(review): the return value of wait_event_interruptible is
	 * ignored - if the sleep is interrupted by a signal we fall through
	 * while the DMA may still be running; consider aborting the channel
	 * and returning -EINTR here. */
	wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
/*
 * Clean up a previously generated link list
 *
 * We have a separate function, don't assume that the chain can't be reused.
 * Unlinks and frees every descriptor entry; the list head itself survives
 * and may be repopulated.  Always returns 0.
 */
static int tsi148_dma_list_empty(struct vme_dma_list *list)
{
	struct tsi148_dma_entry *entry, *tmp;

	/* Walk with the entry-safe iterator as each node is freed in turn */
	list_for_each_entry_safe(entry, tmp, &list->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 *
 * Returns 0 on success, -EBUSY if any callback is currently attached
 * (the base cannot be moved under an active monitor), or -EINVAL for an
 * unsupported address space.  (Removed an unreachable `break` after the
 * default-case return and a stray space before a semicolon.)
 */
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
	vme_address_t aspace, vme_cycle_t cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl = 0;
	int i;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&lm->mtx);
			dev_err(tsi148_bridge->parent, "Location monitor "
				"callback attached, can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
		break;
	case VME_A64:
		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= TSI148_LCSR_LMAT_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= TSI148_LCSR_LMAT_DATA;

	reg_split(lm_base, &lm_base_high, &lm_base_low);

	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);

	mutex_unlock(&lm->mtx);

	return 0;
}
/* Get configuration of the callback monitor and return whether it is enabled
 * or disabled.
 *
 * Decodes the LM base address, address space and cycle flags from the
 * hardware.  Returns 1 if the monitor is enabled, 0 otherwise.
 */
static int tsi148_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);

	reg_join(lm_base_high, lm_base_low, lm_base);

	/*
	 * The flags below are OR-ed in, so start from zero; previously the
	 * caller's (possibly uninitialised) values leaked into the result.
	 * This matches the sibling getters (tsi148_slave_get,
	 * __tsi148_master_get), which zero their outputs first.
	 */
	*aspace = 0;
	*cycle = 0;

	if (lm_ctl & TSI148_LCSR_LMAT_EN)
		enabled = 1;

	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
		*aspace |= VME_A16;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
		*aspace |= VME_A24;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
		*aspace |= VME_A32;
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
		*aspace |= VME_A64;

	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}
/*
* Attach a callback to a specific location monitor.
*
* Callback will be passed the monitor triggered.
*/
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	/* NOTE(review): 'monitor' is used as an array index without a range
	 * check - assumes callers only pass 0 <= monitor < lm->monitors.
	 * Confirm against the vme core. */
	tsi148_bridge = lm->parent;
	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly "
			"configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback before enabling the interrupt so the handler is in
	 * place by the time the first interrupt can fire */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	/* Route the interrupt out to the PCI bus */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Detach a callback function from a specific location monitor.
 */
static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 lm_en, tmp;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);

	/* Stop routing this monitor's interrupt out to PCI */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Clear any interrupt this monitor already has pending */
	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
		bridge->base + TSI148_LCSR_INTC);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor.
	 * NOTE(review): lm_en was read from INTEN but is tested against the
	 * TSI148_LCSR_INTS_LMxS masks - presumably the LM bits occupy the
	 * same positions in both registers; confirm against the Tsi148
	 * register map. */
	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
		tmp &= ~TSI148_LCSR_LMAT_EN;
		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
/*
 * Determine Geographical Addressing
 *
 * Reports the slot from the "geoid" module parameter when it is set,
 * otherwise from the geographical address field of the VSTAT register.
 */
static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
	u32 vstat;

	/* Module parameter override takes precedence over the hardware */
	if (geoid)
		return geoid;

	vstat = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
	return (int)(vstat & TSI148_LCSR_VSTAT_GA_M);
}
/* Module entry point: register the tsi148 PCI driver with the PCI core. */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 *
 * Each board has a 512kB window, with the highest 4kB being used for the
 * board's registers; this means there is a fixed-length 508kB window which
 * must be mapped onto PCI memory.
 */
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
struct pci_dev *pdev)
{
u32 cbar, crat, vstat;
u32 crcsr_bus_high, crcsr_bus_low;
int retval;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
}
memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
/* Ensure that the CR/CSR is configured at the correct offset */
cbar = ioread32be(bridge->base + TSI148_CBAR);
cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
vstat = tsi148_slot_get(tsi148_bridge);
if (cbar != vstat) {
cbar = vstat;
dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
}
dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
if (crat & TSI148_LCSR_CRAT_EN) {
dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
iowrite32be(crat | TSI148_LCSR_CRAT_EN,
bridge->base + TSI148_LCSR_CRAT);
} else
dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
/* If we want flushed, error-checked writes, set up a window
* over the CR/CSR registers. We read from here to safely flush
* through VME writes.
*/
if (err_chk) {
retval = tsi148_master_set(bridge->flush_image, 1,
(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
VME_D16);
if (retval)
dev_err(tsi148_bridge->parent, "Configuring flush image"
" failed\n");
}
return 0;
}
/*
 * tsi148_crcsr_exit - disable CR/CSR space and free the backing image.
 * Undoes tsi148_crcsr_init: clears the enable bit, zeroes the image bus
 * address registers and releases the DMA-coherent buffer.
 */
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	u32 crat;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Turn off CR/CSR space */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
		bridge->base + TSI148_LCSR_CRAT);

	/* Free image */
	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}
/*
 * tsi148_probe - probe and initialise a tsi148 VME bridge.
 *
 * Enables the PCI device, maps the CRG register block (BAR 0), sets up
 * interrupts, builds the master/slave/DMA/location-monitor resource
 * lists, configures CR/CSR space and registers the bridge with the VME
 * core.  On failure everything acquired so far is unwound in reverse
 * order via the chained error labels.
 *
 * Fixes relative to the original:
 *  - tsi148_crcsr_init() failure now sets retval, so probe no longer
 *    returns 0 (success) while taking the error path;
 *  - error-path list teardown uses list_for_each_safe(); the previous
 *    list_for_each() + list_del() + kfree() freed the node the iterator
 *    was about to read (use-after-free), as tsi148_remove() already
 *    avoids;
 *  - the err_chk flush_image allocation is freed on error paths.
 *
 * Returns 0 on success or a negative errno.
 */
static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i, master_num;
	u32 data;
	struct list_head *pos = NULL, *n;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *tsi148_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* If we want to support more than one of each bridge, we need to
	 * dynamically generate this so we get one per device
	 */
	tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
	if (tsi148_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
	if (tsi148_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	tsi148_bridge->driver_priv = tsi148_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!tsi148_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "CRG region check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&tsi148_device->dma_queue[0]);
	init_waitqueue_head(&tsi148_device->dma_queue[1]);
	init_waitqueue_head(&tsi148_device->iack_queue);
	mutex_init(&tsi148_device->vme_int);
	mutex_init(&tsi148_device->vme_rmw);

	tsi148_bridge->parent = &pdev->dev;
	strcpy(tsi148_bridge->name, driver_name);

	/* Setup IRQ */
	retval = tsi148_irq_init(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* If we are going to flush writes, we need to read from the VME bus.
	 * We need to do this safely, thus we read the devices own CR/CSR
	 * register. To do this we must set up a window in CR/CSR space and
	 * hence have one less master window resource available.
	 */
	master_num = TSI148_MAX_MASTER;
	if (err_chk) {
		master_num--;

		tsi148_device->flush_image =
			kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
		if (tsi148_device->flush_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"flush resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		tsi148_device->flush_image->parent = tsi148_bridge;
		spin_lock_init(&tsi148_device->flush_image->lock);
		tsi148_device->flush_image->locked = 1;
		tsi148_device->flush_image->number = master_num;
		tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
			VME_A32 | VME_A64;
		tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
			VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
			VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
			VME_USER | VME_PROG | VME_DATA;
		tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
		memset(&tsi148_device->flush_image->bus_resource, 0,
			sizeof(struct resource));
		tsi148_device->flush_image->kern_base = NULL;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&tsi148_bridge->master_resources);
	for (i = 0; i < master_num; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = tsi148_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		master_image->width_attr = VME_D16 | VME_D32;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&master_image->list,
			&tsi148_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = tsi148_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
			VME_USER3 | VME_USER4;
		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
			VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&tsi148_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
	for (i = 0; i < TSI148_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = tsi148_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
			VME_DMA_PATTERN_TO_MEM;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&tsi148_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = tsi148_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);

	/* Hook up the bridge operations */
	tsi148_bridge->slave_get = tsi148_slave_get;
	tsi148_bridge->slave_set = tsi148_slave_set;
	tsi148_bridge->master_get = tsi148_master_get;
	tsi148_bridge->master_set = tsi148_master_set;
	tsi148_bridge->master_read = tsi148_master_read;
	tsi148_bridge->master_write = tsi148_master_write;
	tsi148_bridge->master_rmw = tsi148_master_rmw;
	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
	tsi148_bridge->irq_set = tsi148_irq_set;
	tsi148_bridge->irq_generate = tsi148_irq_generate;
	tsi148_bridge->lm_set = tsi148_lm_set;
	tsi148_bridge->lm_get = tsi148_lm_get;
	tsi148_bridge->lm_attach = tsi148_lm_attach;
	tsi148_bridge->lm_detach = tsi148_lm_detach;
	tsi148_bridge->slot_get = tsi148_slot_get;

	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
	if (!geoid)
		dev_info(&pdev->dev, "VME geographical address is %d\n",
			data & TSI148_LCSR_VSTAT_GA_M);
	else
		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
			geoid);

	dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
		err_chk ? "enabled" : "disabled");

	/* Bug fix: capture the return value so a CR/CSR failure propagates
	 * to the caller instead of returning a stale 0 from err_crcsr */
	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
	if (retval) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
		goto err_crcsr;
	}

	retval = vme_register_bridge(tsi148_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, tsi148_bridge);

	/* Clear VME bus "board fail", and "power-up reset" lines */
	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
	data &= ~TSI148_LCSR_VSTAT_BRDFL;
	data |= TSI148_LCSR_VSTAT_CPURST;
	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);

	return 0;

err_reg:
	tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}
	/* flush_image is not on the master list; NULL (kzalloc) if unused */
	kfree(tsi148_device->flush_image);

	tsi148_irq_exit(tsi148_bridge, pdev);
err_irq:
err_test:
	iounmap(tsi148_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(tsi148_device);
err_driver:
	kfree(tsi148_bridge);
err_struct:
	return retval;

}
/*
 * tsi148_remove - tear down a tsi148 VME bridge.
 *
 * Disables all inbound/outbound windows, location monitors and the CRG
 * map, clears pending error state, unregisters from the VME core and
 * releases every resource allocated by tsi148_probe.
 *
 * Fixes relative to the original: the location monitor resource and the
 * err_chk flush_image were allocated in probe but never freed here
 * (memory leaks on every unbind).
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;

	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	/* flush_image only allocated when err_chk; NULL (kzalloc) otherwise */
	kfree(bridge->flush_image);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
/* Module exit point: unregister the tsi148 PCI driver. */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);
}
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");
module_init(tsi148_init);
module_exit(tsi148_exit);
| gpl-2.0 |
BytecodeMe/moto_omap4 | drivers/scsi/qla4xxx/ql4_isr.c | 2781 | 30630 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
* qla4xxx_copy_sense - copy sense data into cmd sense buffer
* @ha: Pointer to host adapter structure.
* @sts_entry: Pointer to status entry structure.
* @srb: Pointer to srb structure.
**/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		/* No sense data supplied - nothing to copy or continue */
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}

	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt - a single status IOCB carries at
	 * most IOCB_MAX_SENSEDATA_LEN bytes; the remainder (if any) arrives
	 * in Status Continuation IOCBs */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		      "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		      cmd->device->channel, cmd->device->id,
		      cmd->device->lun, __func__,
		      sts_entry->senseData[2] & 0x0f,
		      sts_entry->senseData[7],
		      sts_entry->senseData[12],
		      sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Park the srb on the adapter if more sense data remains, so the
	 * following Status Continuation IOCB(s) can append to it; otherwise
	 * clear the parking slot */
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}
/**
 * qla4xxx_status_cont_entry - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Appends the next chunk of extended sense data to the srb parked in
 * ha->status_srb and completes the command once all sense has arrived.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t copy_len;

	if (!srb)
		return;

	cmd = srb->cmd;
	if (!cmd) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			"back to OS srb=%p srb->state:%d\n", ha->host_no,
			__func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	copy_len = min_t(uint16_t, srb->req_sense_len,
			 IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, copy_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, copy_len));

	srb->req_sense_ptr += copy_len;
	srb->req_sense_len -= copy_len;

	/* Place command on done queue. */
	if (!srb->req_sense_len) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 *
 * Looks up the srb for the IOCB handle, translates the firmware
 * completion status into a Linux SCSI result, and completes the command
 * unless sense data continues in a following Status Continuation IOCB
 * (in which case ha->status_srb holds the srb).
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	/* An unknown handle means driver and firmware are out of sync -
	 * schedule an adapter reset to recover */
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
			   " handle=0x%0x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
			   " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		/* No device database entry - the session is gone */
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			/* An underrun that leaves fewer bytes than the
			 * midlayer's stated minimum (cmd->underflow) is
			 * treated as an error */
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
				cmd->underflow)) {
				cmd->result = DID_ERROR << 16;
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun0, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			qla4xxx_copy_sense(ha, sts_entry, srb);
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun1, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
			      "state: 0x%x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine process response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue - entries not yet marked
	 * RESPONSE_PROCESSED are new completions from the firmware */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry (ring wraps at
		 * RESPONSE_QUEUE_DEPTH) */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
						    le32_to_cpu(sts_entry->
								handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* ETRY normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue \n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
		/* Mark the entry consumed; wmb() orders the signature write
		 * before any subsequent ring accesses */
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	/* Acknowledge what we consumed, then request a full adapter reset */
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 *
 * Three classes of mailbox status are handled, keyed off the value of
 * @mbox_status (see the if/else-if chain below):
 *  - command completions: copied into ha->mbox_status[] and the waiting
 *    command is signalled;
 *  - asynchronous events (AENs): logged, and either handled immediately or
 *    queued for the DPC routine;
 *  - anything else: recorded and a debug message emitted.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = is_qla8022(ha)
				    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
				    : readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			/* wake a sleeping mailbox issuer, if any */
			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* snapshot all AEN mailbox registers before decoding */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = is_qla8022(ha)
			    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
			    : readl(&ha->reg->mailbox[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
				    mbox_sts[i];
			ha->aen_log.count++;
		}

		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if (ql4xdontresethba) {
				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
				    ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			/* fatal-class AENs: schedule an adapter reset */
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			break;

		case MBOX_ASTS_HEARTBEAT:
			/* firmware is alive; reset the watchdog counter */
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							   * mode
							   * only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			/* NOTE(review): plain printk with no KERN_ level,
			 * unlike the other cases -- confirm intentional */
			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			    mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = Old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
			    (mbox_sts[2] == ACB_STATE_ACQUIRING)))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
				 (mbox_sts[2] == ACB_STATE_VALID))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {
				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
					    mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i, mbox_sts[0],
						      mbox_sts[1], mbox_sts[2],
						      mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " inserted\n",  ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " removed\n",  ha->host_no, mbox_sts[0]));
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}
/**
 * qla4_8xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: snapshot of the host_status register taken by the caller.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 *
 * Dispatches on the ISP82xx host_status bits: IOCB completions first, then
 * mailbox/AEN events, and finally acks the interrupt by clearing host_int
 * (the trailing readl flushes the posted write).
 **/
void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
    uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
		    readl(&ha->qla4_8xxx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_8xxx_reg->host_int);
	readl(&ha->qla4_8xxx_reg->host_int);
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: snapshot of the ctrl_status register taken by the caller.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 *
 * Non-ISP82xx variant: services completions, then decodes any mailbox
 * event and acks only the mailbox interrupt (the completion interrupt is
 * cleared elsewhere via complete_iocb()).
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);	/* flush posted write */
	}
}
/**
 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: number of requests already serviced in this ISR invocation;
 *	a nonzero value means the interrupt was not truly spurious and the
 *	function is a no-op.
 *
 * Accounts for an interrupt that fired with no work pending.  On ISP82xx
 * the interrupt is also acked and (in legacy INTx mode) re-enabled so the
 * line does not stay asserted.
 **/
static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
    uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_8xxx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			/* unmask the legacy interrupt target register */
			qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
			    0xfbff);
	}
	ha->spurious_int_count++;
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Legacy (non-ISP82xx) INTx handler.  Loops servicing completions and
 * mailbox events under hardware_lock, handling fatal-error and chip-reset
 * interrupts specially, until either no interrupt is pending or
 * MAX_REQS_SERVICED_PER_INTR requests have been serviced.
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status; prefer the shadow response-queue
		 * index (cheap memory read) over a register read when it
		 * already shows completions pending.
		 */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status (ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			/* chip reset detected: go offline and let the DPC
			 * re-initialize, unless the host is being removed */
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * ISP82xx legacy INTx handler.  Verifies the interrupt really belongs to
 * this function (shared-line safe), acks it, then services work under
 * hardware_lock until no interrupt bits remain or the per-invocation
 * service cap is reached.
 **/
irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;	/* not our vector: shared IRQ line */

	status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_msi_handler - MSI interrupt handler for ISP82xx.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Acks the MSI at the legacy-interrupt target status register, then defers
 * the actual servicing to qla4_8xxx_default_intr_handler().
 **/
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
		    "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}
/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 *
 * Same service loop as the INTx path, minus the shared-line ownership
 * checks and the per-iteration interrupt re-enable (not needed for
 * message-signalled interrupts).
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	ha->isr_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_msix_rsp_q - MSI-X handler for the response-queue vector.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Drains the response queue under hardware_lock and acks the interrupt by
 * clearing host_int.
 **/
irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_process_response_queue(ha);
	writel(0, &ha->qla4_8xxx_reg->host_int);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->isr_count++;
	return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 * PROCESS_ALL_AENS	 0
 * FLUSH_DDB_CHANGED_AENS 1
 * RELOGIN_DDB_CHANGED_AENS 2
 *
 * Drains the aen_q ring (producer: ISR, consumer: this function) until
 * aen_out catches up with aen_in.
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		/* slot consumed: return it to the free count and advance
		 * the consumer index, wrapping at MAX_AEN_ENTRIES */
		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		/* drop the lock for the (potentially slow) DDB work; it is
		 * re-taken before the next ring access at the loop bottom */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			}
			/* fall through - not flushing, so handle the
			 * database change like any other request */
		case PROCESS_ALL_AENS:
		default:
			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
						mbox_sts[3], mbox_sts[4]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
 * qla4xxx_request_irqs - set up the best available interrupt mode
 * @ha: pointer to host adapter structure.
 *
 * On ISP82xx, tries MSI-X, then MSI, then INTx, honouring the
 * ql4xenablemsix module parameter (2 = MSI only, 1 = try MSI-X first,
 * any other value = INTx only).  Other ISPs always use INTx.
 *
 * Returns 0 and sets AF_IRQ_ATTACHED on success, or the last failing
 * request's error code.
 */
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

	/* Only ISP82xx supports MSI-X/MSI. */
	if (!is_qla8022(ha))
		goto try_intx;

	if (ql4xenablemsix == 2)
		goto try_msi;

	/* Anything other than 1 disables MSI-X.  (The original test
	 * "== 0 || != 1" was redundant: "!= 1" already covers 0.) */
	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	}

	ql4_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back-to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
			0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
			    "MSI: Failed to reserve interrupt %d "
			    "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}
	ql4_printk(KERN_WARNING, ha,
	    "MSI: Falling back-to INTx mode -- %d.\n", ret);

try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;

	} else {
		ql4_printk(KERN_WARNING, ha,
		    "INTx: Failed to reserve interrupt %d already in"
		    " use.\n", ha->pdev->irq);
		return ret;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
	    __func__, ha->pdev->irq);
	return ret;
}
/*
 * qla4xxx_free_irqs - undo whichever interrupt mode qla4xxx_request_irqs()
 * set up (MSI-X, MSI, or INTx).
 *
 * NOTE(review): AF_MSIX_ENABLED is only tested, not cleared, unlike the
 * MSI/INTx flags below -- presumably qla4_8xxx_disable_msix() clears it;
 * confirm against its definition.
 */
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_MSIX_ENABLED, &ha->flags))
		qla4_8xxx_disable_msix(ha);
	else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
		free_irq(ha->pdev->irq, ha);
		pci_disable_msi(ha->pdev);
	} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
		free_irq(ha->pdev->irq, ha);
}
| gpl-2.0 |
xenord/TrebonKernel-3.0.101 | drivers/net/enic/enic_pp.c | 2781 | 6841 | /*
* Copyright 2011 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
/*
 * Build a "provinfo" TLV blob describing the current port profile (name,
 * client MAC, UUIDs, OS type) and hand it to the firmware via
 * enic_dev_init_prov2().
 *
 * NOTE(review): VIC_PROVINFO_ADD_TLV is a macro that appears to assign to
 * the local 'err' and jump to the add_tlv_failure label on failure -- both
 * names are referenced by this function without otherwise being set/used
 * here; confirm against the macro definition in vnic_vic.h.
 *
 * Returns 0 on success or a negative errno.
 */
static int enic_set_port_profile(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct vic_provinfo *vp;
	const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
	const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
	char uuid_str[38];
	char client_mac_str[18];
	u8 *client_mac;
	int err;

	/* a profile name is mandatory */
	if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
		return -EINVAL;

	vp = vic_provinfo_alloc(GFP_KERNEL, oui,
		VIC_PROVINFO_GENERIC_TYPE);
	if (!vp)
		return -ENOMEM;

	VIC_PROVINFO_ADD_TLV(vp,
		VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
		strlen(enic->pp.name) + 1, enic->pp.name);

	/* prefer an explicitly configured MAC, else the netdev's own */
	if (!is_zero_ether_addr(enic->pp.mac_addr))
		client_mac = enic->pp.mac_addr;
	else
		client_mac = netdev->dev_addr;

	VIC_PROVINFO_ADD_TLV(vp,
		VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
		ETH_ALEN, client_mac);

	snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
	VIC_PROVINFO_ADD_TLV(vp,
		VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
		sizeof(client_mac_str), client_mac_str);

	if (enic->pp.set & ENIC_SET_INSTANCE) {
		sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
		VIC_PROVINFO_ADD_TLV(vp,
			VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
			sizeof(uuid_str), uuid_str);
	}

	if (enic->pp.set & ENIC_SET_HOST) {
		sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
		VIC_PROVINFO_ADD_TLV(vp,
			VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
			sizeof(uuid_str), uuid_str);
	}

	VIC_PROVINFO_ADD_TLV(vp,
		VIC_GENERIC_PROV_TLV_OS_TYPE,
		sizeof(os_type), &os_type);

	err = enic_dev_status_to_errno(enic_dev_init_prov2(enic, vp));

add_tlv_failure:
	vic_provinfo_free(vp);

	return err;
}
/*
 * Tear down the device's current port profile and reset the driver's
 * address lists.  Returns 0 on success or a negative errno derived from
 * the device status code.
 */
static int enic_unset_port_profile(struct enic *enic)
{
	int status = enic_vnic_dev_deinit(enic);

	if (status)
		return enic_dev_status_to_errno(status);

	enic_reset_addr_lists(enic);

	return 0;
}
/*
 * enic_are_pp_different - compare two port profiles
 *
 * Returns nonzero when any of the name, instance UUID, host UUID or MAC
 * address differ.  Callers (see enic_pp_associate) only use the result as
 * a boolean, so the original non-short-circuit bitwise OR of raw strcmp()
 * output and !!memcmp() values is replaced with the idiomatic, equivalent
 * short-circuit logical OR.
 */
static int enic_are_pp_different(struct enic_port_profile *pp1,
	struct enic_port_profile *pp2)
{
	return strcmp(pp1->name, pp2->name) != 0 ||
		memcmp(pp1->instance_uuid, pp2->instance_uuid,
			PORT_UUID_MAX) != 0 ||
		memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) != 0 ||
		memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN) != 0;
}
static int enic_pp_preassociate(struct enic *enic,
struct enic_port_profile *prev_pp, int *restore_pp);
static int enic_pp_disassociate(struct enic *enic,
struct enic_port_profile *prev_pp, int *restore_pp);
static int enic_pp_preassociate_rr(struct enic *enic,
struct enic_port_profile *prev_pp, int *restore_pp);
static int enic_pp_associate(struct enic *enic,
struct enic_port_profile *prev_pp, int *restore_pp);
static int (*enic_pp_handlers[])(struct enic *enic,
struct enic_port_profile *prev_state, int *restore_pp) = {
[PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
[PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
[PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
[PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
};
static const int enic_pp_handlers_count =
sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
/* Plain (non-RR) pre-associate is not supported by this driver. */
static int enic_pp_preassociate(struct enic *enic,
	struct enic_port_profile *prev_pp, int *restore_pp)
{
	return -EOPNOTSUPP;
}
/* Disassociate: simply remove the currently set port profile. */
static int enic_pp_disassociate(struct enic *enic,
	struct enic_port_profile *prev_pp, int *restore_pp)
{
	return enic_unset_port_profile(enic);
}
/*
 * Pre-associate (resource-reservation flavour): push the new profile to
 * the device without activating it.  When invoked directly (i.e. not as a
 * step of an associate request), the old profile is disassociated first
 * and the device is told the new profile is set but inactive.
 *
 * On success *restore_pp is cleared so the caller keeps the new profile
 * instead of rolling back to the previous one.
 */
static int enic_pp_preassociate_rr(struct enic *enic,
	struct enic_port_profile *prev_pp, int *restore_pp)
{
	int err;
	int active = 0;

	if (enic->pp.request != PORT_REQUEST_ASSOCIATE) {
		/* If pre-associate is not part of an associate,
		   we always disassociate first */
		err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic,
			prev_pp, restore_pp);
		if (err)
			return err;
	}

	/* Unconditional clear; the duplicate clear that used to sit inside
	 * the branch above was redundant and has been removed. */
	*restore_pp = 0;

	err = enic_set_port_profile(enic);
	if (err)
		return err;

	/* If pre-associate is not part of an associate, mark the profile
	 * as set but inactive (active == 0). */
	if (enic->pp.request != PORT_REQUEST_ASSOCIATE)
		err = enic_dev_status_to_errno(enic_dev_enable2(enic, active));

	return err;
}
/*
 * Associate: make the requested port profile the active one.
 *
 * Disassociates the previous profile first unless the previous request was
 * already a pre-associate(RR) of this very same profile.  (The condition
 * is simplified from the original "A || (!A && B)" shape: when the first
 * clause is false, the redundant re-test of prev_pp->request is always
 * true, so only enic_are_pp_different() matters.)
 */
static int enic_pp_associate(struct enic *enic,
	struct enic_port_profile *prev_pp, int *restore_pp)
{
	int err;
	int active = 1;

	/* Check if a pre-associate was called before */
	if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
		enic_are_pp_different(prev_pp, &enic->pp)) {
		err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
			enic, prev_pp, restore_pp);
		if (err)
			return err;

		*restore_pp = 0;
	}

	err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
		enic, prev_pp, restore_pp);
	if (err)
		return err;

	*restore_pp = 0;

	/* Finally, activate the profile on the device. */
	return enic_dev_status_to_errno(enic_dev_enable2(enic, active));
}
/*
 * Dispatch a set-port-profile request to the matching handler in
 * enic_pp_handlers[].  Requests outside the table, or pointing at an
 * empty slot, are rejected with -EOPNOTSUPP.
 */
int enic_process_set_pp_request(struct enic *enic,
	struct enic_port_profile *prev_pp, int *restore_pp)
{
	if (enic->pp.request >= enic_pp_handlers_count ||
		!enic_pp_handlers[enic->pp.request])
		return -EOPNOTSUPP;

	return enic_pp_handlers[enic->pp.request](enic, prev_pp, restore_pp);
}
/*
 * Query the completion state of a previous port-profile request and map
 * the device status code onto a PORT_PROFILE_RESPONSE_* value in
 * *response.  Returns 0, or -EINVAL for an unknown request type.
 */
int enic_process_get_pp_request(struct enic *enic, int request,
	u16 *response)
{
	int status = ERR_SUCCESS;
	int err;

	switch (request) {
	case PORT_REQUEST_PREASSOCIATE_RR:
	case PORT_REQUEST_ASSOCIATE:
		err = enic_dev_enable2_done(enic, &status);
		break;
	case PORT_REQUEST_DISASSOCIATE:
		err = enic_dev_deinit_done(enic, &status);
		break;
	default:
		return -EINVAL;
	}

	/* a query failure overrides whatever status was reported */
	if (err)
		status = err;

	/* translate the device status into the netlink response code */
	if (status == ERR_SUCCESS)
		*response = PORT_PROFILE_RESPONSE_SUCCESS;
	else if (status == ERR_EINVAL)
		*response = PORT_PROFILE_RESPONSE_INVALID;
	else if (status == ERR_EBADSTATE)
		*response = PORT_PROFILE_RESPONSE_BADSTATE;
	else if (status == ERR_ENOMEM)
		*response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
	else if (status == ERR_EINPROGRESS)
		*response = PORT_PROFILE_RESPONSE_INPROGRESS;
	else
		*response = PORT_PROFILE_RESPONSE_ERROR;

	return 0;
}
| gpl-2.0 |
Split-Screen/android_kernel_amazon_otter-common | drivers/scsi/qla4xxx/ql4_nvram.c | 3037 | 5956 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/*
 * Write one value to the NVRAM control register, flush the posted write
 * with a read-back, and give the serial EEPROM time to latch it.
 */
static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
{
	writel(cmd, isp_nvram(ha));
	readl(isp_nvram(ha));
	udelay(1);
}
/* Number of 16-bit words in the EEPROM, which depends on the ISP type. */
static inline int eeprom_size(struct scsi_qla_host *ha)
{
	if (is_qla4010(ha))
		return FM93C66A_SIZE_16;

	return FM93C86A_SIZE_16;
}
/* Width of the EEPROM address field, which depends on the ISP type. */
static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
{
	if (is_qla4010(ha))
		return FM93C56A_NO_ADDR_BITS_16;

	return FM93C86A_NO_ADDR_BITS_16;
}
/* Width of an EEPROM data word; 16 bits on all supported parts. */
static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
{
	return FM93C56A_DATA_BITS_16;
}
/*
 * Assert chip-select to the serial EEPROM and remember the base command
 * word used by all subsequent eeprom_cmd() calls.  Always returns 1.
 */
static int fm93c56a_select(struct scsi_qla_host * ha)
{
	DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));

	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
	eeprom_cmd(ha->eeprom_cmd_data, ha);
	return 1;
}
/*
 * fm93c56a_clock_bits - bit-bang @nbits of @value to the EEPROM, MSB first
 * @ha: adapter whose NVRAM interface is being driven
 * @value: bit pattern to send (top @nbits bits, MSB first)
 * @nbits: number of bits to clock out
 *
 * The DO line is only rewritten when the next bit differs from the
 * previous one (exactly mirroring the two open-coded loops this helper
 * replaces); each bit is then latched with an explicit clock rise/fall.
 */
static void fm93c56a_clock_bits(struct scsi_qla_host *ha, int value,
				int nbits)
{
	int mask = 1 << (nbits - 1);
	int dataBit;
	int previousBit = 0xffff;	/* force the first DO write */
	int i;

	for (i = 0; i < nbits; i++) {
		dataBit = (value & mask) ? AUBURN_EEPROM_DO_1 :
					   AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
			previousBit = dataBit;
		}
		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
			   AUBURN_EEPROM_CLK_RISE, ha);
		eeprom_cmd(ha->eeprom_cmd_data | dataBit |
			   AUBURN_EEPROM_CLK_FALL, ha);
		value = value << 1;
	}
}

/*
 * fm93c56a_cmd - issue a command opcode plus address to the serial EEPROM
 *
 * Clocks out the start bit, then the FM93C56A_CMD_BITS-wide opcode, then
 * the device-dependent number of address bits.  The two formerly
 * duplicated bit-clocking loops are factored into fm93c56a_clock_bits();
 * the register write sequence is unchanged.  Always returns 1.
 */
static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
{
	/* Clock in a zero, then do the start bit. */
	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);

	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
		   AUBURN_EEPROM_CLK_RISE, ha);
	eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
		   AUBURN_EEPROM_CLK_FALL, ha);

	fm93c56a_clock_bits(ha, cmd, FM93C56A_CMD_BITS);
	fm93c56a_clock_bits(ha, addr, eeprom_no_addr_bits(ha));

	return 1;
}
/* Deassert chip-select, ending the current EEPROM transaction.
 * Always returns 1. */
static int fm93c56a_deselect(struct scsi_qla_host * ha)
{
	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
	eeprom_cmd(ha->eeprom_cmd_data, ha);
	return 1;
}
/*
 * Shift one data word in from the serial EEPROM, MSB first, and store it
 * in *value.  Each bit is sampled on the DI line after an explicit clock
 * rise/fall.  Always returns 1.
 */
static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
{
	int bit_no;
	int word = 0;

	/* Read the data bits.
	 * The first bit is a dummy.  Clock right over it. */
	for (bit_no = 0; bit_no < eeprom_no_data_bits(ha); bit_no++) {
		eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_RISE, ha);
		eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_CLK_FALL, ha);

		word <<= 1;
		if (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1)
			word |= 1;
	}

	*value = word;
	return 1;
}
/*
 * Read one 16-bit word from the EEPROM at word address @eepromAddr into
 * *value: select the part, issue the READ command, clock in the data,
 * deselect.  Always returns 1.
 */
static int eeprom_readword(int eepromAddr, u16 * value,
			   struct scsi_qla_host * ha)
{
	fm93c56a_select(ha);
	fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
	fm93c56a_datain(ha, value);
	fm93c56a_deselect(ha);
	return 1;
}
/* Hardware_lock must be set before calling */

/* Read one NVRAM word at the given half-word offset. */
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
{
	u16 val = 0;

	/* NOTE: NVRAM uses half-word addresses */
	eeprom_readword(offset, &val, ha);
	return val;
}
/*
 * Validate the NVRAM contents by summing every word; the part is
 * programmed so the full-image 16-bit sum is zero when valid.
 * Returns QLA_SUCCESS when valid, QLA_ERROR otherwise.
 */
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
{
	uint16_t sum = 0;
	uint32_t word;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (word = 0; word < eeprom_size(ha); word++)
		sum += rd_nvram_word(ha, word);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (sum == 0) ? QLA_SUCCESS : QLA_ERROR;
}
/*************************************************************************
 *
 * Hardware Semaphore routines
 *
 *************************************************************************/

/*
 * Acquire a hardware semaphore, retrying once per second for up to 30
 * seconds.  The hardware_lock is taken only around each individual
 * write/read pair; the ssleep() between attempts runs unlocked.
 * Returns QLA_SUCCESS on acquisition, QLA_ERROR on timeout.
 */
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;
	unsigned int seconds = 30;

	DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
		      "0x%x\n", ha->host_no, sem_mask, sem_bits));
	do {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel((sem_mask | sem_bits), isp_semaphore(ha));
		/* read back to see whether we actually got the semaphore */
		value = readw(isp_semaphore(ha));
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if ((value & (sem_mask >> 16)) == sem_bits) {
			DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
				      "code = 0x%x\n", ha->host_no,
				      sem_mask, sem_bits));
			return QLA_SUCCESS;
		}
		ssleep(1);
	} while (--seconds);
	return QLA_ERROR;
}
/* Release a hardware semaphore previously taken with ql4xxx_sem_lock()
 * or ql4xxx_sem_spinlock(). */
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel(sem_mask, isp_semaphore(ha));
	readl(isp_semaphore(ha));	/* flush posted write */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
		      sem_mask));
}
/*
 * Single-shot (non-blocking) hardware semaphore acquisition.
 * Returns 1 if the semaphore was obtained, 0 otherwise.
 */
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel((sem_mask | sem_bits), isp_semaphore(ha));
	/* read back to see whether we actually got the semaphore */
	value = readw(isp_semaphore(ha));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if ((value & (sem_mask >> 16)) == sem_bits) {
		DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
			      "0x%x, sema code=0x%x\n", ha->host_no,
			      sem_mask, sem_bits, value));
		return 1;
	}
	return 0;
}
| gpl-2.0 |
yaymalaga/yayPrime_kernel | arch/arm/mach-omap2/board-ti8168evm.c | 4829 | 1878 | /*
* Code for TI8168/TI8148 EVM.
*
* Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/irqs.h>
#include <plat/board.h>
#include "common.h"
#include <plat/usb.h>
/* MUSB (USB OTG) controller configuration for the TI81xx EVM boards. */
static struct omap_musb_board_data musb_board_data = {
	.set_phy_power	= ti81xx_musb_phy_power,
	.interface_type	= MUSB_INTERFACE_ULPI,
	.mode           = MUSB_OTG,
	.power		= 500,
};

/* No board-specific config entries; registered (empty) in init below. */
static struct omap_board_config_kernel ti81xx_evm_config[] __initdata = {
};
/* Board init: serial ports, SDRAM controller, board config table, USB. */
static void __init ti81xx_evm_init(void)
{
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	omap_board_config = ti81xx_evm_config;
	omap_board_config_size = ARRAY_SIZE(ti81xx_evm_config);
	usb_musb_init(&musb_board_data);
}
/* Machine descriptors; both EVMs share the same init/map/irq callbacks. */
MACHINE_START(TI8168EVM, "ti8168evm")
	/* Maintainer: Texas Instruments */
	.atag_offset	= 0x100,
	.map_io		= ti81xx_map_io,
	.init_early	= ti81xx_init_early,
	.init_irq	= ti81xx_init_irq,
	.timer		= &omap3_timer,
	.init_machine	= ti81xx_evm_init,
	.restart	= omap_prcm_restart,
MACHINE_END

MACHINE_START(TI8148EVM, "ti8148evm")
	/* Maintainer: Texas Instruments */
	.atag_offset	= 0x100,
	.map_io		= ti81xx_map_io,
	.init_early	= ti81xx_init_early,
	.init_irq	= ti81xx_init_irq,
	.timer		= &omap3_timer,
	.init_machine	= ti81xx_evm_init,
	.restart	= omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
rutvik95/android_kernel_samsung_i9060 | drivers/media/video/s5p-g2d/g2d-hw.c | 5085 | 1997 | /*
* Samsung S5P G2D - 2D Graphics Accelerator Driver
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Kamil Debski, <k.debski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the
* License, or (at your option) any later version
*/
#include <linux/io.h>
#include "g2d.h"
#include "g2d-regs.h"
#define w(x, a) writel((x), d->regs + (a))
#define r(a) readl(d->regs + (a))
/* g2d_reset clears all g2d registers */
/* Writes the soft-reset bit; the hardware clears its register state. */
void g2d_reset(struct g2d_dev *d)
{
	w(1, SOFT_RESET_REG);
}
/*
 * Program the source-image geometry: line stride, crop rectangle
 * (left/top and right/bottom corners, 12 bits each packed into one
 * register) and pixel format.
 */
void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
{
	u32 reg;

	w(f->stride & 0xFFFF, SRC_STRIDE_REG);

	reg = ((f->o_height & 0xFFF) << 16) | (f->o_width & 0xFFF);
	w(reg, SRC_LEFT_TOP_REG);

	reg = ((f->bottom & 0xFFF) << 16) | (f->right & 0xFFF);
	w(reg, SRC_RIGHT_BOTTOM_REG);

	w(f->fmt->hw, SRC_COLOR_MODE_REG);
}
/* Program the DMA address of the source buffer. */
void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a)
{
	w(a, SRC_BASE_ADDR_REG);
}
/*
 * Program the destination-image geometry: line stride, crop rectangle
 * (left/top and right/bottom corners, 12 bits each packed into one
 * register) and pixel format.
 */
void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
{
	u32 reg;

	w(f->stride & 0xFFFF, DST_STRIDE_REG);

	reg = ((f->o_height & 0xFFF) << 16) | (f->o_width & 0xFFF);
	w(reg, DST_LEFT_TOP_REG);

	reg = ((f->bottom & 0xFFF) << 16) | (f->right & 0xFFF);
	w(reg, DST_RIGHT_BOTTOM_REG);

	w(f->fmt->hw, DST_COLOR_MODE_REG);
}
/* Program the DMA base address of the destination buffer. */
void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a)
{
w(a, DST_BASE_ADDR_REG);
}
/* Set the ROP4 raster-operation code (how src/dst/pattern are combined).
 * Note: parameter "r" does not collide with the r() register macro above,
 * since that macro is function-like and only expands before '('. */
void g2d_set_rop4(struct g2d_dev *d, u32 r)
{
w(r, ROP4_REG);
}
/* Set the source/mask direction bits (used for blit flipping). */
void g2d_set_flip(struct g2d_dev *d, u32 r)
{
w(r, SRC_MSK_DIRECT_REG);
}
/* Translate a boolean stretch enable into its BitBLT command bit (bit 4).
 * Any non-zero low bit of @e enables stretching; all other bits ignored. */
u32 g2d_cmd_stretch(u32 e)
{
	return (e & 1) << 4;
}
/* Latch the assembled BitBLT command word into the engine. */
void g2d_set_cmd(struct g2d_dev *d, u32 c)
{
w(c, BITBLT_COMMAND_REG);
}
/* Kick off a blit: flush the engine caches, unmask its interrupt,
 * then start the operation programmed by the g2d_set_* calls above. */
void g2d_start(struct g2d_dev *d)
{
/* Clear cache */
w(0x7, CACHECTL_REG);
/* Enable interrupt */
w(1, INTEN_REG);
/* Start G2D engine */
w(1, BITBLT_START_REG);
}
/* Acknowledge (clear) the pending engine interrupt. */
void g2d_clear_int(struct g2d_dev *d)
{
w(1, INTC_PEND_REG);
}
| gpl-2.0 |
horuscentro/android_kernel_motorola_msm8226 | drivers/isdn/divert/isdn_divert.c | 5085 | 23568 | /* $Id: isdn_divert.c,v 1.6.6.3 2001/09/23 22:24:36 kai Exp $
*
* DSS1 main diversion supplementary handling for i4l.
*
* Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include "isdn_divert.h"
/**********************************/
/* structure keeping calling info */
/**********************************/
/* One in-flight diversion/deflection process; kept on the doubly linked
 * divert_head list and freed either by its timer or by status callbacks. */
struct call_struc
{ isdn_ctrl ics; /* delivered setup + driver parameters */
ulong divert_id; /* Id delivered to user */
unsigned char akt_state; /* actual state */
char deflect_dest[35]; /* deflection destination */
struct timer_list timer; /* timer control structure */
char info[90]; /* device info output */
struct call_struc *next; /* pointer to next entry */
struct call_struc *prev; /* pointer to previous entry (NULL at head) */
};
/********************************************/
/* structure keeping deflection table entry */
/********************************************/
struct deflect_struc
{ struct deflect_struc *next, *prev; /* doubly linked rule table */
divert_rule rule; /* used rule */
};
/*****************************************/
/* variables for main diversion services */
/*****************************************/
/* diversion/deflection processes */
static struct call_struc *divert_head = NULL; /* head of remembered entrys */
static ulong next_id = 1; /* next info id */
static struct deflect_struc *table_head = NULL; /* first deflection rule */
static struct deflect_struc *table_tail = NULL; /* last deflection rule */
static unsigned char extern_wait_max = 4; /* maximum wait in s for external process */
DEFINE_SPINLOCK(divert_lock); /* protects both lists and next_id */
/***************************/
/* timer callback function */
/***************************/
/*
 * Timer callback for a diversion process.  @arg is the owning
 * call_struc.  Depending on the state it either cancels a pending
 * PROCEED (hangup), fires a delayed redirection for ALERT, or -- in
 * the AUTODEL state -- unlinks and frees the entry itself.
 */
static void deflect_timer_expire(ulong arg)
{
unsigned long flags;
struct call_struc *cs = (struct call_struc *) arg;
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs->timer); /* delete active timer */
spin_unlock_irqrestore(&divert_lock, flags);
switch (cs->akt_state)
{ case DEFLECT_PROCEED:
cs->ics.command = ISDN_CMD_HANGUP; /* cancel action */
divert_if.ll_cmd(&cs->ics);
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
break;
case DEFLECT_ALERT:
/* wait time elapsed: now really redirect the call */
cs->ics.command = ISDN_CMD_REDIR; /* protocol */
strlcpy(cs->ics.parm.setup.phone, cs->deflect_dest, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn, "Testtext delayed");
divert_if.ll_cmd(&cs->ics);
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
break;
case DEFLECT_AUTODEL:
default:
/* grace period over: unlink from divert_head and free */
spin_lock_irqsave(&divert_lock, flags);
if (cs->prev)
cs->prev->next = cs->next; /* forward link */
else
divert_head = cs->next;
if (cs->next)
cs->next->prev = cs->prev; /* back link */
spin_unlock_irqrestore(&divert_lock, flags);
kfree(cs);
return;
} /* switch */
} /* deflect_timer_func */
/*****************************************/
/* handle call forwarding de/activations */
/* 0 = deact, 1 = act, 2 = interrogate */
/*****************************************/
/*
 * cf_command() - hand-encode a DSS1 call-forwarding component and send
 * it to the lower layer as an invoke.
 * @drvid:   driver id the request is issued on
 * @mode:    0 = deactivate, 1 = activate, 2 = interrogate
 * @proc:    diversion procedure selector (0..2) -- presumably CFU/CFB/CFNR,
 *           TODO confirm against the DSS1 spec
 * @msn:     own MSN the service applies to (single char / "0" means all)
 * @service: basic service code
 * @fwd_nr:  forwarded-to number, only evaluated when activating
 * @procid:  out: ll_id under which the answer will be reported
 *
 * Note: tmp[] is stack memory handed to the LL via dss1_io.data, so
 * divert_if.ll_cmd() must consume it before this function returns
 * (NOTE(review): confirm this contract with the LL driver).
 * Returns 0 on success or a negative errno.
 */
int cf_command(int drvid, int mode,
u_char proc, char *msn,
u_char service, char *fwd_nr, ulong *procid)
{ unsigned long flags;
int retval, msnlen;
int fwd_len;
char *p, *ielenp, tmp[60];
struct call_struc *cs;
if (strchr(msn, '.')) return (-EINVAL); /* subaddress not allowed in msn */
if ((proc & 0x7F) > 2) return (-EINVAL);
proc &= 3;
p = tmp;
*p++ = 0x30; /* enumeration */
ielenp = p++; /* remember total length position */
*p++ = 0xa; /* proc tag */
*p++ = 1; /* length */
*p++ = proc & 0x7F; /* procedure to de/activate/interrogate */
*p++ = 0xa; /* service tag */
*p++ = 1; /* length */
*p++ = service; /* service to handle */
if (mode == 1)
{ if (!*fwd_nr) return (-EINVAL); /* destination missing */
if (strchr(fwd_nr, '.')) return (-EINVAL); /* subaddress not allowed */
fwd_len = strlen(fwd_nr);
*p++ = 0x30; /* number enumeration */
*p++ = fwd_len + 2; /* complete forward to len */
*p++ = 0x80; /* fwd to nr */
*p++ = fwd_len; /* length of number */
strcpy(p, fwd_nr); /* copy number */
p += fwd_len; /* pointer beyond fwd */
} /* activate */
msnlen = strlen(msn);
*p++ = 0x80; /* msn number */
if (msnlen > 1)
{ *p++ = msnlen; /* length */
strcpy(p, msn);
p += msnlen;
}
else *p++ = 0; /* zero length = all MSNs */
*ielenp = p - ielenp - 1; /* set total IE length */
/* allocate mem for information struct */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (-ENOMEM); /* no memory */
init_timer(&cs->timer);
cs->info[0] = '\0';
cs->timer.function = deflect_timer_expire;
cs->timer.data = (ulong) cs; /* pointer to own structure */
cs->ics.driver = drvid;
cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
cs->ics.arg = DSS1_CMD_INVOKE; /* invoke supplementary service */
cs->ics.parm.dss1_io.proc = (mode == 1) ? 7 : (mode == 2) ? 11 : 8; /* operation */
cs->ics.parm.dss1_io.timeout = 4000; /* from ETS 300 207-1 */
cs->ics.parm.dss1_io.datalen = p - tmp; /* total len */
cs->ics.parm.dss1_io.data = tmp; /* start of buffer */
spin_lock_irqsave(&divert_lock, flags);
cs->ics.parm.dss1_io.ll_id = next_id++; /* id for callback */
spin_unlock_irqrestore(&divert_lock, flags);
*procid = cs->ics.parm.dss1_io.ll_id;
/* pre-build the /proc report line shown to the user process */
sprintf(cs->info, "%d 0x%lx %s%s 0 %s %02x %d%s%s\n",
(!mode) ? DIVERT_DEACTIVATE : (mode == 1) ? DIVERT_ACTIVATE : DIVERT_REPORT,
cs->ics.parm.dss1_io.ll_id,
(mode != 2) ? "" : "0 ",
divert_if.drv_to_name(cs->ics.driver),
msn,
service & 0xFF,
proc,
(mode != 1) ? "" : " 0 ",
(mode != 1) ? "" : fwd_nr);
retval = divert_if.ll_cmd(&cs->ics); /* execute command */
if (!retval)
{ cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
spin_unlock_irqrestore(&divert_lock, flags);
}
else
kfree(cs);
return (retval);
} /* cf_command */
/****************************************/
/* handle a external deflection command */
/****************************************/
/*
 * deflect_extern_action() - act on a pending call on behalf of the
 * external (user space) deflection process.
 * @cmd:    0 = hangup, 1 = alert, 2 = redirect to @to_nr
 * @callid: divert_id of the process to act on
 * Returns the LL command result, or -EINVAL for bad cmd/callid/state.
 *
 * NOTE(review): the list search below runs without divert_lock, so it
 * can race with entry deletion -- confirm callers serialise externally.
 */
int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
{ struct call_struc *cs;
isdn_ctrl ic;
unsigned long flags;
int i;
if ((cmd & 0x7F) > 2) return (-EINVAL); /* invalid command */
cs = divert_head; /* start of parameter list */
while (cs)
{ if (cs->divert_id == callid) break; /* found */
cs = cs->next;
} /* search entry */
if (!cs) return (-EINVAL); /* invalid callid */
ic.driver = cs->ics.driver;
ic.arg = cs->ics.arg;
i = -EINVAL;
if (cs->akt_state == DEFLECT_AUTODEL) return (i); /* no valid call */
switch (cmd & 0x7F)
{ case 0: /* hangup */
del_timer(&cs->timer);
ic.command = ISDN_CMD_HANGUP;
i = divert_if.ll_cmd(&ic);
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
break;
case 1: /* alert */
if (cs->akt_state == DEFLECT_ALERT) return (0); /* already alerting */
cmd &= 0x7F; /* never wait */
del_timer(&cs->timer);
ic.command = ISDN_CMD_ALERT;
if ((i = divert_if.ll_cmd(&ic)))
{
/* alert failed: schedule auto-deletion */
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
}
else
cs->akt_state = DEFLECT_ALERT;
break;
case 2: /* redir */
del_timer(&cs->timer);
strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
ic.command = ISDN_CMD_REDIR;
if ((i = divert_if.ll_cmd(&ic)))
{
/* redirection failed: schedule auto-deletion */
spin_lock_irqsave(&divert_lock, flags);
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
}
else
cs->akt_state = DEFLECT_ALERT;
break;
} /* switch */
return (i);
} /* deflect_extern_action */
/********************************/
/* insert a new rule before idx */
/********************************/
/*
 * insertrule() - insert a copy of *newrule into the deflection table in
 * front of entry number idx; idx < 0 or idx past the end appends at the
 * tail.  Returns 0 on success or -ENOMEM.
 *
 * Fix: the original middle-of-list path never updated the predecessor's
 * ->next pointer, so a rule inserted before an interior entry was
 * invisible to forward traversals (getruleptr()/isdn_divert_icall()).
 */
int insertrule(int idx, divert_rule *newrule)
{
	struct deflect_struc *ds, *at = NULL;
	unsigned long flags;

	ds = kmalloc(sizeof(struct deflect_struc), GFP_KERNEL);
	if (!ds)
		return (-ENOMEM);		/* no memory */
	ds->rule = *newrule;			/* set rule */

	spin_lock_irqsave(&divert_lock, flags);

	if (idx >= 0)
	{	/* locate the entry we are to be inserted in front of */
		at = table_head;
		while ((at) && (idx > 0))
		{	idx--;
			at = at->next;
		}
	}

	if (!at)
	{	/* append at the tail (also covers an empty table) */
		ds->prev = table_tail;		/* previous entry */
		ds->next = NULL;		/* end of chain */
		if (ds->prev)
			ds->prev->next = ds;	/* last forward */
		else
			table_head = ds;	/* is first entry */
		table_tail = ds;		/* end of queue */
	}
	else
	{	/* link in immediately before "at" */
		ds->next = at;			/* next entry */
		ds->prev = at->prev;		/* prev entry */
		at->prev = ds;			/* backward chain old element */
		if (!ds->prev)
			table_head = ds;	/* first element */
		else
			ds->prev->next = ds;	/* fix: complete forward chain */
	}

	spin_unlock_irqrestore(&divert_lock, flags);
	return (0);
} /* insertrule */
/***********************************/
/* delete the rule at position idx */
/***********************************/
/*
 * deleterule() - remove rule number idx from the deflection table;
 * a negative idx flushes the whole table.  Returns 0 on success,
 * -EINVAL when idx is beyond the end of the table.
 */
int deleterule(int idx)
{ struct deflect_struc *ds, *ds1;
unsigned long flags;
if (idx < 0)
{ /* flush: detach the whole list under the lock, free outside it */
spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
table_head = NULL;
table_tail = NULL;
spin_unlock_irqrestore(&divert_lock, flags);
while (ds)
{ ds1 = ds;
ds = ds->next;
kfree(ds1);
}
return (0);
}
spin_lock_irqsave(&divert_lock, flags);
ds = table_head;
while ((ds) && (idx > 0))
{ idx--;
ds = ds->next;
}
if (!ds)
{
spin_unlock_irqrestore(&divert_lock, flags);
return (-EINVAL);
}
if (ds->next)
ds->next->prev = ds->prev; /* backward chain */
else
table_tail = ds->prev; /* end of chain */
if (ds->prev)
ds->prev->next = ds->next; /* forward chain */
else
table_head = ds->next; /* start of chain */
spin_unlock_irqrestore(&divert_lock, flags);
kfree(ds);
return (0);
} /* deleterule */
/*******************************************/
/* get a pointer to a specific rule number */
/*******************************************/
/*
 * getruleptr() - return a pointer to rule number idx (0-based), or NULL
 * when idx is negative or beyond the end of the table.
 *
 * Cleanup: the original had an unreachable `break` after `return` and a
 * redundant `idx >= 0` loop condition; behaviour is unchanged.
 *
 * NOTE(review): walks table_head without divert_lock, as before --
 * callers are expected to serialise against table updates.
 */
divert_rule *getruleptr(int idx)
{
	struct deflect_struc *ds;

	if (idx < 0)
		return (NULL);
	for (ds = table_head; ds; ds = ds->next)
		if (!(idx--))
			return (&ds->rule);	/* found the requested entry */
	return (NULL);
} /* getruleptr */
/*************************************************/
/* called from common module on an incoming call */
/*************************************************/
/*
 * isdn_divert_icall() - match an incoming call against the deflection
 * rule table and start the configured action.
 * Return codes handed back to the common ISDN layer (from the code
 * below): 0 = ignore/not handled, 1 = alert (delayed deflect),
 * 2 = reject, 4 = proceed only, 5 = deflect immediately.
 */
static int isdn_divert_icall(isdn_ctrl *ic)
{ int retval = 0;
unsigned long flags;
struct call_struc *cs = NULL;
struct deflect_struc *dv;
char *p, *p1;
u_char accept;
/* first check the internal deflection table */
for (dv = table_head; dv; dv = dv->next)
{ /* scan table */
if (((dv->rule.callopt == 1) && (ic->command == ISDN_STAT_ICALLW)) ||
((dv->rule.callopt == 2) && (ic->command == ISDN_STAT_ICALL)))
continue; /* call option check */
if (!(dv->rule.drvid & (1L << ic->driver)))
continue; /* driver not matching */
if ((dv->rule.si1) && (dv->rule.si1 != ic->parm.setup.si1))
continue; /* si1 not matching */
if ((dv->rule.si2) && (dv->rule.si2 != ic->parm.setup.si2))
continue; /* si2 not matching */
/* compare our MSN against the rule pattern; '-' is a wildcard
 * matching any remainder */
p = dv->rule.my_msn;
p1 = ic->parm.setup.eazmsn;
accept = 0;
while (*p)
{ /* complete compare */
if (*p == '-')
{ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
break; /* not accepted */
if ((!*p) && (!*p1))
accept = 1;
} /* complete compare */
if (!accept) continue; /* not accepted */
/* same pattern match for the caller's number, unless the rule
 * says "0" (any caller) and no caller id was delivered */
if ((strcmp(dv->rule.caller, "0")) || (ic->parm.setup.phone[0]))
{ p = dv->rule.caller;
p1 = ic->parm.setup.phone;
accept = 0;
while (*p)
{ /* complete compare */
if (*p == '-')
{ accept = 1; /* call accepted */
break;
}
if (*p++ != *p1++)
break; /* not accepted */
if ((!*p) && (!*p1))
accept = 1;
} /* complete compare */
if (!accept) continue; /* not accepted */
}
switch (dv->rule.action)
{ case DEFLECT_IGNORE:
return (0);
break;
case DEFLECT_ALERT:
case DEFLECT_PROCEED:
case DEFLECT_REPORT:
case DEFLECT_REJECT:
if (dv->rule.action == DEFLECT_PROCEED)
if ((!if_used) || ((!extern_wait_max) && (!dv->rule.waittime)))
return (0); /* no external deflection needed */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (0); /* no memory */
init_timer(&cs->timer);
cs->info[0] = '\0';
cs->timer.function = deflect_timer_expire;
cs->timer.data = (ulong) cs; /* pointer to own structure */
cs->ics = *ic; /* copy incoming data */
if (!cs->ics.parm.setup.phone[0]) strcpy(cs->ics.parm.setup.phone, "0");
if (!cs->ics.parm.setup.eazmsn[0]) strcpy(cs->ics.parm.setup.eazmsn, "0");
cs->ics.parm.setup.screen = dv->rule.screen;
if (dv->rule.waittime)
cs->timer.expires = jiffies + (HZ * dv->rule.waittime);
else
if (dv->rule.action == DEFLECT_PROCEED)
cs->timer.expires = jiffies + (HZ * extern_wait_max);
else
cs->timer.expires = 0; /* no timer = immediate action */
cs->akt_state = dv->rule.action;
spin_lock_irqsave(&divert_lock, flags);
cs->divert_id = next_id++; /* new sequence number */
spin_unlock_irqrestore(&divert_lock, flags);
cs->prev = NULL;
if (cs->akt_state == DEFLECT_ALERT)
{ strcpy(cs->deflect_dest, dv->rule.to_nr);
if (!cs->timer.expires)
{ /* no wait time: deflect immediately in-band */
strcpy(ic->parm.setup.eazmsn, "Testtext direct");
ic->parm.setup.screen = dv->rule.screen;
strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
retval = 5;
}
else
retval = 1; /* alerting */
}
else
{ cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
/* report line for the user-space diversion daemon */
sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
cs->akt_state,
cs->divert_id,
divert_if.drv_to_name(cs->ics.driver),
(ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
cs->ics.parm.setup.phone,
cs->ics.parm.setup.eazmsn,
cs->ics.parm.setup.si1,
cs->ics.parm.setup.si2,
cs->ics.parm.setup.screen,
dv->rule.waittime,
cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
(dv->rule.action == DEFLECT_REJECT))
{ put_info_buffer(cs->info);
kfree(cs); /* remove */
return ((dv->rule.action == DEFLECT_REPORT) ? 0 : 2); /* nothing to do */
}
break;
default:
return (0); /* ignore call */
break;
} /* switch action */
break;
} /* scan_table */
if (cs)
{ /* queue the new process and arm its timer */
cs->prev = NULL;
spin_lock_irqsave(&divert_lock, flags);
cs->next = divert_head;
divert_head = cs;
if (cs->timer.expires) add_timer(&cs->timer);
spin_unlock_irqrestore(&divert_lock, flags);
put_info_buffer(cs->info);
return (retval);
}
else
return (0);
} /* isdn_divert_icall */
/* Flush every pending diversion process: stop its timer and free it.
 * The list head is detached under divert_lock; the walk itself runs
 * inside the same critical section, exactly as before. */
void deleteprocs(void)
{
	struct call_struc *walk, *gone;
	unsigned long flags;

	spin_lock_irqsave(&divert_lock, flags);
	walk = divert_head;
	divert_head = NULL;
	while (walk) {
		del_timer(&walk->timer);
		gone = walk;
		walk = walk->next;
		kfree(gone);
	}
	spin_unlock_irqrestore(&divert_lock, flags);
} /* deleteprocs */
/****************************************************/
/* put a address including address type into buffer */
/****************************************************/
/*
 * put_address() - decode a BER-encoded address (tag 0xA1 structured or
 * 0x80 plain) from p/len into the text buffer st as "<type> <digits>".
 * Returns the number of input bytes consumed, 0 on any format error.
 *
 * NOTE(review): the output copy below is not bounded against the
 * caller's buffer (st[90] in interrogate_success()); a long encoded
 * number could overflow it -- verify the maximum on-wire length.
 */
static int put_address(char *st, u_char *p, int len)
{ u_char retval = 0;
u_char adr_typ = 0; /* network standard */
if (len < 2) return (retval);
if (*p == 0xA1)
{ /* structured form carrying an explicit address type */
retval = *(++p) + 2; /* total length */
if (retval > len) return (0); /* too short */
len = retval - 2; /* remaining length */
if (len < 3) return (0);
if ((*(++p) != 0x0A) || (*(++p) != 1)) return (0);
adr_typ = *(++p);
len -= 3;
p++;
if (len < 2) return (0);
if (*p++ != 0x12) return (0);
if (*p > len) return (0); /* check number length */
len = *p++;
}
else
if (*p == 0x80)
{ /* plain number string, implicit type 0 */
retval = *(++p) + 2; /* total length */
if (retval > len) return (0);
len = retval - 2;
p++;
}
else
return (0); /* invalid address information */
sprintf(st, "%d ", adr_typ);
st += strlen(st);
if (!len)
*st++ = '-'; /* empty number rendered as '-' */
else
while (len--)
*st++ = *p++;
*st = '\0';
return (retval);
} /* put_address */
/*************************************/
/* report a successful interrogation */
/*************************************/
/*
 * interrogate_success() - decode a successful DSS1 interrogation result
 * (a BER sequence of per-MSN diversion entries) and emit one report
 * line per entry via put_info_buffer().  Returns 0 on success or a
 * negative internal error code (-100..-117) identifying the parse step
 * that failed.
 */
static int interrogate_success(isdn_ctrl *ic, struct call_struc *cs)
{ char *src = ic->parm.dss1_io.data;
int restlen = ic->parm.dss1_io.datalen;
int cnt = 1;
u_char n, n1;
char st[90], *p, *stp;
if (restlen < 2) return (-100); /* frame too short */
if (*src++ != 0x30) return (-101);
if ((n = *src++) > 0x81) return (-102); /* invalid length field */
restlen -= 2; /* remaining bytes */
/* resolve the three BER length encodings: indefinite (0x80,
 * terminated by two zero bytes), one-byte-extended (0x81), short */
if (n == 0x80)
{ if (restlen < 2) return (-103);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-104);
restlen -= 2;
}
else
if (n == 0x81)
{ n = *src++;
restlen--;
if (n > restlen) return (-105);
restlen = n;
}
else
if (n > restlen) return (-106);
else
restlen = n; /* standard format */
if (restlen < 3) return (-107); /* no procedure */
if ((*src++ != 2) || (*src++ != 1) || (*src++ != 0x0B)) return (-108);
restlen -= 3;
if (restlen < 2) return (-109); /* list missing */
if (*src == 0x31)
{ /* optional result-list wrapper, same length decoding again */
src++;
if ((n = *src++) > 0x81) return (-110); /* invalid length field */
restlen -= 2; /* remaining bytes */
if (n == 0x80)
{ if (restlen < 2) return (-111);
if ((*(src + restlen - 1)) || (*(src + restlen - 2))) return (-112);
restlen -= 2;
}
else
if (n == 0x81)
{ n = *src++;
restlen--;
if (n > restlen) return (-113);
restlen = n;
}
else
if (n > restlen) return (-114);
else
restlen = n; /* standard format */
} /* result list header */
/* one loop iteration per diversion entry in the list */
while (restlen >= 2)
{ stp = st;
sprintf(stp, "%d 0x%lx %d %s ", DIVERT_REPORT, ic->parm.dss1_io.ll_id,
cnt++, divert_if.drv_to_name(ic->driver));
stp += strlen(stp);
if (*src++ != 0x30) return (-115); /* invalid enum */
n = *src++;
restlen -= 2;
if (n > restlen) return (-116); /* enum length wrong */
restlen -= n;
p = src; /* one entry */
src += n;
if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
stp += strlen(stp);
p += n1;
n -= n1;
if (n < 6) continue; /* no service and proc */
if ((*p++ != 0x0A) || (*p++ != 1)) continue;
sprintf(stp, " 0x%02x ", (*p++) & 0xFF);
stp += strlen(stp);
if ((*p++ != 0x0A) || (*p++ != 1)) continue;
sprintf(stp, "%d ", (*p++) & 0xFF);
stp += strlen(stp);
n -= 6;
if (n > 2)
{ /* optional forwarded-to number */
if (*p++ != 0x30) continue;
if (*p > (n - 2)) continue;
n = *p++;
if (!(n1 = put_address(stp, p, n & 0xFF))) continue;
stp += strlen(stp);
}
sprintf(stp, "\n");
put_info_buffer(st);
} /* while restlen */
if (restlen) return (-117);
return (0);
} /* interrogate_success */
/*********************************************/
/* callback for protocol specific extensions */
/*********************************************/
/*
 * prot_stat_callback() - handle DSS1 protocol status events from the
 * lower layer: match the event against the pending invoke entries on
 * divert_head, report the outcome to user space, and free the entry
 * once its driver has been invalidated.  Always returns 0.
 *
 * Fix: the original only advanced `cs` inside the driver-match branch,
 * so a list containing any entry for a *different* driver made the
 * search loop spin forever.  The walk now always advances.
 */
static int prot_stat_callback(isdn_ctrl *ic)
{
	struct call_struc *cs, *cs1;
	int i;
	unsigned long flags;

	cs = divert_head;	/* start of list */
	cs1 = NULL;
	while (cs)
	{
		if (ic->driver != cs->ics.driver)
		{	/* fix: skip non-matching entries instead of spinning */
			cs = cs->next;
			continue;
		}
		switch (cs->ics.arg)
		{ case DSS1_CMD_INVOKE:
			if ((cs->ics.parm.dss1_io.ll_id == ic->parm.dss1_io.ll_id) &&
			    (cs->ics.parm.dss1_io.hl_id == ic->parm.dss1_io.hl_id))
			{ switch (ic->arg)
				{ case DSS1_STAT_INVOKE_ERR:
					sprintf(cs->info, "128 0x%lx 0x%x\n",
						ic->parm.dss1_io.ll_id,
						ic->parm.dss1_io.timeout);
					put_info_buffer(cs->info);
					break;
				case DSS1_STAT_INVOKE_RES:
					switch (cs->ics.parm.dss1_io.proc)
					{ case 7:
					case 8:
						put_info_buffer(cs->info);
						break;
					case 11:
						i = interrogate_success(ic, cs);
						if (i)
							sprintf(cs->info, "%d 0x%lx %d\n", DIVERT_REPORT,
								ic->parm.dss1_io.ll_id, i);
						put_info_buffer(cs->info);
						break;
					default:
						printk(KERN_WARNING "dss1_divert: unknown proc %d\n", cs->ics.parm.dss1_io.proc);
						break;
					}
					break;
				default:
					printk(KERN_WARNING "dss1_divert unknown invoke answer %lx\n", ic->arg);
					break;
				}
				cs1 = cs; /* remember structure */
				cs = NULL;
				continue; /* abort search */
			} /* id found */
			break;
		case DSS1_CMD_INVOKE_ABORT:
			printk(KERN_WARNING "dss1_divert unhandled invoke abort\n");
			break;
		default:
			printk(KERN_WARNING "dss1_divert unknown cmd 0x%lx\n", cs->ics.arg);
			break;
		} /* switch ics.arg */
		cs = cs->next;
	}
	if (!cs1)
	{	printk(KERN_WARNING "dss1_divert unhandled process\n");
		return (0);
	}
	if (cs1->ics.driver == -1)
	{	/* entry already invalidated: unlink and free it */
		spin_lock_irqsave(&divert_lock, flags);
		del_timer(&cs1->timer);
		if (cs1->prev)
			cs1->prev->next = cs1->next; /* forward link */
		else
			divert_head = cs1->next;
		if (cs1->next)
			cs1->next->prev = cs1->prev; /* back link */
		spin_unlock_irqrestore(&divert_lock, flags);
		kfree(cs1);
	}
	return (0);
} /* prot_stat_callback */
/***************************/
/* status callback from HL */
/***************************/
/*
 * isdn_divert_stat_callback() - handle generic status events (hangup,
 * cause, redirection result) for pending diversion processes.  Marks a
 * finished entry by setting ics.driver = -1 and frees it in the same
 * pass.  Returns 0 if any entry matched the event, -1 otherwise.
 */
static int isdn_divert_stat_callback(isdn_ctrl *ic)
{ struct call_struc *cs, *cs1;
unsigned long flags;
int retval;
retval = -1;
cs = divert_head; /* start of list */
while (cs)
{ if ((ic->driver == cs->ics.driver) && (ic->arg == cs->ics.arg))
{ switch (ic->command)
{ case ISDN_STAT_DHUP:
sprintf(cs->info, "129 0x%lx\n", cs->divert_id);
del_timer(&cs->timer);
cs->ics.driver = -1; /* mark entry for deletion below */
break;
case ISDN_STAT_CAUSE:
sprintf(cs->info, "130 0x%lx %s\n", cs->divert_id, ic->parm.num);
break;
case ISDN_STAT_REDIR:
sprintf(cs->info, "131 0x%lx\n", cs->divert_id);
del_timer(&cs->timer);
cs->ics.driver = -1; /* mark entry for deletion below */
break;
default:
sprintf(cs->info, "999 0x%lx 0x%x\n", cs->divert_id, (int)(ic->command));
break;
}
put_info_buffer(cs->info);
retval = 0;
}
cs1 = cs;
cs = cs->next;
if (cs1->ics.driver == -1)
{
/* entry was marked finished above: unlink and free it */
spin_lock_irqsave(&divert_lock, flags);
if (cs1->prev)
cs1->prev->next = cs1->next; /* forward link */
else
divert_head = cs1->next;
if (cs1->next)
cs1->next->prev = cs1->prev; /* back link */
spin_unlock_irqrestore(&divert_lock, flags);
kfree(cs1);
}
}
return (retval); /* 0 if any entry matched, else -1 */
} /* isdn_divert_stat_callback */
/********************/
/* callback from ll */
/********************/
/*
 * ll_callback() - dispatch entry point registered with the lower layer.
 * Incoming calls go to the rule matcher, Euro-ISDN protocol events to
 * the invoke handler (broadcasts are swallowed), everything else to the
 * generic status handler.
 */
int ll_callback(isdn_ctrl *ic)
{
switch (ic->command)
{ case ISDN_STAT_ICALL:
case ISDN_STAT_ICALLW:
return (isdn_divert_icall(ic));
break;
case ISDN_STAT_PROT:
if ((ic->arg & 0xFF) == ISDN_PTYPE_EURO)
{ if (ic->arg != DSS1_STAT_INVOKE_BRD)
return (prot_stat_callback(ic));
else
return (0); /* DSS1 invoke broadcast */
}
else
return (-1); /* protocol not euro */
default:
return (isdn_divert_stat_callback(ic));
}
} /* ll_callback */
| gpl-2.0 |
iamroot9C-arm/linux | drivers/tty/serial/dz.c | 5085 | 23142 | /*
* dz.c: Serial port driver for DECstations equipped
* with the DZ chipset.
*
* Copyright (C) 1998 Olivier A. D. Lebaillif
*
* Email: olivier.lebaillif@ifrsys.com
*
* Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki
*
* [31-AUG-98] triemer
* Changed IRQ to use Harald's dec internals interrupts.h
* removed base_addr code - moving address assignment to setup.c
* Changed name of dz_init to rs_init to be consistent with tc code
* [13-NOV-98] triemer fixed code to receive characters
* after patches by harald to irq code.
* [09-JAN-99] triemer minor fix for schedule - due to removal of timeout
* field from "current" - somewhere between 2.1.121 and 2.1.131
Qua Jun 27 15:02:26 BRT 2001
* [27-JUN-2001] Arnaldo Carvalho de Melo <acme@conectiva.com.br> - cleanups
*
* Parts (C) 1999 David Airlie, airlied@linux.ie
* [07-SEP-99] Bugfixes
*
* [06-Jan-2002] Russell King <rmk@arm.linux.org.uk>
* Converted to new serial core
*/
#undef DEBUG_DZ
#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/atomic.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/machtype.h>
#include <asm/dec/prom.h>
#include <asm/dec/system.h>
#include "dz.h"
MODULE_DESCRIPTION("DECstation DZ serial driver");
MODULE_LICENSE("GPL");
static char dz_name[] __initdata = "DECstation DZ serial driver version ";
static char dz_version[] __initdata = "1.04";
/* Per-line state: one dz_port per serial line on the DZ multiplexer. */
struct dz_port {
struct dz_mux *mux; /* back-pointer to the owning multiplexer */
struct uart_port port; /* generic serial-core port */
unsigned int cflag; /* cached line-parameter (LPR) value */
};
/* Whole-chip state: the DZ serves DZ_NB_PORT lines from one register set. */
struct dz_mux {
struct dz_port dport[DZ_NB_PORT];
atomic_t map_guard; /* refcount for the shared register mapping */
atomic_t irq_guard; /* refcount for the shared IRQ */
int initialised;
};
static struct dz_mux dz_mux;
/* Recover the dz_port wrapper from the embedded uart_port. */
static inline struct dz_port *to_dport(struct uart_port *uport)
{
return container_of(uport, struct dz_port, port);
}
/*
* ------------------------------------------------------------
* dz_in () and dz_out ()
*
* These routines are used to access the registers of the DZ
* chip, hiding relocation differences between implementation.
* ------------------------------------------------------------
*/
/* Read a 16-bit DZ register at byte offset @offset from the port base. */
static u16 dz_in(struct dz_port *dport, unsigned offset)
{
void __iomem *addr = dport->port.membase + offset;
return readw(addr);
}
/* Write a 16-bit DZ register at byte offset @offset from the port base. */
static void dz_out(struct dz_port *dport, unsigned offset, u16 value)
{
void __iomem *addr = dport->port.membase + offset;
writew(value, addr);
}
/*
* ------------------------------------------------------------
* rs_stop () and rs_start ()
*
* These routines are called before setting or resetting
* tty->stopped. They enable or disable transmitter interrupts,
* as necessary.
* ------------------------------------------------------------
*/
/* Disable transmission on this line by clearing its bit in TCR. */
static void dz_stop_tx(struct uart_port *uport)
{
struct dz_port *dport = to_dport(uport);
u16 tmp, mask = 1 << dport->port.line;
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp &= ~mask; /* clear the TX flag */
dz_out(dport, DZ_TCR, tmp);
}
/* Enable transmission on this line by setting its bit in TCR. */
static void dz_start_tx(struct uart_port *uport)
{
struct dz_port *dport = to_dport(uport);
u16 tmp, mask = 1 << dport->port.line;
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp |= mask; /* set the TX flag */
dz_out(dport, DZ_TCR, tmp);
}
/* Disable the receiver for this line via its cached LPR value. */
static void dz_stop_rx(struct uart_port *uport)
{
struct dz_port *dport = to_dport(uport);
dport->cflag &= ~DZ_RXENAB;
dz_out(dport, DZ_LPR, dport->cflag);
}
/* Modem-status interrupts are not available on this hardware. */
static void dz_enable_ms(struct uart_port *uport)
{
/* nothing to do */
}
/*
* ------------------------------------------------------------
*
* Here start the interrupt handling routines. All of the following
* subroutines are declared as inline and are folded into
* dz_interrupt. They were separated out for readability's sake.
*
* Note: dz_interrupt() is a "fast" interrupt, which means that it
* runs with interrupts turned off. People who may want to modify
* dz_interrupt() should try to keep the interrupt handler as fast as
* possible. After you are done making modifications, it is not a bad
* idea to do:
*
* make drivers/serial/dz.s
*
* and look at the resulting assemble code in dz.s.
*
* ------------------------------------------------------------
*/
/*
* ------------------------------------------------------------
* receive_char ()
*
* This routine deals with inputs from any lines.
* ------------------------------------------------------------
*/
/*
 * Drain the shared RX FIFO.  Each RBUF read returns one character
 * tagged with the line it arrived on plus error bits, so a single
 * interrupt may deliver characters for several lines; tty buffers are
 * pushed once per line at the end.
 */
static inline void dz_receive_chars(struct dz_mux *mux)
{
struct uart_port *uport;
struct dz_port *dport = &mux->dport[0];
struct tty_struct *tty = NULL;
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
unsigned char ch, flag;
u16 status;
int i;
while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
dport = &mux->dport[LINE(status)];
uport = &dport->port;
tty = uport->state->port.tty; /* point to the proper dev */
ch = UCHAR(status); /* grab the char */
flag = TTY_NORMAL;
icount = &uport->icount;
icount->rx++;
if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) {
/*
* There is no separate BREAK status bit, so treat
* null characters with framing errors as BREAKs;
* normally, otherwise. For this move the Framing
* Error bit to a simulated BREAK bit.
*/
if (!ch) {
status |= (status & DZ_FERR) >>
(ffs(DZ_FERR) - ffs(DZ_BREAK));
status &= ~DZ_FERR;
}
/* Handle SysRq/SAK & keep track of the statistics. */
if (status & DZ_BREAK) {
icount->brk++;
if (uart_handle_break(uport))
continue;
} else if (status & DZ_FERR)
icount->frame++;
else if (status & DZ_PERR)
icount->parity++;
if (status & DZ_OERR)
icount->overrun++;
status &= uport->read_status_mask;
if (status & DZ_BREAK)
flag = TTY_BREAK;
else if (status & DZ_FERR)
flag = TTY_FRAME;
else if (status & DZ_PERR)
flag = TTY_PARITY;
}
if (uart_handle_sysrq_char(uport, ch))
continue;
uart_insert_char(uport, status, DZ_OERR, ch, flag);
lines_rx[LINE(status)] = 1; /* remember which lines got data */
}
/* one flip-buffer push per line that actually received characters */
for (i = 0; i < DZ_NB_PORT; i++)
if (lines_rx[i])
tty_flip_buffer_push(mux->dport[i].port.state->port.tty);
}
/*
* ------------------------------------------------------------
* transmit_char ()
*
* This routine deals with outputs to any lines.
* ------------------------------------------------------------
*/
/*
 * Feed the transmitter.  CSR tells us which line the chip wants a
 * character for; x_char (XON/XOFF) takes priority over the circular
 * buffer.  The DZ has no TX FIFO, so exactly one character is sent per
 * interrupt.
 */
static inline void dz_transmit_chars(struct dz_mux *mux)
{
struct dz_port *dport = &mux->dport[0];
struct circ_buf *xmit;
unsigned char tmp;
u16 status;
status = dz_in(dport, DZ_CSR);
dport = &mux->dport[LINE(status)]; /* line the chip is asking for */
xmit = &dport->port.state->xmit;
if (dport->port.x_char) { /* XON/XOFF chars */
dz_out(dport, DZ_TDR, dport->port.x_char);
dport->port.icount.tx++;
dport->port.x_char = 0;
return;
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
spin_unlock(&dport->port.lock);
return;
}
/*
* If something to do... (remember the dz has no output fifo,
* so we go one char at a time) :-<
*/
tmp = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
dz_out(dport, DZ_TDR, tmp);
dport->port.icount.tx++;
if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
uart_write_wakeup(&dport->port);
/* Are we are done. */
if (uart_circ_empty(xmit)) {
spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
spin_unlock(&dport->port.lock);
}
}
/*
* ------------------------------------------------------------
* check_modem_status()
*
* DS 3100 & 5100: Only valid for the MODEM line, duh!
* DS 5000/200: Valid for the MODEM and PRINTER line.
* ------------------------------------------------------------
*/
/* Account a DSR change on the dedicated modem line (the only line with
 * modem status on this hardware); all other lines are ignored. */
static inline void check_modem_status(struct dz_port *dport)
{
/*
* FIXME:
* 1. No status change interrupt; use a timer.
* 2. Handle the 3100/5000 as appropriate. --macro
*/
u16 status;
/* If not the modem line just return. */
if (dport->port.line != DZ_MODEM)
return;
status = dz_in(dport, DZ_MSR);
/* it's easy, since DSR2 is the only bit in the register */
if (status)
dport->port.icount.dsr++;
}
/*
* ------------------------------------------------------------
* dz_interrupt ()
*
* this is the main interrupt routine for the DZ chip.
* It deals with the multiple ports.
* ------------------------------------------------------------
*/
/* Shared interrupt handler: CSR says whether RX and/or TX service is
 * needed; both are demultiplexed to the proper line by their helpers. */
static irqreturn_t dz_interrupt(int irq, void *dev_id)
{
struct dz_mux *mux = dev_id;
struct dz_port *dport = &mux->dport[0];
u16 status;
/* get the reason why we just got an irq */
status = dz_in(dport, DZ_CSR);
if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
dz_receive_chars(mux);
if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
dz_transmit_chars(mux);
return IRQ_HANDLED;
}
/*
* -------------------------------------------------------------------
* Here ends the DZ interrupt routines.
* -------------------------------------------------------------------
*/
/* Report modem control lines; CAR/CTS are always asserted, DSR is only
 * real on the modem line.  NOTE(review): a set DZ_MODEM_DSR bit *clears*
 * TIOCM_DSR here, i.e. the MSR bit appears active-low -- confirm
 * against the DZ hardware documentation. */
static unsigned int dz_get_mctrl(struct uart_port *uport)
{
/*
* FIXME: Handle the 3100/5000 as appropriate. --macro
*/
struct dz_port *dport = to_dport(uport);
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
if (dport->port.line == DZ_MODEM) {
if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR)
mctrl &= ~TIOCM_DSR;
}
return mctrl;
}
/*
 * Drive the modem control outputs.  Only DTR on the modem line is
 * controllable; note the TCR bit is inverted with respect to TIOCM_DTR
 * (setting DTR clears DZ_MODEM_DTR and vice versa).
 */
static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
{
	/*
	 * FIXME: Handle the 3100/5000 as appropriate. --macro
	 */
	struct dz_port *dport = to_dport(uport);
	u16 tmp;
	if (dport->port.line == DZ_MODEM) {
		tmp = dz_in(dport, DZ_TCR);
		if (mctrl & TIOCM_DTR)
			tmp &= ~DZ_MODEM_DTR;
		else
			tmp |= DZ_MODEM_DTR;
		dz_out(dport, DZ_TCR, tmp);
	}
}
/*
* -------------------------------------------------------------------
* startup ()
*
* various initialization tasks
* -------------------------------------------------------------------
*/
/*
 * Bring a port up.  The IRQ and the RX/TX interrupt enables are shared
 * across the whole mux, so they are claimed only when the first port is
 * opened; irq_guard counts currently-open ports.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int dz_startup(struct uart_port *uport)
{
	struct dz_port *dport = to_dport(uport);
	struct dz_mux *mux = dport->mux;
	unsigned long flags;
	int irq_guard;
	int ret;
	u16 tmp;
	irq_guard = atomic_add_return(1, &mux->irq_guard);
	if (irq_guard != 1)
		return 0;	/* another open port already set things up */
	ret = request_irq(dport->port.irq, dz_interrupt,
			  IRQF_SHARED, "dz", mux);
	if (ret) {
		/* Roll the open count back on failure. */
		atomic_add(-1, &mux->irq_guard);
		printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq);
		return ret;
	}
	spin_lock_irqsave(&dport->port.lock, flags);
	/* Enable interrupts. */
	tmp = dz_in(dport, DZ_CSR);
	tmp |= DZ_RIE | DZ_TIE;
	dz_out(dport, DZ_CSR, tmp);
	spin_unlock_irqrestore(&dport->port.lock, flags);
	return 0;
}
/*
* -------------------------------------------------------------------
* shutdown ()
*
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on.
* -------------------------------------------------------------------
*/
static void dz_shutdown(struct uart_port *uport)
{
	struct dz_port *dport = to_dport(uport);
	struct dz_mux *mux = dport->mux;
	unsigned long flags;
	int irq_guard;
	u16 tmp;
	spin_lock_irqsave(&dport->port.lock, flags);
	dz_stop_tx(&dport->port);
	spin_unlock_irqrestore(&dport->port.lock, flags);
	/*
	 * The IRQ and the interrupt enables are shared by the whole mux;
	 * release them only when the last open port goes away.
	 */
	irq_guard = atomic_add_return(-1, &mux->irq_guard);
	if (!irq_guard) {
		/* Disable interrupts. */
		tmp = dz_in(dport, DZ_CSR);
		tmp &= ~(DZ_RIE | DZ_TIE);
		dz_out(dport, DZ_CSR, tmp);
		free_irq(dport->port.irq, mux);
	}
}
/*
* -------------------------------------------------------------------
* dz_tx_empty() -- get the transmitter empty status
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not be done when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
* -------------------------------------------------------------------
*/
static unsigned int dz_tx_empty(struct uart_port *uport)
{
	struct dz_port *dport = to_dport(uport);
	unsigned short line_bit = 1 << dport->port.line;

	/* The transmitter is idle iff our line is disabled in the TCR. */
	return (dz_in(dport, DZ_TCR) & line_bit) ? 0 : TIOCSER_TEMT;
}
static void dz_break_ctl(struct uart_port *uport, int break_state)
{
	/*
	 * FIXME: Can't access BREAK bits in TDR easily;
	 * reuse the code for polled TX. --macro
	 */
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	unsigned short tmp, mask = 1 << dport->port.line;
	spin_lock_irqsave(&uport->lock, flags);
	/* Set/clear this line's bit in the TCR to start/stop the break. */
	tmp = dz_in(dport, DZ_TCR);
	if (break_state)
		tmp |= mask;
	else
		tmp &= ~mask;
	dz_out(dport, DZ_TCR, tmp);
	spin_unlock_irqrestore(&uport->lock, flags);
}
/*
 * Translate a numeric baud rate into the corresponding DZ LPR speed
 * code, or -1 if the rate is not supported by the hardware.
 */
static int dz_encode_baud_rate(unsigned int baud)
{
	static const struct {
		unsigned int baud;
		int code;
	} dz_baud_table[] = {
		{   50, DZ_B50   }, {   75, DZ_B75   },
		{  110, DZ_B110  }, {  134, DZ_B134  },
		{  150, DZ_B150  }, {  300, DZ_B300  },
		{  600, DZ_B600  }, { 1200, DZ_B1200 },
		{ 1800, DZ_B1800 }, { 2000, DZ_B2000 },
		{ 2400, DZ_B2400 }, { 3600, DZ_B3600 },
		{ 4800, DZ_B4800 }, { 7200, DZ_B7200 },
		{ 9600, DZ_B9600 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(dz_baud_table) / sizeof(dz_baud_table[0]); i++)
		if (dz_baud_table[i].baud == baud)
			return dz_baud_table[i].code;

	return -1;	/* unsupported rate */
}
/*
 * Reset the chip once per mux (guarded by mux->initialised) and enable
 * the master scan.
 */
static void dz_reset(struct dz_port *dport)
{
	struct dz_mux *mux = dport->mux;
	if (mux->initialised)
		return;
	dz_out(dport, DZ_CSR, DZ_CLR);
	/* Busy-wait until the hardware drops the clear/reset bit. */
	while (dz_in(dport, DZ_CSR) & DZ_CLR);
	iob();
	/* Enable scanning. */
	dz_out(dport, DZ_CSR, DZ_MSE);
	mux->initialised = 1;
}
/*
 * Program the line parameters: character size, stop bits, parity, baud
 * rate and receiver enable, then write the LPR and update the error
 * masks.  The LPR expects the line number in its low bits, which is why
 * cflag is seeded with port.line.
 *
 * If the requested baud rate cannot be encoded, the old rate is kept;
 * failing that, 9600 is used and written back into the termios.
 */
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
			   struct ktermios *old_termios)
{
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	unsigned int cflag, baud;
	int bflag;

	cflag = dport->port.line;

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		cflag |= DZ_CS5;
		break;
	case CS6:
		cflag |= DZ_CS6;
		break;
	case CS7:
		cflag |= DZ_CS7;
		break;
	case CS8:
	default:
		cflag |= DZ_CS8;
	}

	if (termios->c_cflag & CSTOPB)
		cflag |= DZ_CSTOPB;
	if (termios->c_cflag & PARENB)
		cflag |= DZ_PARENB;
	if (termios->c_cflag & PARODD)
		cflag |= DZ_PARODD;

	baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
	bflag = dz_encode_baud_rate(baud);
	if (bflag < 0) {			/* Try to keep unchanged. */
		baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
		bflag = dz_encode_baud_rate(baud);
		if (bflag < 0) {		/* Resort to 9600. */
			baud = 9600;
			bflag = DZ_B9600;
		}
		tty_termios_encode_baud_rate(termios, baud, baud);
	}
	cflag |= bflag;

	if (termios->c_cflag & CREAD)
		cflag |= DZ_RXENAB;

	spin_lock_irqsave(&dport->port.lock, flags);

	uart_update_timeout(uport, termios->c_cflag, baud);

	dz_out(dport, DZ_LPR, cflag);
	dport->cflag = cflag;

	/* setup accept flag */
	dport->port.read_status_mask = DZ_OERR;
	if (termios->c_iflag & INPCK)
		dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
	if (termios->c_iflag & (BRKINT | PARMRK))
		dport->port.read_status_mask |= DZ_BREAK;

	/*
	 * characters to ignore -- consistently use dport->port (the same
	 * object as *uport) as done for read_status_mask above
	 */
	dport->port.ignore_status_mask = 0;
	if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
		dport->port.ignore_status_mask |= DZ_OERR;
	if (termios->c_iflag & IGNPAR)
		dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR;
	if (termios->c_iflag & IGNBRK)
		dport->port.ignore_status_mask |= DZ_BREAK;

	spin_unlock_irqrestore(&dport->port.lock, flags);
}
/*
* Hack alert!
* Required solely so that the initial PROM-based console
* works undisturbed in parallel with this one.
*/
static void dz_pm(struct uart_port *uport, unsigned int state,
		  unsigned int oldstate)
{
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	spin_lock_irqsave(&dport->port.lock, flags);
	/* Any state below 3 (power off) keeps the transmitter running. */
	if (state < 3)
		dz_start_tx(&dport->port);
	else
		dz_stop_tx(&dport->port);
	spin_unlock_irqrestore(&dport->port.lock, flags);
}
/* Human-readable name of this port type, reported to the serial core. */
static const char *dz_type(struct uart_port *uport)
{
	return "DZ";
}
static void dz_release_port(struct uart_port *uport)
{
	struct dz_mux *mux = to_dport(uport)->mux;
	int map_guard;
	iounmap(uport->membase);
	uport->membase = NULL;
	/* The MMIO region is shared; release it with the last port. */
	map_guard = atomic_add_return(-1, &mux->map_guard);
	if (!map_guard)
		release_mem_region(uport->mapbase, dec_kn_slot_size);
}
/*
 * Map the port's MMIO window if it has not been mapped yet.
 * Returns 0 on success, -ENOMEM if the mapping fails.
 */
static int dz_map_port(struct uart_port *uport)
{
	if (uport->membase)
		return 0;		/* already mapped */

	uport->membase = ioremap_nocache(uport->mapbase, dec_kn_slot_size);
	if (uport->membase)
		return 0;

	printk(KERN_ERR "dz: Cannot map MMIO\n");
	return -ENOMEM;
}
/*
 * Claim the shared MMIO region (only the first requester actually
 * reserves it; map_guard counts users) and map the port.  On mapping
 * failure the region claim is rolled back.
 */
static int dz_request_port(struct uart_port *uport)
{
	struct dz_mux *mux = to_dport(uport)->mux;
	int map_guard;
	int ret;
	map_guard = atomic_add_return(1, &mux->map_guard);
	if (map_guard == 1) {
		if (!request_mem_region(uport->mapbase, dec_kn_slot_size,
					"dz")) {
			atomic_add(-1, &mux->map_guard);
			printk(KERN_ERR
			       "dz: Unable to reserve MMIO resource\n");
			return -EBUSY;
		}
	}
	ret = dz_map_port(uport);
	if (ret) {
		/* Undo our claim; free the region if we were the last. */
		map_guard = atomic_add_return(-1, &mux->map_guard);
		if (!map_guard)
			release_mem_region(uport->mapbase, dec_kn_slot_size);
		return ret;
	}
	return 0;
}
/* Autoconfigure the port: claim resources, set the type, reset once. */
static void dz_config_port(struct uart_port *uport, int flags)
{
	struct dz_port *dport = to_dport(uport);

	if (!(flags & UART_CONFIG_TYPE))
		return;
	if (dz_request_port(uport))
		return;

	uport->type = PORT_DZ;
	dz_reset(dport);
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
*/
/* Validate a TIOCSSERIAL request: the type and IRQ must match ours. */
static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser)
{
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ)
		return -EINVAL;
	if (ser->irq != uport->irq)
		return -EINVAL;

	return 0;
}
/* uart_ops hooks wiring the serial core to this driver. */
static struct uart_ops dz_ops = {
	.tx_empty = dz_tx_empty,
	.get_mctrl = dz_get_mctrl,
	.set_mctrl = dz_set_mctrl,
	.stop_tx = dz_stop_tx,
	.start_tx = dz_start_tx,
	.stop_rx = dz_stop_rx,
	.enable_ms = dz_enable_ms,
	.break_ctl = dz_break_ctl,
	.startup = dz_startup,
	.shutdown = dz_shutdown,
	.set_termios = dz_set_termios,
	.pm = dz_pm,
	.type = dz_type,
	.release_port = dz_release_port,
	.request_port = dz_request_port,
	.config_port = dz_config_port,
	.verify_port = dz_verify_port,
};
/*
 * One-time initialization of the per-line uart_port structures.
 * The DZ11 base address depends on the machine model; all lines share
 * one IRQ and one register base.
 */
static void __init dz_init_ports(void)
{
	static int first = 1;
	unsigned long base;
	int line;
	/* Idempotent: callable from both console setup and dz_init(). */
	if (!first)
		return;
	first = 0;
	if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100)
		base = dec_kn_slot_base + KN01_DZ11;
	else
		base = dec_kn_slot_base + KN02_DZ11;
	for (line = 0; line < DZ_NB_PORT; line++) {
		struct dz_port *dport = &dz_mux.dport[line];
		struct uart_port *uport = &dport->port;
		dport->mux = &dz_mux;
		uport->irq = dec_interrupt[DEC_IRQ_DZ11];
		uport->fifosize = 1;
		uport->iotype = UPIO_MEM;
		uport->flags = UPF_BOOT_AUTOCONF;
		uport->ops = &dz_ops;
		uport->line = line;
		uport->mapbase = base;
	}
}
#ifdef CONFIG_SERIAL_DZ_CONSOLE
/*
* -------------------------------------------------------------------
* dz_console_putchar() -- transmit a character
*
* Polled transmission. This is tricky. We need to mask transmit
* interrupts so that they do not interfere, enable the transmitter
* for the line requested and then wait till the transmit scanner
* requests data for this line. But it may request data for another
* line first, in which case we have to disable its transmitter and
* repeat waiting till our line pops up. Only then the character may
* be transmitted. Finally, the state of the transmitter mask is
* restored. Welcome to the world of PDP-11!
* -------------------------------------------------------------------
*/
static void dz_console_putchar(struct uart_port *uport, int ch)
{
	struct dz_port *dport = to_dport(uport);
	unsigned long flags;
	unsigned short csr, tcr, trdy, mask;
	int loops = 10000;	/* bail out if the TX scanner never offers us */
	spin_lock_irqsave(&dport->port.lock, flags);
	csr = dz_in(dport, DZ_CSR);
	dz_out(dport, DZ_CSR, csr & ~DZ_TIE);	/* mask TX interrupts */
	tcr = dz_in(dport, DZ_TCR);
	tcr |= 1 << dport->port.line;		/* enable our transmitter */
	mask = tcr;
	dz_out(dport, DZ_TCR, mask);
	iob();
	spin_unlock_irqrestore(&dport->port.lock, flags);
	/*
	 * Wait until the transmit scanner offers our line; if it offers
	 * another line first, disable that line's transmitter in our
	 * working mask so the scanner moves on.
	 */
	do {
		trdy = dz_in(dport, DZ_CSR);
		if (!(trdy & DZ_TRDY))
			continue;
		trdy = (trdy & DZ_TLINE) >> 8;	/* which line is offered */
		if (trdy == dport->port.line)
			break;
		mask &= ~(1 << trdy);
		dz_out(dport, DZ_TCR, mask);
		iob();
		udelay(2);
	} while (--loops);
	if (loops)				/* Cannot send otherwise. */
		dz_out(dport, DZ_TDR, ch);
	/* Restore the transmitter mask and the interrupt enable state. */
	dz_out(dport, DZ_TCR, tcr);
	dz_out(dport, DZ_CSR, csr);
}
/*
* -------------------------------------------------------------------
* dz_console_print ()
*
* dz_console_print is registered for printk.
* The console must be locked when we get here.
* -------------------------------------------------------------------
*/
static void dz_console_print(struct console *co,
			     const char *str,
			     unsigned int count)
{
	struct dz_port *dport = &dz_mux.dport[co->index];
#ifdef DEBUG_DZ
	prom_printf((char *) str);	/* also echo via the PROM when debugging */
#endif
	/* Let the serial core handle CR/LF expansion and iteration. */
	uart_console_write(&dport->port, str, count, dz_console_putchar);
}
/*
 * Console setup: map the port, reset the chip, power the line up and
 * apply the "baud,parity,bits,flow" options (defaults: 9600n8, no flow).
 */
static int __init dz_console_setup(struct console *co, char *options)
{
	struct dz_port *dport = &dz_mux.dport[co->index];
	struct uart_port *uport = &dport->port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;
	ret = dz_map_port(uport);
	if (ret)
		return ret;
	spin_lock_init(&dport->port.lock);	/* For dz_pm(). */
	dz_reset(dport);
	dz_pm(uport, 0, -1);	/* power the line up */
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(&dport->port, co, baud, parity, bits, flow);
}
/* Forward declaration: the console's .data must point at the driver. */
static struct uart_driver dz_reg;
/* Boot console on ttyS; index -1 means "pick from the command line". */
static struct console dz_console = {
	.name = "ttyS",
	.write = dz_console_print,
	.device = uart_console_device,
	.setup = dz_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &dz_reg,
};
/* Register the DZ console, but only on machines without an IOASIC. */
static int __init dz_serial_console_init(void)
{
	if (IOASIC)
		return -ENXIO;

	dz_init_ports();
	register_console(&dz_console);
	return 0;
}
console_initcall(dz_serial_console_init);
#define SERIAL_DZ_CONSOLE &dz_console
#else
#define SERIAL_DZ_CONSOLE NULL
#endif /* CONFIG_SERIAL_DZ_CONSOLE */
/* Serial core registration: claims ttyS major/minors for DZ_NB_PORT lines. */
static struct uart_driver dz_reg = {
	.owner = THIS_MODULE,
	.driver_name = "serial",
	.dev_name = "ttyS",
	.major = TTY_MAJOR,
	.minor = 64,
	.nr = DZ_NB_PORT,
	.cons = SERIAL_DZ_CONSOLE,
};
/*
 * Module init: register the driver with the serial core and add every
 * DZ line.  A port that fails to register is reported but does not
 * abort the remaining lines.
 */
static int __init dz_init(void)
{
	int ret, i;

	if (IOASIC)
		return -ENXIO;	/* IOASIC machines have no DZ11 */

	printk("%s%s\n", dz_name, dz_version);

	dz_init_ports();

	ret = uart_register_driver(&dz_reg);
	if (ret)
		return ret;

	for (i = 0; i < DZ_NB_PORT; i++) {
		/* Previously the return value was silently ignored. */
		ret = uart_add_one_port(&dz_reg, &dz_mux.dport[i].port);
		if (ret)
			printk(KERN_ERR "dz: Cannot add port %d: %d\n",
			       i, ret);
	}
	return 0;
}
module_init(dz_init);
| gpl-2.0 |
hiikezoe/android_kernel_samsung_scl21 | drivers/staging/wlan-ng/p80211req.c | 8413 | 8045 | /* src/p80211/p80211req.c
*
* Request/Indication/MacMgmt interface handling functions
*
* Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
* --------------------------------------------------------------------
*
* linux-wlan
*
* The contents of this file are subject to the Mozilla Public
* License Version 1.1 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU Public License version 2 (the "GPL"), in which
* case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use
* your version of this file under the MPL, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete
* the provisions above, a recipient may use your version of this
* file under either the MPL or the GPL.
*
* --------------------------------------------------------------------
*
* Inquiries regarding the linux-wlan Open Source project can be
* made directly to:
*
* AbsoluteValue Systems Inc.
* info@linux-wlan.com
* http://www.linux-wlan.com
*
* --------------------------------------------------------------------
*
* Portions of the development of this software were funded by
* Intersil Corporation as part of PRISM(R) chipset product development.
*
* --------------------------------------------------------------------
*
* This file contains the functions, types, and macros to support the
* MLME request interface that's implemented via the device ioctls.
*
* --------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/sock.h>
#include <linux/netlink.h>
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
#include "p80211conv.h"
#include "p80211msg.h"
#include "p80211netdev.h"
#include "p80211ioctl.h"
#include "p80211metadef.h"
#include "p80211metastruct.h"
#include "p80211req.h"
static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg);
static int p80211req_mibset_mibget(wlandevice_t *wlandev,
struct p80211msg_dot11req_mibget *mib_msg,
int isget);
/*----------------------------------------------------------------
* p80211req_dorequest
*
* Handles an MLME reqest/confirm message.
*
* Arguments:
* wlandev WLAN device struct
* msgbuf Buffer containing a request message
*
* Returns:
* 0 on success, an errno otherwise
*
* Call context:
* Potentially blocks the caller, so it's a good idea to
* not call this function from an interrupt context.
----------------------------------------------------------------*/
int p80211req_dorequest(wlandevice_t *wlandev, u8 *msgbuf)
{
	int result = 0;
	struct p80211msg *msg = (struct p80211msg *) msgbuf;
	/*
	 * Check to make sure the MSD is running.  An ifstate request is
	 * additionally allowed while only HWPRESENT.
	 */
	if (!((wlandev->msdstate == WLAN_MSD_HWPRESENT &&
	       msg->msgcode == DIDmsg_lnxreq_ifstate) ||
	      wlandev->msdstate == WLAN_MSD_RUNNING ||
	      wlandev->msdstate == WLAN_MSD_FWLOAD)) {
		return -ENODEV;
	}
	/* Check Permissions: non-admin callers may only read MIBs. */
	if (!capable(CAP_NET_ADMIN) &&
	    (msg->msgcode != DIDmsg_dot11req_mibget)) {
		printk(KERN_ERR
		       "%s: only dot11req_mibget allowed for non-root.\n",
		       wlandev->name);
		return -EPERM;
	}
	/* Check for busy status: one request in flight at a time. */
	if (test_and_set_bit(1, &(wlandev->request_pending)))
		return -EBUSY;
	/* Allow p80211 to look at msg and handle if desired. */
	/* So far, all p80211 msgs are immediate, no waitq/timer necessary */
	/* This may change. */
	p80211req_handlemsg(wlandev, msg);
	/* Pass it down to wlandev via wlandev->mlmerequest */
	if (wlandev->mlmerequest != NULL)
		wlandev->mlmerequest(wlandev, msg);
	clear_bit(1, &(wlandev->request_pending));
	return result;	/* if result==0, msg->status still may contain an err */
}
/*----------------------------------------------------------------
* p80211req_handlemsg
*
* p80211 message handler. Primarily looks for messages that
* belong to p80211 and then dispatches the appropriate response.
* TODO: we don't do anything yet. Once the linuxMIB is better
* defined we'll need a get/set handler.
*
* Arguments:
* wlandev WLAN device struct
* msg message structure
*
* Returns:
* nothing (any results are set in the status field of the msg)
*
* Call context:
* Process thread
----------------------------------------------------------------*/
/*
 * p80211 message handler: intercepts the messages that p80211 itself
 * services (host WEP flags and WEP MIB get/set) before the request is
 * handed to the MSD.  Results are reported via the msg itself.
 */
static void p80211req_handlemsg(wlandevice_t *wlandev, struct p80211msg *msg)
{
	switch (msg->msgcode) {

	case DIDmsg_lnxreq_hostwep:{
			struct p80211msg_lnxreq_hostwep *req =
			    (struct p80211msg_lnxreq_hostwep *) msg;

			/* Rebuild the host encrypt/decrypt flags from the
			 * request. */
			wlandev->hostwep &=
			    ~(HOSTWEP_DECRYPT | HOSTWEP_ENCRYPT);
			if (req->decrypt.data == P80211ENUM_truth_true)
				wlandev->hostwep |= HOSTWEP_DECRYPT;
			if (req->encrypt.data == P80211ENUM_truth_true)
				wlandev->hostwep |= HOSTWEP_ENCRYPT;
			break;
		}
	case DIDmsg_dot11req_mibget:
	case DIDmsg_dot11req_mibset:{
			int isget = (msg->msgcode == DIDmsg_dot11req_mibget);
			struct p80211msg_dot11req_mibget *mib_msg =
			    (struct p80211msg_dot11req_mibget *) msg;
			p80211req_mibset_mibget(wlandev, mib_msg, isget);
			/* was an unmarked fall through to (empty) default */
			break;
		}
	default:
		break;
	}			/* switch msg->msgcode */
}
/*
 * Apply a WEP-related MIB get/set to the driver's soft state.
 * mibitem->data begins with a p80211pstr (length-prefixed string); for
 * the WEPDefaultKey attributes the raw key bytes follow that header.
 * Unknown DIDs are silently ignored; always returns 0.
 */
static int p80211req_mibset_mibget(wlandevice_t *wlandev,
				   struct p80211msg_dot11req_mibget *mib_msg,
				   int isget)
{
	p80211itemd_t *mibitem = (p80211itemd_t *) mib_msg->mibattribute.data;
	p80211pstrd_t *pstr = (p80211pstrd_t *) mibitem->data;
	u8 *key = mibitem->data + sizeof(p80211pstrd_t);
	switch (mibitem->did) {
		/* Default keys 0-3 are write-only: a get is a no-op. */
	case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey0:{
			if (!isget)
				wep_change_key(wlandev, 0, key, pstr->len);
			break;
		}
	case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey1:{
			if (!isget)
				wep_change_key(wlandev, 1, key, pstr->len);
			break;
		}
	case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey2:{
			if (!isget)
				wep_change_key(wlandev, 2, key, pstr->len);
			break;
		}
	case DIDmib_dot11smt_dot11WEPDefaultKeysTable_dot11WEPDefaultKey3:{
			if (!isget)
				wep_change_key(wlandev, 3, key, pstr->len);
			break;
		}
	case DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID:{
			u32 *data = (u32 *) mibitem->data;
			if (isget) {
				*data =
				    wlandev->hostwep & HOSTWEP_DEFAULTKEY_MASK;
			} else {
				wlandev->hostwep &= ~(HOSTWEP_DEFAULTKEY_MASK);
				wlandev->hostwep |=
				    (*data & HOSTWEP_DEFAULTKEY_MASK);
			}
			break;
		}
	case DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked:{
			u32 *data = (u32 *) mibitem->data;
			if (isget) {
				if (wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
					*data = P80211ENUM_truth_true;
				else
					*data = P80211ENUM_truth_false;
			} else {
				wlandev->hostwep &= ~(HOSTWEP_PRIVACYINVOKED);
				if (*data == P80211ENUM_truth_true)
					wlandev->hostwep |=
					    HOSTWEP_PRIVACYINVOKED;
			}
			break;
		}
	case DIDmib_dot11smt_dot11PrivacyTable_dot11ExcludeUnencrypted:{
			u32 *data = (u32 *) mibitem->data;
			if (isget) {
				if (wlandev->hostwep &
				    HOSTWEP_EXCLUDEUNENCRYPTED)
					*data = P80211ENUM_truth_true;
				else
					*data = P80211ENUM_truth_false;
			} else {
				wlandev->hostwep &=
				    ~(HOSTWEP_EXCLUDEUNENCRYPTED);
				if (*data == P80211ENUM_truth_true)
					wlandev->hostwep |=
					    HOSTWEP_EXCLUDEUNENCRYPTED;
			}
			break;
		}
	default:
		;
	}
	return 0;
}
| gpl-2.0 |
knone1/Pinaslang-Kernel | arch/powerpc/platforms/52xx/lite5200_pm.c | 1502 | 6399 | #include <linux/init.h>
#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *pic;
static struct mpc52xx_sdma __iomem *bes;
static struct mpc52xx_xlb __iomem *xlb;
static struct mpc52xx_gpio __iomem *gps;
static struct mpc52xx_gpio_wkup __iomem *gpw;
static void __iomem *pci;
static void __iomem *sram;
static const int sram_size = 0x4000; /* 16 kBytes */
static void __iomem *mbar;
static suspend_state_t lite5200_pm_target_state;
/* Both standby and suspend-to-mem are supported on this board. */
static int lite5200_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM;
}
/* Record the requested target state for the later suspend callbacks. */
static int lite5200_pm_begin(suspend_state_t state)
{
	if (!lite5200_pm_valid(state))
		return -EINVAL;

	lite5200_pm_target_state = state;
	return 0;
}
/*
 * For PM_SUSPEND_MEM: locate the MPC52xx IMMR block in the device tree,
 * map it and derive the individual register block pointers.  Standby is
 * delegated to the common mpc52xx code.
 */
static int lite5200_pm_prepare(void)
{
	struct device_node *np;
	const struct of_device_id immr_ids[] = {
		{ .compatible = "fsl,mpc5200-immr", },
		{ .compatible = "fsl,mpc5200b-immr", },
		{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
		{}
	};
	u64 regaddr64 = 0;
	const u32 *regaddr_p;
	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		return mpc52xx_pm_prepare();
	if (lite5200_pm_target_state != PM_SUSPEND_MEM)
		return -EINVAL;
	/* map registers */
	np = of_find_matching_node(NULL, immr_ids);
	regaddr_p = of_get_address(np, 0, NULL, NULL);
	if (regaddr_p)
		regaddr64 = of_translate_address(np, regaddr_p);
	of_node_put(np);
	mbar = ioremap((u32) regaddr64, 0xC000);
	if (!mbar) {
		printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__);
		return -ENOSYS;
	}
	/* Carve the IMMR mapping into the individual register blocks. */
	cdm = mbar + 0x200;
	pic = mbar + 0x500;
	gps = mbar + 0xb00;
	gpw = mbar + 0xc00;
	pci = mbar + 0xd00;
	bes = mbar + 0x1200;
	xlb = mbar + 0x1f00;
	sram = mbar + 0x8000;
	return 0;
}
/* save and restore registers not bound to any real devices */
static struct mpc52xx_cdm scdm;
static struct mpc52xx_intr spic;
static struct mpc52xx_sdma sbes;
static struct mpc52xx_xlb sxlb;
static struct mpc52xx_gpio sgps;
static struct mpc52xx_gpio_wkup sgpw;
static char spci[0x200];
/*
 * Snapshot every register block (and the on-chip SRAM) that is lost
 * during low power, into the static shadow copies above.
 * NOTE(review): saved_sram is declared elsewhere in this file -- it is
 * not visible in this chunk; presumably a static buffer of sram_size.
 */
static void lite5200_save_regs(void)
{
	_memcpy_fromio(&spic, pic, sizeof(*pic));
	_memcpy_fromio(&sbes, bes, sizeof(*bes));
	_memcpy_fromio(&scdm, cdm, sizeof(*cdm));
	_memcpy_fromio(&sxlb, xlb, sizeof(*xlb));
	_memcpy_fromio(&sgps, gps, sizeof(*gps));
	_memcpy_fromio(&sgpw, gpw, sizeof(*gpw));
	_memcpy_fromio(spci, pci, 0x200);
	_memcpy_fromio(saved_sram, sram, sram_size);
}
/*
 * Restore the saved register state after resume.  The write order is
 * deliberate (e.g. XLB/BestComm enables and interrupt unmasks come
 * last within their blocks) -- do not reorder.
 */
static void lite5200_restore_regs(void)
{
	int i;
	_memcpy_toio(sram, saved_sram, sram_size);
	/* PCI Configuration */
	_memcpy_toio(pci, spci, 0x200);
	/*
	 * GPIOs. Interrupt Master Enable has higher address then other
	 * registers, so just memcpy is ok.
	 */
	_memcpy_toio(gpw, &sgpw, sizeof(*gpw));
	_memcpy_toio(gps, &sgps, sizeof(*gps));
	/* XLB Arbitrer */
	out_be32(&xlb->snoop_window, sxlb.snoop_window);
	out_be32(&xlb->master_priority, sxlb.master_priority);
	out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable);
	/* enable */
	out_be32(&xlb->int_enable, sxlb.int_enable);
	out_be32(&xlb->config, sxlb.config);
	/* CDM - Clock Distribution Module */
	out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel);
	out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel);
	out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en);
	out_8(&cdm->fd_enable, scdm.fd_enable);
	out_be16(&cdm->fd_counters, scdm.fd_counters);
	out_be32(&cdm->clk_enables, scdm.clk_enables);
	out_8(&cdm->osc_disable, scdm.osc_disable);
	out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1);
	out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2);
	out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3);
	out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6);
	/* BESTCOMM */
	out_be32(&bes->taskBar, sbes.taskBar);
	out_be32(&bes->currentPointer, sbes.currentPointer);
	out_be32(&bes->endPointer, sbes.endPointer);
	out_be32(&bes->variablePointer, sbes.variablePointer);
	out_8(&bes->IntVect1, sbes.IntVect1);
	out_8(&bes->IntVect2, sbes.IntVect2);
	out_be16(&bes->PtdCntrl, sbes.PtdCntrl);
	for (i=0; i<32; i++)
		out_8(&bes->ipr[i], sbes.ipr[i]);
	out_be32(&bes->cReqSelect, sbes.cReqSelect);
	out_be32(&bes->task_size0, sbes.task_size0);
	out_be32(&bes->task_size1, sbes.task_size1);
	out_be32(&bes->MDEDebug, sbes.MDEDebug);
	out_be32(&bes->ADSDebug, sbes.ADSDebug);
	out_be32(&bes->Value1, sbes.Value1);
	out_be32(&bes->Value2, sbes.Value2);
	out_be32(&bes->Control, sbes.Control);
	out_be32(&bes->Status, sbes.Status);
	out_be32(&bes->PTDDebug, sbes.PTDDebug);
	/* restore tasks */
	for (i=0; i<16; i++)
		out_be16(&bes->tcr[i], sbes.tcr[i]);
	/* enable interrupts */
	out_be32(&bes->IntPend, sbes.IntPend);
	out_be32(&bes->IntMask, sbes.IntMask);
	/* PIC */
	out_be32(&pic->per_pri1, spic.per_pri1);
	out_be32(&pic->per_pri2, spic.per_pri2);
	out_be32(&pic->per_pri3, spic.per_pri3);
	out_be32(&pic->main_pri1, spic.main_pri1);
	out_be32(&pic->main_pri2, spic.main_pri2);
	out_be32(&pic->enc_status, spic.enc_status);
	/* unmask and enable interrupts */
	out_be32(&pic->per_mask, spic.per_mask);
	out_be32(&pic->main_mask, spic.main_mask);
	out_be32(&pic->ctrl, spic.ctrl);
}
/*
 * Enter low power: save the SoC register state, drop into the assembly
 * low-power routine, then restore state and restart timekeeping.
 */
static int lite5200_pm_enter(suspend_state_t state)
{
	/* deep sleep? let mpc52xx code handle that */
	if (state == PM_SUSPEND_STANDBY) {
		return mpc52xx_pm_enter(state);
	}
	lite5200_save_regs();
	/* effectively save FP regs */
	enable_kernel_fp();
	lite5200_low_power(sram, mbar);
	lite5200_restore_regs();
	/* restart jiffies */
	wakeup_decrementer();
	iounmap(mbar);	/* mapped in lite5200_pm_prepare() */
	return 0;
}
/* Post-resume teardown; standby is delegated to common mpc52xx code. */
static void lite5200_pm_finish(void)
{
	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		mpc52xx_pm_finish();
}
/* Reset the recorded target state once the suspend cycle completes. */
static void lite5200_pm_end(void)
{
	lite5200_pm_target_state = PM_SUSPEND_ON;
}
/* Platform suspend callbacks registered with the PM core. */
static struct platform_suspend_ops lite5200_pm_ops = {
	.valid = lite5200_pm_valid,
	.begin = lite5200_pm_begin,
	.prepare = lite5200_pm_prepare,
	.enter = lite5200_pm_enter,
	.finish = lite5200_pm_finish,
	.end = lite5200_pm_end,
};
/* Hook this board's suspend ops into the PM core; always succeeds. */
int __init lite5200_pm_init(void)
{
	suspend_set_ops(&lite5200_pm_ops);
	return 0;
}
| gpl-2.0 |
nspierbundel/amlogic-common | drivers/rtc/rtc-ds1511.c | 2526 | 14590 | /*
* An rtc driver for the Dallas DS1511
*
* Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
* Copyright (C) 2007 Andrew Sharp <andy.sharp@lsi.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Real time clock driver for the Dallas 1511 chip, which also
* contains a watchdog timer. There is a tiny amount of code that
* platform code could use to mess with the watchdog device a little
* bit, but not a full watchdog driver.
*/
#include <linux/bcd.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#define DRV_VERSION "0.6"
/* DS1511 register map: time/date, alarm, watchdog, control and RAM window. */
enum ds1511reg {
	DS1511_SEC = 0x0,
	DS1511_MIN = 0x1,
	DS1511_HOUR = 0x2,
	DS1511_DOW = 0x3,
	DS1511_DOM = 0x4,
	DS1511_MONTH = 0x5,
	DS1511_YEAR = 0x6,
	DS1511_CENTURY = 0x7,
	DS1511_AM1_SEC = 0x8,		/* alarm registers */
	DS1511_AM2_MIN = 0x9,
	DS1511_AM3_HOUR = 0xa,
	DS1511_AM4_DATE = 0xb,
	DS1511_WD_MSEC = 0xc,		/* watchdog counters */
	DS1511_WD_SEC = 0xd,
	DS1511_CONTROL_A = 0xe,
	DS1511_CONTROL_B = 0xf,
	DS1511_RAMADDR_LSB = 0x10,	/* indirect NVRAM access */
	DS1511_RAMDATA = 0x13
};
#define DS1511_BLF1 0x80
#define DS1511_BLF2 0x40
#define DS1511_PRS 0x20
#define DS1511_PAB 0x10
#define DS1511_TDF 0x08
#define DS1511_KSF 0x04
#define DS1511_WDF 0x02
#define DS1511_IRQF 0x01
#define DS1511_TE 0x80
#define DS1511_CS 0x40
#define DS1511_BME 0x20
#define DS1511_TPE 0x10
#define DS1511_TIE 0x08
#define DS1511_KIE 0x04
#define DS1511_WDE 0x02
#define DS1511_WDS 0x01
#define DS1511_RAM_MAX 0xff
#define RTC_CMD DS1511_CONTROL_B
#define RTC_CMD1 DS1511_CONTROL_A
#define RTC_ALARM_SEC DS1511_AM1_SEC
#define RTC_ALARM_MIN DS1511_AM2_MIN
#define RTC_ALARM_HOUR DS1511_AM3_HOUR
#define RTC_ALARM_DATE DS1511_AM4_DATE
#define RTC_SEC DS1511_SEC
#define RTC_MIN DS1511_MIN
#define RTC_HOUR DS1511_HOUR
#define RTC_DOW DS1511_DOW
#define RTC_DOM DS1511_DOM
#define RTC_MON DS1511_MONTH
#define RTC_YEAR DS1511_YEAR
#define RTC_CENTURY DS1511_CENTURY
#define RTC_TIE DS1511_TIE
#define RTC_TE DS1511_TE
/* Per-device state for the platform driver. */
struct rtc_plat_data {
	struct rtc_device *rtc;
	void __iomem *ioaddr; /* virtual base address */
	int size; /* amount of memory mapped */
	int irq;
	unsigned int irqen;	/* requested interrupt enables (RTC_AF/RTC_UF) */
	int alrm_sec;		/* cached alarm fields; <0 means wildcard */
	int alrm_min;
	int alrm_hour;
	int alrm_mday;
	spinlock_t lock;
};
static DEFINE_SPINLOCK(ds1511_lock);
static __iomem char *ds1511_base;
static u32 reg_spacing = 1;
/* Write one DS1511 register (reg index scaled by the bus spacing). */
static noinline void
rtc_write(uint8_t val, uint32_t reg)
{
	writeb(val, ds1511_base + (reg * reg_spacing));
}
/* Write an alarm register with its top (mask/wildcard) bit forced on. */
static inline void
rtc_write_alarm(uint8_t val, enum ds1511reg reg)
{
	rtc_write((val | 0x80), reg);
}
/* Read one DS1511 register. */
static noinline uint8_t
rtc_read(enum ds1511reg reg)
{
	return readb(ds1511_base + (reg * reg_spacing));
}
/* Clear TE in control B: freezes the clock registers for safe access. */
static inline void
rtc_disable_update(void)
{
	rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
}
/* Set TE in control B: lets the clock registers count again. */
static void
rtc_enable_update(void)
{
	rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
}
/*
* #define DS1511_WDOG_RESET_SUPPORT
*
* Uncomment this if you want to use these routines in
* some platform code.
*/
#ifdef DS1511_WDOG_RESET_SUPPORT
/*
* just enough code to set the watchdog timer so that it
* will reboot the system
*/
void
ds1511_wdog_set(unsigned long deciseconds)
{
	/*
	 * the wdog timer can take 99.99 seconds
	 */
	deciseconds %= 10000;
	/*
	 * set the wdog values in the wdog registers (BCD encoded)
	 */
	rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC);
	rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC);
	/*
	 * set wdog enable and wdog 'steering' bit to issue a reset
	 */
	rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD);
}
/* Turn the watchdog off and zero its counters. */
void
ds1511_wdog_disable(void)
{
	/*
	 * clear wdog enable and wdog 'steering' bits
	 */
	rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
	/*
	 * clear the wdog counter
	 */
	rtc_write(0, DS1511_WD_MSEC);
	rtc_write(0, DS1511_WD_SEC);
}
#endif
/*
* set the rtc chip's idea of the time.
* stupidly, some callers call with year unmolested;
* and some call with year = year - 1900. thanks.
*/
static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
	u8 mon, mday, wday, hour, min, sec, year, century;
	unsigned long flags;

	/*
	 * Some callers pass the full year, others year - 1900;
	 * normalize to the full year first.
	 */
	if (rtc_tm->tm_year < 1900)
		rtc_tm->tm_year += 1900;
	if (rtc_tm->tm_year < 1970)
		return -EINVAL;

	year = rtc_tm->tm_year % 100;
	century = rtc_tm->tm_year / 100;
	mon = rtc_tm->tm_mon + 1;	/* tm_mon counts from zero */
	mday = rtc_tm->tm_mday;
	wday = rtc_tm->tm_wday & 0x7;
	hour = rtc_tm->tm_hour;
	min = rtc_tm->tm_min;
	sec = rtc_tm->tm_sec;

	/* Reject out-of-range dates and times. */
	if (mon > 12 || mday == 0)
		return -EINVAL;
	if (mday > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year))
		return -EINVAL;
	if (hour >= 24 || min >= 60 || sec >= 60)
		return -EINVAL;

	/* Convert to BCD, masking to each register's valid width. */
	sec = bin2bcd(sec) & 0x7f;
	min = bin2bcd(min) & 0x7f;
	hour = bin2bcd(hour) & 0x3f;
	mday = bin2bcd(mday) & 0x3f;
	mon = bin2bcd(mon) & 0x1f;
	year = bin2bcd(year) & 0xff;
	century = bin2bcd(century) & 0xff;

	/* Freeze the clock while all registers are rewritten. */
	spin_lock_irqsave(&ds1511_lock, flags);
	rtc_disable_update();
	rtc_write(century, RTC_CENTURY);
	rtc_write(year, RTC_YEAR);
	rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
	rtc_write(mday, RTC_DOM);
	rtc_write(hour, RTC_HOUR);
	rtc_write(min, RTC_MIN);
	rtc_write(sec, RTC_SEC);
	rtc_write(wday, RTC_DOW);
	rtc_enable_update();
	spin_unlock_irqrestore(&ds1511_lock, flags);

	return 0;
}
/*
 * Read the current time from the chip into *rtc_tm.
 *
 * Chip updates are disabled around the register reads so the snapshot
 * is coherent; fields are then converted from BCD and adjusted to the
 * struct rtc_time conventions (tm_year = years since 1900, tm_mon
 * 0-based).  Always returns 0; an invalid snapshot is logged and
 * replaced with the epoch.
 */
static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
{
	unsigned int century;
	unsigned long flags;

	spin_lock_irqsave(&ds1511_lock, flags);
	rtc_disable_update();

	/* mask each register down to its valid BCD bits */
	rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
	rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
	rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
	rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
	rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
	rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
	rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
	century = rtc_read(RTC_CENTURY);

	rtc_enable_update();
	spin_unlock_irqrestore(&ds1511_lock, flags);

	/* BCD -> binary, outside the lock */
	rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
	rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
	rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
	rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
	rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday);
	rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
	rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
	century = bcd2bin(century) * 100;

	/*
	 * Account for differences between how the RTC uses the values
	 * and how they are defined in a struct rtc_time;
	 */
	century += rtc_tm->tm_year;
	rtc_tm->tm_year = century - 1900;

	rtc_tm->tm_mon--;	/* chip months are 1-12, rtc_time is 0-11 */

	if (rtc_valid_tm(rtc_tm) < 0) {
		dev_err(dev, "retrieved date/time is not valid.\n");
		rtc_time_to_tm(0, rtc_tm);
	}
	return 0;
}
/*
 * write the alarm register settings
 *
 * we only have the use to interrupt every second, otherwise
 * known as the update interrupt, or the interrupt if the whole
 * date/hours/mins/secs matches.  the ds1511 has many more
 * permutations, but the kernel doesn't.
 */
static void
ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
{
	unsigned long flags;

	spin_lock_irqsave(&pdata->lock, flags);
	/*
	 * For each alarm field: write 0x80 when the cached value is a
	 * wildcard (< 0) or when a periodic update interrupt (RTC_UF)
	 * is requested, otherwise the BCD field value.  NOTE(review):
	 * 0x80 is presumably the per-register alarm "don't care" mask
	 * bit -- confirm against the DS1511 datasheet.
	 */
	rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
	       0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
	       RTC_ALARM_DATE);
	rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
	       0x80 : bin2bcd(pdata->alrm_hour) & 0x3f,
	       RTC_ALARM_HOUR);
	rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
	       0x80 : bin2bcd(pdata->alrm_min) & 0x7f,
	       RTC_ALARM_MIN);
	rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
	       0x80 : bin2bcd(pdata->alrm_sec) & 0x7f,
	       RTC_ALARM_SEC);
	/* enable the chip's interrupt output only if something is armed */
	rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
	rtc_read(RTC_CMD1);	/* clear interrupts */
	spin_unlock_irqrestore(&pdata->lock, flags);
}
static int
ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
return -EINVAL;
pdata->alrm_mday = alrm->time.tm_mday;
pdata->alrm_hour = alrm->time.tm_hour;
pdata->alrm_min = alrm->time.tm_min;
pdata->alrm_sec = alrm->time.tm_sec;
if (alrm->enabled) {
pdata->irqen |= RTC_AF;
}
ds1511_rtc_update_alarm(pdata);
return 0;
}
/*
 * Report the cached alarm settings back to the RTC core.  A negative
 * cached field means "wildcard" and is reported as 0.
 */
static int
ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct rtc_plat_data *plat =
		platform_get_drvdata(to_platform_device(dev));

	if (plat->irq <= 0)
		return -EINVAL;

	alrm->time.tm_mday = plat->alrm_mday >= 0 ? plat->alrm_mday : 0;
	alrm->time.tm_hour = plat->alrm_hour >= 0 ? plat->alrm_hour : 0;
	alrm->time.tm_min = plat->alrm_min >= 0 ? plat->alrm_min : 0;
	alrm->time.tm_sec = plat->alrm_sec >= 0 ? plat->alrm_sec : 0;
	alrm->enabled = !!(plat->irqen & RTC_AF);

	return 0;
}
/*
 * IRQ handler (the line may be shared -- probe requests IRQF_SHARED):
 * check whether this chip raised the interrupt, classify it as an
 * update or alarm event, and forward it to the RTC core.
 */
static irqreturn_t
ds1511_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
	unsigned long events = 0;

	spin_lock(&pdata->lock);
	/*
	 * read and clear interrupt
	 */
	if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
		events = RTC_IRQF;
		/*
		 * 0x80 in ALARM_SEC is the value update_alarm() writes for
		 * periodic (update) mode; anything else means a match alarm
		 */
		if (rtc_read(RTC_ALARM_SEC) & 0x80)
			events |= RTC_UF;
		else
			events |= RTC_AF;
		if (likely(pdata->rtc))
			rtc_update_irq(pdata->rtc, 1, events);
	}
	spin_unlock(&pdata->lock);
	/* IRQ_NONE when another device on the shared line was the source */
	return events ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Arm or disarm the alarm interrupt flag in the cached irq-enable
 * mask and propagate the change to the chip.
 */
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct rtc_plat_data *plat =
		platform_get_drvdata(to_platform_device(dev));

	if (plat->irq <= 0)
		return -EINVAL;

	if (enabled)
		plat->irqen |= RTC_AF;
	else
		plat->irqen &= ~RTC_AF;

	ds1511_rtc_update_alarm(plat);
	return 0;
}
/* Operations handed to the RTC class core for this chip. */
static const struct rtc_class_ops ds1511_rtc_ops = {
	.read_time = ds1511_rtc_read_time,
	.set_time = ds1511_rtc_set_time,
	.read_alarm = ds1511_rtc_read_alarm,
	.set_alarm = ds1511_rtc_set_alarm,
	.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
};
/*
 * sysfs bin_attribute read of the chip's battery-backed RAM.
 * Returns the number of bytes copied into buf.
 */
static ssize_t
ds1511_nvram_read(struct file *filp, struct kobject *kobj,
		  struct bin_attribute *ba,
		  char *buf, loff_t pos, size_t size)
{
	ssize_t count;

	/*
	 * Clamp the requested window to the NVRAM *before* deciding on
	 * burst mode, so the enable and disable conditions agree.  The
	 * old code enabled burst from the unclamped size but disabled
	 * it from the post-clamp count, which could leave DS1511_BME
	 * stuck on when clamping shrank the transfer to <= 1 byte.
	 */
	if (pos > DS1511_RAM_MAX) {
		pos = DS1511_RAM_MAX;
	}
	if (size + pos > DS1511_RAM_MAX + 1) {
		size = DS1511_RAM_MAX - pos + 1;
	}
	/*
	 * if count is more than one, turn on "burst" mode
	 * turn it off when you're done
	 */
	if (size > 1) {
		rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
	}
	rtc_write(pos, DS1511_RAMADDR_LSB);
	for (count = 0; size > 0; count++, size--) {
		*buf++ = rtc_read(DS1511_RAMDATA);
	}
	if (count > 1) {
		rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
	}
	return count;
}
/*
 * sysfs bin_attribute write to the chip's battery-backed RAM.
 * Returns the number of bytes written from buf.
 */
static ssize_t
ds1511_nvram_write(struct file *filp, struct kobject *kobj,
		   struct bin_attribute *bin_attr,
		   char *buf, loff_t pos, size_t size)
{
	ssize_t count;

	/*
	 * Clamp the requested window to the NVRAM *before* deciding on
	 * burst mode, so the enable and disable conditions agree.  The
	 * old code enabled burst from the unclamped size but disabled
	 * it from the post-clamp count, which could leave DS1511_BME
	 * stuck on when clamping shrank the transfer to <= 1 byte.
	 */
	if (pos > DS1511_RAM_MAX) {
		pos = DS1511_RAM_MAX;
	}
	if (size + pos > DS1511_RAM_MAX + 1) {
		size = DS1511_RAM_MAX - pos + 1;
	}
	/*
	 * if count is more than one, turn on "burst" mode
	 * turn it off when you're done
	 */
	if (size > 1) {
		rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
	}
	rtc_write(pos, DS1511_RAMADDR_LSB);
	for (count = 0; size > 0; count++, size--) {
		rtc_write(*buf++, DS1511_RAMDATA);
	}
	if (count > 1) {
		rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
	}
	return count;
}
/*
 * sysfs binary attribute exposing the battery-backed RAM as "nvram"
 * (world-readable, owner-writable).
 */
static struct bin_attribute ds1511_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUGO | S_IWUSR,
	},
	.size = DS1511_RAM_MAX,
	.read = ds1511_nvram_read,
	.write = ds1511_nvram_write,
};
/*
 * Probe: map the chip's register window, bring the oscillator and
 * clock up, hook up the (optional) interrupt, and register with the
 * RTC core plus the nvram sysfs attribute.  All resources except the
 * rtc device and the sysfs file are devm-managed.
 */
static int __devinit
ds1511_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct resource *res;
	struct rtc_plat_data *pdata;
	int ret = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		return -ENODEV;
	}
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	pdata->size = res->end - res->start + 1;
	if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
			pdev->name))
		return -EBUSY;
	ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
	if (!ds1511_base)
		return -ENOMEM;
	pdata->ioaddr = ds1511_base;
	pdata->irq = platform_get_irq(pdev, 0);

	/*
	 * turn on the clock and the crystal, etc.
	 */
	rtc_write(0, RTC_CMD);
	rtc_write(0, RTC_CMD1);
	/*
	 * clear the wdog counter
	 */
	rtc_write(0, DS1511_WD_MSEC);
	rtc_write(0, DS1511_WD_SEC);
	/*
	 * start the clock
	 */
	rtc_enable_update();

	/*
	 * check for a dying battery (battery-low flag)
	 */
	if (rtc_read(RTC_CMD1) & DS1511_BLF1) {
		dev_warn(&pdev->dev, "voltage-low detected.\n");
	}

	spin_lock_init(&pdata->lock);
	platform_set_drvdata(pdev, pdata);
	/*
	 * if the platform has an interrupt in mind for this device,
	 * then by all means, set it
	 */
	if (pdata->irq > 0) {
		rtc_read(RTC_CMD1);	/* clear any pending interrupt first */
		if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
			IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {

			/* fall back to a polled, alarm-less device */
			dev_warn(&pdev->dev, "interrupt not available.\n");
			pdata->irq = 0;
		}
	}

	rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
		THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);
	pdata->rtc = rtc;

	ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
	if (ret)
		rtc_device_unregister(pdata->rtc);
	return ret;
}
/*
 * Remove: tear down in reverse probe order (sysfs file, rtc device)
 * and quiesce the chip's interrupt output; devm frees the rest.
 */
static int __devexit
ds1511_rtc_remove(struct platform_device *pdev)
{
	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);

	sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
	rtc_device_unregister(pdata->rtc);
	if (pdata->irq > 0) {
		/*
		 * disable the alarm interrupt
		 */
		rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
		rtc_read(RTC_CMD1);	/* clear any pending interrupt */
	}
	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ds1511");
/* Platform driver glue; bound by the "ds1511" platform-device name. */
static struct platform_driver ds1511_rtc_driver = {
	.probe = ds1511_rtc_probe,
	.remove = __devexit_p(ds1511_rtc_remove),
	.driver = {
		.name = "ds1511",
		.owner = THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init
ds1511_rtc_init(void)
{
	return platform_driver_register(&ds1511_rtc_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit
ds1511_rtc_exit(void)
{
	platform_driver_unregister(&ds1511_rtc_driver);
}
module_init(ds1511_rtc_init);
module_exit(ds1511_rtc_exit);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_frescolteskt | drivers/usb/musb/musb_gadget.c | 2782 | 60164 | /*
* MUSB OTG driver peripheral support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "musb_core.h"
/* MUSB PERIPHERAL status 3-mar-2006:
*
* - EP0 seems solid. It passes both USBCV and usbtest control cases.
* Minor glitches:
*
* + remote wakeup to Linux hosts work, but saw USBCV failures;
* in one test run (operator error?)
* + endpoint halt tests -- in both usbtest and usbcv -- seem
* to break when dma is enabled ... is something wrongly
* clearing SENDSTALL?
*
* - Mass storage behaved ok when last tested. Network traffic patterns
* (with lots of short transfers etc) need retesting; they turn up the
* worst cases of the DMA, since short packets are typical but are not
* required.
*
* - TX/IN
* + both pio and dma behave in with network and g_zero tests
* + no cppi throughput issues other than no-hw-queueing
* + failed with FLAT_REG (DaVinci)
* + seems to behave with double buffering, PIO -and- CPPI
* + with gadgetfs + AIO, requests got lost?
*
* - RX/OUT
* + both pio and dma behave in with network and g_zero tests
* + dma is slow in typical case (short_not_ok is clear)
* + double buffering ok with PIO
* + double buffering *FAILS* with CPPI, wrong data bytes sometimes
* + request lossage observed with gadgetfs
*
* - ISO not tested ... might work, but only weakly isochronous
*
* - Gadget driver disabling of softconnect during bind() is ignored; so
* drivers can't hold off host requests until userspace is ready.
* (Workaround: they can turn it off later.)
*
* - PORTABILITY (assumes PIO works):
* + DaVinci, basically works with cppi dma
* + OMAP 2430, ditto with mentor dma
* + TUSB 6010, platform-specific dma in the works
*/
/* ----------------------------------------------------------------------- */
/*
 * True when the request's buffer currently has a DMA mapping (either
 * made by map_dma_buffer() or pre-mapped by the gadget driver);
 * always false on builds without DMA capability.
 */
#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))
/*
 * Map the request's buffer for DMA (or sync an already-mapped one).
 *
 * Leaves request->map_state as:
 *   UN_MAPPED   - PIO only: no DMA channel, or the engine rejected the
 *                 request via its is_compatible hook
 *   MUSB_MAPPED - mapping created here; unmap_dma_buffer() must undo it
 *   PRE_MAPPED  - caller supplied a DMA address; we only sync for device
 */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		/* no address yet: create the mapping ourselves */
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		/* already mapped by the caller: just hand it to the device */
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}
/* Unmap the buffer from dma and maps it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
struct musb *musb)
{
if (!is_buffer_mapped(request))
return;
if (request->request.dma == DMA_ADDR_INVALID) {
dev_vdbg(musb->controller,
"not unmapping a never mapped buffer\n");
return;
}
if (request->map_state == MUSB_MAPPED) {
dma_unmap_single(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->request.dma = DMA_ADDR_INVALID;
} else { /* PRE_MAPPED */
dma_sync_single_for_cpu(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
}
request->map_state = UN_MAPPED;
}
/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 *
 * The controller lock is dropped around the gadget driver's completion
 * callback (per the annotations below); ep->busy is raised across that
 * window and restored afterwards.
 */
void musb_g_giveback(
	struct musb_ep *ep,
	struct usb_request *request,
	int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request *req;
	struct musb *musb;
	int busy = ep->busy;

	req = to_musb_request(request);
	list_del(&req->list);
	/* don't overwrite a status already set by someone else */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
	/* return buffer ownership to the CPU before the callback sees it */
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
/* ----------------------------------------------------------------------- */
/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 *
 * Flushes the FIFO, aborts and releases any DMA channel, then gives
 * back every queued request with the supplied status.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb *musb = ep->musb;
	struct musb_request *req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller *c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			/* flush twice: double-buffered FIFOs need two passes */
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	/* complete every queued request with the abort status */
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
/* ----------------------------------------------------------------------- */
/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
* This assumes the separate CPPI engine is responding to DMA requests
* from the usb core ... sequenced a bit differently from mentor dma.
*/
/*
 * Largest chunk we may load into the TX FIFO in one go: the full
 * hardware FIFO for splittable bulk endpoints, otherwise one
 * USB packet.
 */
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	return can_bulk_split(musb, ep->type)
			? ep->hw_ep->max_packet_sz_tx
			: ep->packet_sz;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral tx (IN) using Mentor DMA works as follows:
Only mode 0 is used for transfers <= wPktSize,
mode 1 is used for larger transfers,
One of the following happens:
- Host sends IN token which causes an endpoint interrupt
-> TxAvail
-> if DMA is currently busy, exit.
-> if queue is non-empty, txstate().
- Request is queued by the gadget driver.
-> if queue was previously empty, txstate()
txstate()
-> start
/\ -> setup DMA
| (data is transferred to the FIFO, then sent out when
| IN token(s) are recd from Host.
| -> DMA interrupt on completion
| calls TxAvail.
| -> stop DMA, ~DMAENAB,
| -> set TxPktRdy for last short pkt or zlp
| -> Complete Request
| -> Continue next request (call txstate)
|___________________________________|
* Non-Mentor DMA engines can of course work differently, such as by
* upleveling from irq-per-packet to irq-per-buffer.
*/
#endif
/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Loads the next chunk of the request into the endpoint, via DMA when
 * a channel is mapped and programs successfully, otherwise via PIO.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8 epnum = req->epnum;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct usb_request *request;
	u16 fifo_count = 0, csr;
	int use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	/* bail while the previous packet is still in flight or stalled */
	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller *c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
	{
		/* mode 0 for a single (short) packet, mode 1 for larger */
		if (request_size < musb_ep->packet_sz)
			musb_ep->dma->desired_mode = 0;
		else
			musb_ep->dma->desired_mode = 1;

		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				musb_ep->dma->desired_mode,
				request->dma + request->actual, request_size);
		if (use_dma) {
			if (musb_ep->dma->desired_mode == 0) {
				/*
				 * We must not clear the DMAMODE bit
				 * before the DMAENAB bit -- and the
				 * latter doesn't always get cleared
				 * before we get here...
				 */
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB);
				musb_writew(epio, MUSB_TXCSR, csr
					| MUSB_TXCSR_P_WZC_BITS);
				csr &= ~MUSB_TXCSR_DMAMODE;
				csr |= (MUSB_TXCSR_DMAENAB |
						MUSB_TXCSR_MODE);
				/* against programming guide */
			} else {
				csr |= (MUSB_TXCSR_DMAENAB
						| MUSB_TXCSR_DMAMODE
						| MUSB_TXCSR_MODE);
				/* AUTOSET breaks high-bandwidth transfers */
				if (!musb_ep->hb_mult)
					csr |= MUSB_TXCSR_AUTOSET;
			}
			csr &= ~MUSB_TXCSR_P_UNDERRUN;

			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			/* channel is useless; drop it and fall back to PIO */
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: prequest->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		/* PIO: copy one chunk into the FIFO and mark it ready */
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 *
 * Handles stall/underrun status, accounts DMA completions, sends a
 * terminating ZLP when needed, gives back finished requests and
 * kicks the next one via txstate().
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16 csr;
	struct musb_request *req;
	struct usb_request *request;
	u8 __iomem *mbase = musb->mregs;
	struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct dma_channel *dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		/* ack the stall and wait for the next event */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8 is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			/* a DMA transfer just completed: account for it */
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}
/* ------------------------------------------------------------ */
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral rx (OUT) using Mentor DMA works as follows:
- Only mode 0 is used.
- Request is queued by the gadget class driver.
-> if queue was previously empty, rxstate()
- Host sends OUT token which causes an endpoint interrupt
/\ -> RxReady
| -> if request queued, call rxstate
| /\ -> setup DMA
| | -> DMA interrupt on completion
| | -> RxReady
| | -> stop DMA
| | -> ack the read
| | -> if data recd = max expected
| | by the request, or host
| | sent a short packet,
| | complete the request,
| | and start the next one.
| |_____________________________________|
| else just wait for the host
| to send the next OUT token.
|__________________________________________________|
* Non-Mentor DMA engines can of course work differently.
*/
#endif
/*
 * Receive the next chunk of an OUT request, by DMA where possible and
 * PIO otherwise, and give the request back when it is full or a short
 * packet ends it.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8 epnum = req->epnum;
	struct usb_request *request = &req->request;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	unsigned fifo_count = 0;
	u16 len;
	u16 csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
	u8 use_mode_1;

	/* a shared FIFO carries its state on the IN side */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller *c = musb->dma_controller;
		struct dma_channel *channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */
		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller *c;
				struct dma_channel *channel;
				int use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of transfer is signified either by a short
	 * packet, or filling the last byte of the buffer.  (Sending extra
	 * data in that last packet should trigger an overflow fault.)  But in
	 * mode 1, we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))
					return;
			}
#endif	/* Mentor's DMA */

			/* PIO fallback: drain up to one packet from the FIFO */
			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
/*
 * Data ready for a request; called from IRQ.
 *
 * Peripheral-mode RX endpoint interrupt handler: acknowledges
 * stall/overrun status, winds down a completed DMA transfer, gives
 * back the finished request, and (re)starts I/O for the next queued
 * request via rxstate().
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16 csr;
	struct musb_request *req;
	struct usb_request *request;
	void __iomem *mbase = musb->mregs;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct dma_channel *dma;
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];

	/* shared-FIFO endpoints keep their state in ep_in */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		/* stall handshake was sent; just clear the status bit */
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		/* report the overrun, but only once per request */
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		/* a DMA transfer just completed: turn DMA off first */
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 **/
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}
/* ------------------------------------------------------------ */
/*
 * musb_gadget_enable - configure and activate a peripheral endpoint.
 * @ep:   the gadget endpoint being enabled
 * @desc: endpoint descriptor chosen by the gadget driver
 *
 * Validates direction and maxpacket against the hardware FIFO sizes,
 * programs TXMAXP/RXMAXP and the CSRs, unmasks the endpoint interrupt,
 * and allocates a DMA channel when a DMA controller is present.
 *
 * Returns 0 on success, -EINVAL on bad arguments or size mismatch,
 * -EBUSY if the endpoint is already enabled.  Takes musb->lock.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct musb_ep *musb_ep;
	struct musb_hw_ep *hw_ep;
	void __iomem *regs;
	struct musb *musb;
	void __iomem *mbase;
	u8 epnum;
	u16 csr;
	unsigned tmp;
	int status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		/* bits 11..12 request extra transactions per microframe
		 * (high-bandwidth ISO); only allowed when the controller
		 * was configured with hb_iso_tx/hb_iso_rx support.
		 */
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	/* tmp now holds the worst-case bytes per microframe */
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 * NOTE(review): this writes max_packet_sz_tx for an RX
		 * endpoint; it matches the mainline musb driver, but looks
		 * suspicious -- confirm against the FIFO configuration.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller *c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT: s = "int"; break;
			default: s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint flushing all requests queued.
 *
 * Masks the endpoint interrupt, zeroes TXMAXP/RXMAXP, and aborts every
 * pending request with -ESHUTDOWN via nuke().  Takes musb->lock.
 * Always returns 0.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long flags;
	struct musb *musb;
	u8 epnum;
	struct musb_ep *musb_ep;
	void __iomem *epio;
	int status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		/* mask the TX interrupt for this endpoint */
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		/* mask the RX interrupt for this endpoint */
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* a NULL desc marks the endpoint as disabled for queue() */
	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 *
 * Returns the embedded struct usb_request, or NULL on allocation
 * failure.  The DMA address is pre-marked invalid until mapping.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb_request *request;

	request = kzalloc(sizeof(*request), gfp_flags);
	if (!request) {
		dev_dbg(musb_ep->musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}
/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	/* the usb_request is embedded in the musb_request we allocated */
	kfree(to_musb_request(req));
}
/* NOTE(review): neither "buffers" nor struct free_record is referenced
 * anywhere in the visible code -- presumably leftovers from an older
 * buffer-management scheme; confirm before removing.
 */
static LIST_HEAD(buffers);

struct free_record {
	struct list_head list;	/* link on the "buffers" list */
	struct device *dev;	/* device the buffer belonged to */
	unsigned bytes;		/* allocation size */
	dma_addr_t dma;		/* bus address of the buffer */
};
/*
 * Start I/O for the request at the head of an endpoint's queue,
 * dispatching to txstate() or rxstate() by direction.
 *
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	/* point the INDEX register at this endpoint before touching CSRs */
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}
/*
 * musb_gadget_queue - queue a request on an endpoint.
 * @ep:        target endpoint
 * @req:       request to queue (must have a buffer and belong to @ep)
 * @gfp_flags: unused here; allocation was done in musb_alloc_request()
 *
 * Maps the buffer for DMA when possible, appends the request to the
 * endpoint's list and, if it is the only entry, starts I/O at once.
 *
 * Returns 0, -EINVAL on bad arguments, -ENODATA when no buffer is set,
 * or -ESHUTDOWN when the endpoint is disabled.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;
	struct musb *musb;
	int status = 0;
	unsigned long lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
			req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
struct musb_request *req = to_musb_request(request);
struct musb_request *r;
unsigned long flags;
int status = 0;
struct musb *musb = musb_ep->musb;
if (!ep || !request || to_musb_request(request)->ep != musb_ep)
return -EINVAL;
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry(r, &musb_ep->req_list, list) {
if (r == req)
break;
}
if (r != req) {
dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
status = -EINVAL;
goto done;
}
/* if the hardware doesn't have the request, easy ... */
if (musb_ep->req_list.next != &req->list || musb_ep->busy)
musb_g_giveback(musb_ep, request, -ECONNRESET);
/* ... else abort the dma transfer ... */
else if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
musb_ep_select(musb->mregs, musb_ep->current_epnum);
if (c->channel_abort)
status = c->channel_abort(musb_ep->dma);
else
status = -EBUSY;
if (status == 0)
musb_g_giveback(musb_ep, request, -ECONNRESET);
} else {
/* NOTE: by sticking to easily tested hardware/driver states,
* we leave counting of in-flight packets imprecise.
*/
musb_g_giveback(musb_ep, request, -ECONNRESET);
}
done:
spin_unlock_irqrestore(&musb->lock, flags);
return status;
}
/*
 * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 *
 * Returns 0 on success, -EINVAL for NULL/ISO endpoints, or -EAGAIN when
 * a request is in progress or the TX FIFO is not empty.
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep *musb_ep;
	u8 epnum;
	struct musb *musb;
	void __iomem *epio;
	void __iomem *mbase;
	unsigned long flags;
	u16 csr;
	struct musb_request *request;
	int status = 0;

	/* Validate before dereferencing: the previous code computed
	 * musb->endpoints[epnum].regs from ep ahead of this NULL check.
	 */
	if (!ep)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	epnum = musb_ep->current_epnum;
	musb = musb_ep->musb;
	epio = musb->endpoints[epnum].regs;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	/* ISO endpoints never halt */
	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
				ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		/* clearing the halt also un-wedges the endpoint */
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
* Sets the halt feature with the clear requests ignored
*/
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
if (!ep)
return -EINVAL;
musb_ep->wedged = 1;
return usb_ep_set_halt(ep);
}
/*
 * Report the number of bytes pending in an OUT endpoint's FIFO
 * (RXCOUNT register); -EINVAL for IN or disabled endpoints.
 */
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	void __iomem *epio = musb_ep->hw_ep->regs;
	int retval = -EINVAL;

	/* only meaningful for an enabled OUT (RX) endpoint */
	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb *musb = musb_ep->musb;
		int epnum = musb_ep->current_epnum;
		void __iomem *mbase = musb->mregs;
		unsigned long flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}
/*
 * Flush an endpoint's FIFO, temporarily masking its TX interrupt so
 * the flush cannot race with the endpoint's IRQ handler.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb *musb = musb_ep->musb;
	u8 epnum = musb_ep->current_epnum;
	void __iomem *epio = musb->endpoints[epnum].regs;
	void __iomem *mbase;
	unsigned long flags;
	u16 csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* written twice; presumably for the double-buffered case --
		 * mirrors the "set twice" pattern used elsewhere in this file
		 */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
/* endpoint operations handed to the gadget core for all non-ep0 endpoints */
static const struct usb_ep_ops musb_ep_ops = {
	.enable = musb_gadget_enable,
	.disable = musb_gadget_disable,
	.alloc_request = musb_alloc_request,
	.free_request = musb_free_request,
	.queue = musb_gadget_queue,
	.dequeue = musb_gadget_dequeue,
	.set_halt = musb_gadget_set_halt,
	.set_wedge = musb_gadget_set_wedge,
	.fifo_status = musb_gadget_fifo_status,
	.fifo_flush = musb_gadget_fifo_flush
};
/* ----------------------------------------------------------------------- */
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
struct musb *musb = gadget_to_musb(gadget);
return (int)musb_readw(musb->mregs, MUSB_FRAME);
}
/*
 * Remote wakeup / SRP entry point for the gadget core.
 *
 * In B_PERIPHERAL (suspended, wakeup-enabled) this drives resume
 * signaling for ~2ms; in B_IDLE it initiates SRP.  Other states fail
 * with -EINVAL.  Takes musb->lock (dropped around otg_start_srp()).
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb *musb = gadget_to_musb(gadget);
	void __iomem *mregs = musb->mregs;
	unsigned long flags;
	int status = -EINVAL;
	u8 power, devctl;
	int retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* bounded wait for the SESSION bit to latch ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then a bounded wait for the session to end again */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* NOTE(review): lock dropped around otg_start_srp();
		 * presumably it may sleep -- confirm.
		 */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	/* drive resume signaling on the bus for ~2ms */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
struct musb *musb = gadget_to_musb(gadget);
musb->is_self_powered = !!is_selfpowered;
return 0;
}
/*
 * Drive or release the D+ soft-connect pullup via the POWER register.
 * Called under musb->lock by musb_gadget_pullup() and stop_activity().
 */
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}
#if 0
/* disabled vbus_session stub; NOTE(review): "musb" is not declared in
 * this scope, so this would not compile if the block were enabled.
 */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif
/* forward VBUS current-draw requests to the transceiver, when supported */
static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb *musb = gadget_to_musb(gadget);

	if (musb->xceiv->set_power)
		return usb_phy_set_power(musb->xceiv, mA);

	return -EOPNOTSUPP;
}
/*
 * Gadget-core pullup callback: connect/disconnect the D+ pullup,
 * caching the requested state in musb->softconnect.  Always returns 0.
 */
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb *musb = gadget_to_musb(gadget);
	unsigned long flags;

	is_on = !!is_on;

	/* keep the controller powered while touching registers */
	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_put(musb->controller);

	return 0;
}
/* forward declarations: the UDC start/stop handlers are defined below */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);

/* controller-level operations handed to the gadget/UDC core */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame = musb_gadget_get_frame,
	.wakeup = musb_gadget_wakeup,
	.set_selfpowered = musb_gadget_set_self_powered,
	/* .vbus_session = musb_gadget_vbus_session, */
	.vbus_draw = musb_gadget_vbus_draw,
	.pullup = musb_gadget_pullup,
	.udc_start = musb_gadget_start,
	.udc_stop = musb_gadget_stop,
};
/* ----------------------------------------------------------------------- */
/* Registration */
/* Only this registration code "knows" the rule (from USB standards)
* about there being only one external upstream port. It assumes
* all peripheral ports are external...
*/
/* release callback for musb->g.dev; nothing to free here */
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
/*
 * init_peripheral_ep - set up one struct musb_ep for the gadget core.
 * @musb:  the controller
 * @ep:    endpoint state to (re)initialize; zeroed first
 * @epnum: hardware endpoint number
 * @is_in: nonzero for an IN (TX) endpoint
 *
 * Builds the endpoint name, links it into musb->g.ep_list (except for
 * ep0, which becomes musb->g.ep0) and picks the matching ep_ops and
 * maxpacket from the hardware FIFO configuration.
 */
static void __devinit
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	/* bounded write instead of sprintf(): ep->name is a fixed-size
	 * array, and "ep%d" plus a direction suffix must not overrun it
	 */
	snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		/* ep0 is special: fixed 64-byte maxpacket, own ops */
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 *
 * Shared-FIFO hardware endpoints get a single bidirectional musb_ep;
 * otherwise separate TX/RX endpoints are created for each direction
 * that has a nonzero FIFO size.
 */
static inline void __devinit musb_g_init_endpoints(struct musb *musb)
{
	u8 epnum;
	struct musb_hw_ep *hw_ep;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	/* note: the previous version also kept an endpoint count here,
	 * but never read it -- dead code, removed
	 */
	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
		} else {
			if (hw_ep->max_packet_sz_tx)
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
			if (hw_ep->max_packet_sz_rx)
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
		}
	}
}
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 *
 * Registers the virtual "gadget" device and adds the UDC to the
 * gadget core.  Returns 0 or a negative errno.
 */
int __devinit musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0) {
		/* device_register took a reference even on failure */
		put_device(&musb->g.dev);
		return status;
	}
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	/* clearing parent signals musb_gadget_cleanup to skip unregister */
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}
/* tear down what musb_gadget_setup() created */
void musb_gadget_cleanup(struct musb *musb)
{
	usb_del_gadget_udc(&musb->g);
	/* parent is NULL when setup already unregistered the device */
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
}
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	struct usb_otg *otg = musb->xceiv->otg;
	unsigned long flags;
	int retval = -EINVAL;

	/* this controller only supports high-speed-capable drivers */
	if (driver->max_speed < USB_SPEED_HIGH)
		goto err0;

	pm_runtime_get_sync(musb->controller);

	dev_dbg(musb->controller, "registering driver %s\n", driver->function);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag. Drivers are
	 * allowed hold the peripheral inactive until for example
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */

	if (!is_otg_enabled(musb))
		musb_start(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		struct usb_hcd *hcd = musb_to_hcd(musb);

		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT: funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

		if ((musb->xceiv->last_event == USB_EVENT_ID)
					&& otg->set_vbus)
			otg_set_vbus(otg, 1);

		hcd->self.uses_pio_for_control = 1;
	}
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);

	return 0;

err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err0:
	return retval;
}
/*
 * Quiesce all traffic: drop the pullup, stop the controller and nuke
 * all outstanding requests on every endpoint, so @driver (when still
 * connected) sees its requests complete with -ESHUTDOWN.
 */
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int i;
	struct musb_hw_ep *hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;

	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->state = OTG_STATE_UNDEFINED;
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);

	return 0;
}
/* ----------------------------------------------------------------------- */
/* lifecycle operations called through plat_uds.c */
/*
 * Bus resume seen while in peripheral mode: clear the suspended flag
 * and notify the gadget driver.  Called with musb->lock held; the lock
 * is dropped around the driver's resume() callback.
 */
void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
			otg_state_string(musb->xceiv->state));
	}
}
/* called when SOF packets stop for 3+ msec
 *
 * Advances B_IDLE to B_PERIPHERAL when VBUS is valid, or marks an
 * active peripheral suspended and notifies the gadget driver (with
 * musb->lock dropped around the callback).
 */
void musb_g_suspend(struct musb *musb)
{
	u8 devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
			otg_state_string(musb->xceiv->state));
	}
}
/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	/* reuse the gadget-facing wakeup path */
	musb_gadget_wakeup(&musb->g);
}
/* called when VBUS drops below session threshold, and in other cases
 *
 * Clears the host-request bit, drops VBUS draw to zero, notifies the
 * gadget driver (musb->lock released around the callback), and moves
 * the OTG state machine to an appropriate idle state.
 */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem *mregs = musb->mregs;
	u8 devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
/*
 * USB bus reset seen in peripheral mode: report any pending
 * disconnect, latch the negotiated speed from POWER.HSMODE, and reset
 * addressing/ep0/OTG state for USB_STATE_DEFAULT.
 *
 * The lock annotations reflect that musb_g_disconnect() may drop and
 * reacquire musb->lock around the gadget driver's callback.
 */
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem *mbase = musb->mregs;
	u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8 power;

	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
		(devctl & MUSB_DEVCTL_BDEVICE)
			? "B-Device" : "A-Device",
		musb_readb(mbase, MUSB_FADDR),
		musb->gadget_driver
			? musb->gadget_driver->driver.name
			: NULL
		);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
		? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}
| gpl-2.0 |
ptmr3/smdk4412 | drivers/scsi/u14-34f.c | 3038 | 67067 | /*
* u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
*
* 03 Jun 2003 Rev. 8.10 for linux-2.5.70
* + Update for new IRQ API.
* + Use "goto" when appropriate.
* + Drop u14-34f.h.
* + Update for new module_param API.
* + Module parameters can now be specified only in the
* same format as the kernel boot options.
*
* boot option old module param
* ----------- ------------------
* addr,... io_port=addr,...
* lc:[y|n] linked_comm=[1|0]
* mq:xx max_queue_depth=xx
* tm:[0|1|2] tag_mode=[0|1|2]
* et:[y|n] ext_tran=[1|0]
* of:[y|n] have_old_firmware=[1|0]
*
* A valid example using the new parameter format is:
* modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4"
*
* which is equivalent to the old format:
* modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \
* max_queue_depth=4
*
* With actual module code, u14-34f and u14_34f are equivalent
* as module parameter names.
*
* 12 Feb 2003 Rev. 8.04 for linux 2.5.60
* + Release irq before calling scsi_register.
*
* 12 Nov 2002 Rev. 8.02 for linux 2.5.47
* + Release driver_lock before calling scsi_register.
*
* 11 Nov 2002 Rev. 8.01 for linux 2.5.47
* + Fixed bios_param and scsicam_bios_param calling parameters.
*
* 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4
* + Use new tcq and adjust_queue_depth api.
* + New command line option (tm:[0-2]) to choose the type of tags:
* 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags.
* Default is tm:0 (tagged commands disabled).
* For compatibility the "tc:" option is an alias of the "tm:"
* option; tc:n is equivalent to tm:0 and tc:y is equivalent to
* tm:1.
*
* 10 Oct 2002 Rev. 7.70 for linux 2.5.42
* + Foreport from revision 6.70.
*
* 25 Jun 2002 Rev. 6.70 for linux 2.4.19
* + Fixed endian-ness problem due to bitfields.
*
* 21 Feb 2002 Rev. 6.52 for linux 2.4.18
* + Backport from rev. 7.22 (use io_request_lock).
*
* 20 Feb 2002 Rev. 7.22 for linux 2.5.5
* + Remove any reference to virt_to_bus().
* + Fix pio hang while detecting multiple HBAs.
*
* 01 Jan 2002 Rev. 7.20 for linux 2.5.1
* + Use the dynamic DMA mapping API.
*
* 19 Dec 2001 Rev. 7.02 for linux 2.5.1
* + Use SCpnt->sc_data_direction if set.
* + Use sglist.page instead of sglist.address.
*
* 11 Dec 2001 Rev. 7.00 for linux 2.5.1
* + Use host->host_lock instead of io_request_lock.
*
* 1 May 2001 Rev. 6.05 for linux 2.4.4
* + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d)
*
* 25 Jan 2001 Rev. 6.03 for linux 2.4.0
* + "check_region" call replaced by "request_region".
*
* 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11
* + Removed old scsi error handling support.
* + The obsolete boot option flag eh:n is silently ignored.
* + Removed error messages while a disk drive is powered up at
* boot time.
* + Improved boot messages: all tagged capable device are
* indicated as "tagged".
*
* 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18
* + Updated to the new __setup interface for boot command line options.
* + When loaded as a module, accepts the new parameter boot_options
* which value is a string with the same format of the kernel boot
* command line options. A valid example is:
* modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"'
*
* 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11
* + Removed pre-2.2 source code compatibility.
*
* 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
* Added command line option (et:[y|n]) to use the existing
* translation (returned by scsicam_bios_param) as disk geometry.
* The default is et:n, which uses the disk geometry jumpered
* on the board.
* The default value et:n is compatible with all previous revisions
* of this driver.
*
* 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
* Increased busy timeout from 10 msec. to 200 msec. while
* processing interrupts.
*
* 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
* Improved abort handling during the eh recovery process.
*
* 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
* The driver is now fully SMP safe, including the
* abort and reset routines.
* Added command line options (eh:[y|n]) to choose between
* new_eh_code and the old scsi code.
* If linux version >= 2.1.101 the default is eh:y, while the eh
* option is ignored for previous releases and the old scsi code
* is used.
*
* 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
* Reworked interrupt handler.
*
* 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
* Major reliability improvement: when a batch with overlapping
* requests is detected, requests are queued one at a time
* eliminating any possible board or drive reordering.
*
* 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
* Improved SMP support (if linux version >= 2.1.95).
*
* 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
* Performance improvement: when sequential i/o is detected,
* always use direct sort instead of reverse sort.
*
* 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
* io_port is now unsigned long.
*
* 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
* Use new scsi error handling code (if linux version >= 2.1.88).
* Use new interrupt code.
*
* 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
* Use of udelay inside the wait loops to avoid timeout
* problems with fast cpus.
* Removed check about useless calls to the interrupt service
* routine (reported on SMP systems only).
* At initialization time "sorted/unsorted" is displayed instead
* of "linked/unlinked" to reinforce the fact that "linking" is
* nothing but "elevator sorting" in the actual implementation.
*
* 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
* Use of serial_number_at_timeout in abort and reset processing.
* Use of the __initfunc and __initdata macro in setup code.
* Minor cleanups in the list_statistics code.
*
* 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
* When loading as a module, parameter passing is now supported
* both in 2.0 and in 2.1 style.
* Fixed data transfer direction for some SCSI opcodes.
* Immediate acknowledge to request sense commands.
* Linked commands to each disk device are now reordered by elevator
* sorting. Rare cases in which reordering of write requests could
* cause wrong results are managed.
*
* 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
* Added command line options to enable/disable linked commands
* (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
* queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
* Improved command linking.
*
* 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
* Added linked command support.
*
* 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
* Added queue depth adjustment.
*
* 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
* The list of i/o ports to be probed can be overwritten by the
* "u14-34f=port0,port1,...." boot command line option.
* Scatter/gather lists are now allocated by a number of kmalloc
* calls, in order to avoid the previous size limit of 64Kb.
*
* 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
* Added multichannel support.
*
* 27 Sep 1996 rev. 2.12 for linux 2.1.0
* Portability cleanups (virtual/bus addressing, little/big endian
* support).
*
* 09 Jul 1996 rev. 2.11 for linux 2.0.4
* "Data over/under-run" no longer implies a redo on all targets.
* Number of internal retries is now limited.
*
* 16 Apr 1996 rev. 2.10 for linux 1.3.90
* New argument "reset_flags" to the reset routine.
*
* 21 Jul 1995 rev. 2.02 for linux 1.3.11
* Fixed Data Transfer Direction for some SCSI commands.
*
* 13 Jun 1995 rev. 2.01 for linux 1.2.10
* HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
* the firmware prom is not the latest one (28008-006).
*
* 11 Mar 1995 rev. 2.00 for linux 1.2.0
* Fixed a bug which prevented media change detection for removable
* disk drives.
*
* 23 Feb 1995 rev. 1.18 for linux 1.1.94
* Added a check for scsi_register returning NULL.
*
* 11 Feb 1995 rev. 1.17 for linux 1.1.91
* U14F qualified to run with 32 sglists.
* Now DEBUG_RESET is disabled by default.
*
* 9 Feb 1995 rev. 1.16 for linux 1.1.90
* Use host->wish_block instead of host->block.
*
* 8 Feb 1995 rev. 1.15 for linux 1.1.89
* Cleared target_time_out counter while performing a reset.
*
* 28 Jan 1995 rev. 1.14 for linux 1.1.86
* Added module support.
* Log and do a retry when a disk drive returns a target status
* different from zero on a recovered error.
* Auto detects if U14F boards have an old firmware revision.
* Max number of scatter/gather lists set to 16 for all boards
* (most installation run fine using 33 sglists, while other
* has problems when using more than 16).
*
* 16 Jan 1995 rev. 1.13 for linux 1.1.81
* Display a message if check_region detects a port address
* already in use.
*
* 15 Dec 1994 rev. 1.12 for linux 1.1.74
* The host->block flag is set for all the detected ISA boards.
*
* 30 Nov 1994 rev. 1.11 for linux 1.1.68
* Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
* Added optional support for using a single board at a time.
*
* 14 Nov 1994 rev. 1.10 for linux 1.1.63
*
* 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
* 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
*
* This driver is a total replacement of the original UltraStor
* scsi driver, but it supports ONLY the 14F and 34F boards.
* It can be configured in the same kernel in which the original
* ultrastor driver is configured to allow the original U24F
* support.
*
* Multiple U14F and/or U34F host adapters are supported.
*
* Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com)
*
* Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistributions of source
* code retain the above copyright notice and this comment without
* modification.
*
* WARNING: if your 14/34F board has an old firmware revision (see below)
* you must change "#undef" into "#define" in the following
* statement.
*/
#undef HAVE_OLD_UX4F_FIRMWARE
/*
* The UltraStor 14F, 24F, and 34F are a family of intelligent, high
* performance SCSI-2 host adapters.
* Here is the scoop on the various models:
*
* 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
* 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
* 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
*
* This code has been tested with up to two U14F boards, using both
* firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
* 28004-006/38004-005 (BIOS rev. 2.01).
*
* The latest firmware is required in order to get reliable operations when
* clustering is enabled. ENABLE_CLUSTERING provides a performance increase
* up to 50% on sequential access.
*
* Since the struct scsi_host_template structure is shared among all 14F and 34F,
* the last setting of use_clustering is in effect for all of these boards.
*
* Here a sample configuration using two U14F boards:
*
U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
*
* The boot controller must have its BIOS enabled, while other boards can
* have their BIOS disabled, or enabled to a higher address.
* Boards are named Ux4F0, Ux4F1..., according to the port address order in
* the io_port[] array.
*
* The following facts are based on real testing results (not on
* documentation) on the above U14F board.
*
* - The U14F board should be jumpered for bus on time less or equal to 7
* microseconds, while the default is 11 microseconds. This is in order to
* get acceptable performance while using floppy drive and hard disk
* together. The jumpering for 7 microseconds is: JP13 pin 15-16,
* JP14 pin 7-8 and pin 9-10.
* The reduction has a little impact on scsi performance.
*
* - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced
* from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8).
*
* - If U14F on board firmware is older than 28004-006/38004-005,
* the U14F board is unable to provide reliable operations if the scsi
* request length exceeds 16Kbyte. When this length is exceeded the
* behavior is:
* - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94;
* - adapter_status equal 0 and target_status equal 2 on for all targets
* in the next operation following the reset.
* This sequence takes a long time (>3 seconds), so in the meantime
* the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
* (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
* Because of this I had to DISABLE_CLUSTERING and to work around the
* bus reset in the interrupt service routine, returning DID_BUS_BUSY
* so that the operations are retried without complains from the scsi.c
* code.
* Any reset of the scsi bus is going to kill tape operations, since
* no retry is allowed for tapes. Bus resets are more likely when the
* scsi bus is under heavy load.
* Requests using scatter/gather have a maximum length of 16 x 1024 bytes
* when DISABLE_CLUSTERING is in effect, but unscattered requests could be
* larger than 16Kbyte.
*
* The new firmware has fixed all the above problems.
*
* For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
* the latest firmware prom is 28008-006. Older firmware 28008-005 has
* problems when using more than 16 scatter/gather lists.
*
* The list of i/o ports to be probed can be totally replaced by the
* boot command line option: "u14-34f=port0,port1,port2,...", where the
* port0, port1... arguments are ISA/VESA addresses to be probed.
* For example using "u14-34f=0x230,0x340", the driver probes only the two
* addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
* this driver.
*
* After the optional list of detection probes, other possible command line
* options are:
*
* et:y use disk geometry returned by scsicam_bios_param;
* et:n use disk geometry jumpered on the board;
* lc:y enables linked commands;
* lc:n disables linked commands;
* tm:0 disables tagged commands (same as tc:n);
* tm:1 use simple queue tags (same as tc:y);
* tm:2 use ordered queue tags (same as tc:2);
* of:y enables old firmware support;
* of:n disables old firmware support;
* mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
*
* The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n".
* An example using the list of detection probes could be:
* "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n".
*
* When loading as a module, parameters can be specified as well.
* The above example would be (use 1 in place of y and 0 in place of n):
*
* modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
* max_queue_depth=4 ext_tran=0 tag_mode=2
*
* ----------------------------------------------------------------------------
* In this implementation, linked commands are designed to work with any DISK
* or CD-ROM, since this linking has only the intent of clustering (time-wise)
* and reordering by elevator sorting commands directed to each device,
* without any relation with the actual SCSI protocol between the controller
* and the device.
* If Q is the queue depth reported at boot time for each device (also named
* cmds/lun) and Q > 2, whenever there is already an active command to the
* device all other commands to the same device (up to Q-1) are kept waiting
* in the elevator sorting queue. When the active command completes, the
* commands in this queue are sorted by sector address. The sort is chosen
* between increasing or decreasing by minimizing the seek distance between
* the sector of the commands just completed and the sector of the first
* command in the list to be sorted.
* Trivial math assures that the unsorted average seek distance when doing
* random seeks over S sectors is S/3.
* When (Q-1) requests are uniformly distributed over S sectors, the average
* distance between two adjacent requests is S/((Q-1) + 1), so the sorted
* average seek distance for (Q-1) random requests over S sectors is S/Q.
* The elevator sorting hence divides the seek distance by a factor Q/3.
* The above pure geometric remarks are valid in all cases and the
* driver effectively reduces the seek distance by the predicted factor
* when there are Q concurrent read i/o operations on the device, but this
* does not necessarily results in a noticeable performance improvement:
* your mileage may vary....
*
* Note: command reordering inside a batch of queued commands could cause
* wrong results only if there is at least one write request and the
* intersection (sector-wise) of all requests is not empty.
* When the driver detects a batch including overlapping requests
* (a really rare event) strict serial (pid) order is enforced.
* ----------------------------------------------------------------------------
*
* The boards are named Ux4F0, Ux4F1,... according to the detection order.
*
* In order to support multiple ISA boards in a reliable way,
* the driver sets host->wish_block = TRUE for all ISA boards.
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
/* Forward declarations for the SCSI mid-layer entry points below. */
static int u14_34f_detect(struct scsi_host_template *);
static int u14_34f_release(struct Scsi_Host *);
static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static int u14_34f_eh_abort(struct scsi_cmnd *);
static int u14_34f_eh_host_reset(struct scsi_cmnd *);
static int u14_34f_bios_param(struct scsi_device *, struct block_device *,
		sector_t, int *);
static int u14_34f_slave_configure(struct scsi_device *);

/*
 * SCSI host template shared by every detected 14F/34F board.
 * NOTE: because the template is shared, the last board to change
 * use_clustering (see the old-firmware handling in port_detect)
 * affects all boards.
 */
static struct scsi_host_template driver_template = {
	.name = "UltraStor 14F/34F rev. 8.10.00 ",
	.detect = u14_34f_detect,
	.release = u14_34f_release,
	.queuecommand = u14_34f_queuecommand,
	.eh_abort_handler = u14_34f_eh_abort,
	.eh_host_reset_handler = u14_34f_eh_host_reset,
	.bios_param = u14_34f_bios_param,
	.slave_configure = u14_34f_slave_configure,
	.this_id = 7,                /* default host adapter SCSI id */
	.unchecked_isa_dma = 1,      /* ISA boards DMA below 16MB */
	.use_clustering = ENABLE_CLUSTERING,
};
/* The mscp/config structs below rely on bitfield layout macros. */
#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
#error "Adjust your <asm/byteorder.h> defines"
#endif

/* Values for the PRODUCT_ID ports for the 14/34F */
#define PRODUCT_ID1 0x56
#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */

/* Subversion values */
#define ISA 0
#define ESA 1

/* Mailbox packet opcodes (struct mscp.opcode) */
#define OP_HOST_ADAPTER 0x1
#define OP_SCSI 0x2
#define OP_RESET 0x4

/* Data transfer direction codes (struct mscp.xdir) */
#define DTD_SCSI 0x0
#define DTD_IN 0x1
#define DTD_OUT 0x2
#define DTD_NONE 0x3

/* Host adapter internal commands (cdb[0] with OP_HOST_ADAPTER) */
#define HA_CMD_INQUIRY 0x1
#define HA_CMD_SELF_DIAG 0x2
#define HA_CMD_READ_BUFF 0x3
#define HA_CMD_WRITE_BUFF 0x4

/* Compile-time debug switches; #define to enable */
#undef DEBUG_LINKED_COMMANDS
#undef DEBUG_DETECT
#undef DEBUG_INTERRUPT
#undef DEBUG_RESET
#undef DEBUG_GENERATE_ERRORS
#undef DEBUG_GENERATE_ABORTS
#undef DEBUG_GEOMETRY

/* Limits on supported boards and per-board resources */
#define MAX_ISA 3
#define MAX_VESA 1
#define MAX_EISA 0
#define MAX_PCI 0
#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
#define MAX_CHANNEL 1
#define MAX_LUN 8
#define MAX_TARGET 8
#define MAX_MAILBOXES 16
#define MAX_SGLIST 32
#define MAX_SAFE_SGLIST 16  /* limit used with old firmware */
#define MAX_INTERNAL_RETRIES 64
#define MAX_CMD_PER_LUN 2
#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)

#define SKIP ULONG_MAX      /* placeholder entry in io_port[] */
#define FALSE 0
#define TRUE 1

/* Mailbox slot states (hostdata.cp_stat[]) */
#define FREE 0
#define IN_USE 1
#define LOCKED 2
#define IN_RESET 3
#define IGNORE 4
#define READY 5
#define ABORTING 6

#define NO_DMA 0xff         /* dma_channel value for bus-master boards */
#define MAXLOOP 10000       /* busy-wait iteration bound */

/* Tagged-queueing modes (tm: boot option) */
#define TAG_DISABLED 0
#define TAG_SIMPLE 1
#define TAG_ORDERED 2

/* I/O register offsets from the board base port */
#define REG_LCL_MASK 0
#define REG_LCL_INTR 1
#define REG_SYS_MASK 2
#define REG_SYS_INTR 3
#define REG_PRODUCT_ID1 4
#define REG_PRODUCT_ID2 5
#define REG_CONFIG1 6
#define REG_CONFIG2 7
#define REG_OGM 8           /* outgoing mailbox pointer (32 bit) */
#define REG_ICM 12          /* incoming mailbox pointer (32 bit) */
#define REGION_SIZE 13UL    /* size of the I/O port region */

/* Register bit values and command bytes */
#define BSY_ASSERTED 0x01
#define IRQ_ASSERTED 0x01
#define CMD_RESET 0xc0
#define CMD_OGM_INTR 0x01
#define CMD_CLR_INTR 0x01
#define CMD_ENA_INTR 0x81

/* Adapter status codes */
#define ASOK 0x00
#define ASST 0x91

#define YESNO(a) ((a) ? 'y' : 'n')
/* TLDEV: device types eligible for linked commands (disk, CD-ROM) */
#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
#define PACKED __attribute__((packed))
/* One scatter/gather segment as consumed by the board (little endian). */
struct sg_list {
	unsigned int address; /* Segment Address */
	unsigned int num_bytes; /* Segment Length */
};

/* MailBox SCSI Command Packet */
struct mscp {
#if defined(__BIG_ENDIAN_BITFIELD)
	unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3;
	unsigned char lun: 3, channel:2, target:3;
#else
	unsigned char opcode: 3, /* type of command */
		xdir: 2, /* data transfer direction */
		dcn: 1, /* disable disconnect */
		ca: 1, /* use cache (if available) */
		sg: 1; /* scatter/gather operation */
	unsigned char target: 3, /* SCSI target id */
		channel: 2, /* SCSI channel number */
		lun: 3; /* SCSI logical unit number */
#endif
	unsigned int data_address PACKED; /* transfer data pointer */
	unsigned int data_len PACKED; /* length in bytes */
	unsigned int link_address PACKED; /* for linking command chains */
	unsigned char clink_id; /* identifies command in chain */
	unsigned char use_sg; /* (if sg is set) 8 bytes per list */
	unsigned char sense_len;
	unsigned char cdb_len; /* 6, 10, or 12 */
	unsigned char cdb[12]; /* SCSI Command Descriptor Block */
	unsigned char adapter_status; /* non-zero indicates HA error */
	unsigned char target_status; /* non-zero indicates target error */
	unsigned int sense_addr PACKED;

	/* Additional fields begin here. These are driver-private and are
	   never seen by the board. */
	struct scsi_cmnd *SCpnt;
	unsigned int cpp_index; /* cp index */

	/* All the cp structure is zero filled by queuecommand except the
	   following CP_TAIL_SIZE bytes, initialized by detect */
	dma_addr_t cp_dma_addr; /* dma handle for this cp structure */
	struct sg_list *sglist; /* pointer to the allocated SG list */
};

/* NOTE(review): "struct sglist *" below looks like a typo for
   "struct sg_list *"; it is benign because any object pointer has the
   same size, so the computed tail size is still correct. */
#define CP_TAIL_SIZE (sizeof(struct sglist *) + sizeof(dma_addr_t))
/* Per-board private state, stored in the Scsi_Host hostdata area. */
struct hostdata {
	struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */
	unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
	unsigned int last_cp_used; /* Index of last mailbox used */
	unsigned int iocount; /* Total i/o done for this board */
	int board_number; /* Number of this board */
	char board_name[16]; /* Name of this board */
	int in_reset; /* True if board is doing a reset */
	int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
	int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
	unsigned int retries; /* Number of internal retries */
	unsigned long last_retried_pid; /* Pid of last retried command */
	unsigned char subversion; /* Bus type, either ISA or ESA */
	struct pci_dev *pdev; /* Always NULL; kept for the pci_map API calls */
	unsigned char heads; /* BIOS geometry from config register 2 */
	unsigned char sectors;
	char board_id[256]; /* data from INQUIRY on this board */
};
/* One Scsi_Host per detected board, indexed by board number. */
static struct Scsi_Host *sh[MAX_BOARDS + 1];
static const char *driver_name = "Ux4F";
/* Per-board IRQ cookie passed to request_irq (one byte per board). */
static char sha[MAX_BOARDS];
/* Serializes board probing and register access during detect. */
static DEFINE_SPINLOCK(driver_lock);

/* Initialize num_boards so that ihdlr can work while detect is in progress */
static unsigned int num_boards = MAX_BOARDS;

/* Probe list; SKIP entries are replaced by boot/module parameters. */
static unsigned long io_port[] = {

	/* Space for MAX_INT_PARAM ports usable while loading as a module */
	SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
	SKIP, SKIP,

	/* Possible ISA/VESA ports */
	0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,

	/* End of list */
	0x0
};

/* Shorthand accessors for board j's private data and name. */
#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
#define BN(board) (HD(board)->board_name)

/* Device is Little Endian */
#define H2DEV(x) cpu_to_le32(x)
#define DEV2H(x) le32_to_cpu(x)

static irqreturn_t do_interrupt_handler(int, void *);
static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int);

/* Runtime flags, settable via boot options (see file header). */
static int do_trace = FALSE;
static int setup_done = FALSE;
static int link_statistics;
static int ext_tran = FALSE;

#if defined(HAVE_OLD_UX4F_FIRMWARE)
static int have_old_firmware = TRUE;
#else
static int have_old_firmware = FALSE;
#endif

#if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE)
static int tag_mode = TAG_SIMPLE;
#else
static int tag_mode = TAG_DISABLED;
#endif

#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
static int linked_comm = TRUE;
#else
static int linked_comm = FALSE;
#endif

#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
#else
static int max_queue_depth = MAX_CMD_PER_LUN;
#endif

#define MAX_INT_PARAM 10
#define MAX_BOOT_OPTIONS_SIZE 256
/* Module parameter string, parsed with the same syntax as the
   "u14-34f=..." kernel boot option. */
static char boot_options[MAX_BOOT_OPTIONS_SIZE];

#if defined(MODULE)
#include <linux/module.h>
#include <linux/moduleparam.h>
module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0);
MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \
"option." \
" Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\"");
MODULE_AUTHOR("Dario Ballabio");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver");
#endif
/*
 * Mid-layer callback invoked once per attached device: picks a queue
 * depth and tag mode for the device and logs the chosen configuration.
 * Returns FALSE (0) to indicate success.
 */
static int u14_34f_slave_configure(struct scsi_device *dev) {
	struct Scsi_Host *host = dev->host;
	int board, qd_tagged, qd_untagged;
	char *tag_suffix, *link_suffix;

	board = ((struct hostdata *) host->hostdata)->board_number;
	qd_untagged = MAX_CMD_PER_LUN;
	qd_tagged = max_queue_depth;

	if (TLDEV(dev->type) && dev->tagged_supported) {
		/* disk/CD-ROM that supports tagging: honor tag_mode */
		switch (tag_mode) {
		case TAG_SIMPLE:
			scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qd_tagged);
			tag_suffix = ", simple tags";
			break;
		case TAG_ORDERED:
			scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, qd_tagged);
			tag_suffix = ", ordered tags";
			break;
		default:
			scsi_adjust_queue_depth(dev, 0, qd_tagged);
			tag_suffix = ", no tags";
			break;
		}
	} else if (TLDEV(dev->type) && linked_comm) {
		/* untagged, but deep queue so elevator sorting can work */
		scsi_adjust_queue_depth(dev, 0, qd_tagged);
		tag_suffix = ", untagged";
	} else {
		scsi_adjust_queue_depth(dev, 0, qd_untagged);
		tag_suffix = "";
	}

	if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
		link_suffix = ", sorted";
	else if (TLDEV(dev->type))
		link_suffix = ", unsorted";
	else
		link_suffix = "";

	sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n",
			dev->queue_depth, link_suffix, tag_suffix);
	return FALSE;
}
/*
 * Poll the local-interrupt register until the board deasserts BSY.
 * Returns TRUE on timeout (after 'loop' microsecond-spaced polls),
 * FALSE once the board is ready.
 */
static int wait_on_busy(unsigned long iobase, unsigned int loop) {
	unsigned int remaining = loop;

	while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
		udelay(1L);
		if (--remaining == 0)
			return TRUE;  /* board still busy: give up */
	}

	return FALSE;
}
static int board_inquiry(unsigned int j) {
struct mscp *cpp;
dma_addr_t id_dma_addr;
unsigned int limit = 0;
unsigned long time;
id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
cpp = &HD(j)->cp[0];
cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp),
PCI_DMA_BIDIRECTIONAL);
memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
cpp->opcode = OP_HOST_ADAPTER;
cpp->xdir = DTD_IN;
cpp->data_address = H2DEV(id_dma_addr);
cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
cpp->cdb_len = 6;
cpp->cdb[0] = HA_CMD_INQUIRY;
if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
printk("%s: board_inquiry, adapter busy.\n", BN(j));
return TRUE;
}
HD(j)->cp_stat[0] = IGNORE;
/* Clear the interrupt indication */
outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
/* Store pointer in OGM address bytes */
outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
/* Issue OGM interrupt */
outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
spin_unlock_irq(&driver_lock);
time = jiffies;
while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
spin_lock_irq(&driver_lock);
if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
HD(j)->cp_stat[0] = FREE;
printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
return TRUE;
}
pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp),
PCI_DMA_BIDIRECTIONAL);
pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id),
PCI_DMA_BIDIRECTIONAL);
return FALSE;
}
static int port_detect \
(unsigned long port_base, unsigned int j, struct scsi_host_template *tpnt) {
unsigned char irq, dma_channel, subversion, i;
unsigned char in_byte;
char *bus_type, dma_name[16];
/* Allowed BIOS base addresses (NULL indicates reserved) */
unsigned long bios_segment_table[8] = {
0,
0xc4000, 0xc8000, 0xcc000, 0xd0000,
0xd4000, 0xd8000, 0xdc000
};
/* Allowed IRQs */
unsigned char interrupt_table[4] = { 15, 14, 11, 10 };
/* Allowed DMA channels for ISA (0 indicates reserved) */
unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };
/* Head/sector mappings */
struct {
unsigned char heads;
unsigned char sectors;
} mapping_table[4] = {
{ 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
};
struct config_1 {
#if defined(__BIG_ENDIAN_BITFIELD)
unsigned char dma_channel: 2, interrupt:2,
removable_disks_as_fixed:1, bios_segment: 3;
#else
unsigned char bios_segment: 3, removable_disks_as_fixed: 1,
interrupt: 2, dma_channel: 2;
#endif
} config_1;
struct config_2 {
#if defined(__BIG_ENDIAN_BITFIELD)
unsigned char tfr_port: 2, bios_drive_number: 1,
mapping_mode: 2, ha_scsi_id: 3;
#else
unsigned char ha_scsi_id: 3, mapping_mode: 2,
bios_drive_number: 1, tfr_port: 2;
#endif
} config_2;
char name[16];
sprintf(name, "%s%d", driver_name, j);
if (!request_region(port_base, REGION_SIZE, driver_name)) {
#if defined(DEBUG_DETECT)
printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
#endif
goto fail;
}
spin_lock_irq(&driver_lock);
if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock;
in_byte = inb(port_base + REG_PRODUCT_ID2);
if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock;
*(char *)&config_1 = inb(port_base + REG_CONFIG1);
*(char *)&config_2 = inb(port_base + REG_CONFIG2);
irq = interrupt_table[config_1.interrupt];
dma_channel = dma_channel_table[config_1.dma_channel];
subversion = (in_byte & 0x0f);
/* Board detected, allocate its IRQ */
if (request_irq(irq, do_interrupt_handler,
IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0),
driver_name, (void *) &sha[j])) {
printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
goto freelock;
}
if (subversion == ISA && request_dma(dma_channel, driver_name)) {
printk("%s: unable to allocate DMA channel %u, detaching.\n",
name, dma_channel);
goto freeirq;
}
if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;
spin_unlock_irq(&driver_lock);
sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
spin_lock_irq(&driver_lock);
if (sh[j] == NULL) {
printk("%s: unable to register host, detaching.\n", name);
goto freedma;
}
sh[j]->io_port = port_base;
sh[j]->unique_id = port_base;
sh[j]->n_io_port = REGION_SIZE;
sh[j]->base = bios_segment_table[config_1.bios_segment];
sh[j]->irq = irq;
sh[j]->sg_tablesize = MAX_SGLIST;
sh[j]->this_id = config_2.ha_scsi_id;
sh[j]->can_queue = MAX_MAILBOXES;
sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;
#if defined(DEBUG_DETECT)
{
unsigned char sys_mask, lcl_mask;
sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
}
#endif
/* Probably a bogus host scsi id, set it to the dummy value */
if (sh[j]->this_id == 0) sh[j]->this_id = -1;
/* If BIOS is disabled, force enable interrupts */
if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);
memset(HD(j), 0, sizeof(struct hostdata));
HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
HD(j)->subversion = subversion;
HD(j)->pdev = NULL;
HD(j)->board_number = j;
if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
if (HD(j)->subversion == ESA) {
sh[j]->unchecked_isa_dma = FALSE;
sh[j]->dma_channel = NO_DMA;
sprintf(BN(j), "U34F%d", j);
bus_type = "VESA";
}
else {
unsigned long flags;
sh[j]->unchecked_isa_dma = TRUE;
flags=claim_dma_lock();
disable_dma(dma_channel);
clear_dma_ff(dma_channel);
set_dma_mode(dma_channel, DMA_MODE_CASCADE);
enable_dma(dma_channel);
release_dma_lock(flags);
sh[j]->dma_channel = dma_channel;
sprintf(BN(j), "U14F%d", j);
bus_type = "ISA";
}
sh[j]->max_channel = MAX_CHANNEL - 1;
sh[j]->max_id = MAX_TARGET;
sh[j]->max_lun = MAX_LUN;
if (HD(j)->subversion == ISA && !board_inquiry(j)) {
HD(j)->board_id[40] = 0;
if (strcmp(&HD(j)->board_id[32], "06000600")) {
printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
BN(j), &HD(j)->board_id[32]);
sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
}
}
if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
else sprintf(dma_name, "DMA %u", dma_channel);
spin_unlock_irq(&driver_lock);
for (i = 0; i < sh[j]->can_queue; i++)
HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev,
&HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < sh[j]->can_queue; i++)
if (! ((&HD(j)->cp[i])->sglist = kmalloc(
sh[j]->sg_tablesize * sizeof(struct sg_list),
(sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
goto release;
}
if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
max_queue_depth = MAX_TAGGED_CMD_PER_LUN;
if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;
if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
tag_mode = TAG_ORDERED;
if (j == 0) {
printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n");
printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n",
driver_name, YESNO(have_old_firmware), tag_mode,
YESNO(linked_comm), max_queue_depth, YESNO(ext_tran));
}
printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
BN(j), sh[j]->max_id, sh[j]->max_lun);
for (i = 0; i <= sh[j]->max_channel; i++)
printk("%s: SCSI channel %u enabled, host target ID %d.\n",
BN(j), i, sh[j]->this_id);
return TRUE;
freedma:
if (subversion == ISA) free_dma(dma_channel);
freeirq:
free_irq(irq, &sha[j]);
freelock:
spin_unlock_irq(&driver_lock);
release_region(port_base, REGION_SIZE);
fail:
return FALSE;
release:
u14_34f_release(sh[j]);
return FALSE;
}
/*
 * Parse the driver option string.  ints[] carries the leading integer
 * parameters already converted by option_setup() (ints[0] = count); they
 * become the io_port[] probe list.  The rest of str is scanned for
 * "xx:val" keyword options, where val is y/n or a number.
 */
static void internal_setup(char *str, int *ints) {
	int i, argc = ints[0];
	char *cur = str, *pc;

	if (argc > 0) {
		if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;

		/* Probe list is zero-terminated. */
		for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];

		io_port[i] = 0;
		setup_done = TRUE;
	}

	/* Keyword options, comma separated: "lc:y,of:n,tm:2,mq:8,..." */
	while (cur && (pc = strchr(cur, ':'))) {
		int val = 0, c = *++pc;

		if (c == 'n' || c == 'N') val = FALSE;
		else if (c == 'y' || c == 'Y') val = TRUE;
		else val = (int) simple_strtoul(pc, NULL, 0);

		if (!strncmp(cur, "lc:", 3)) linked_comm = val;
		else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
		else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
		else if (!strncmp(cur, "tc:", 3)) tag_mode = val;	/* legacy alias of "tm:" */
		else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
		else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
		else if (!strncmp(cur, "et:", 3)) ext_tran = val;

		if ((cur = strchr(cur, ','))) ++cur;
	}

	return;
}
/*
 * Boot-option entry point ("u14-34f=..."): convert the leading
 * comma-separated integers of str into ints[] (ints[0] = how many) and
 * hand the remainder of the string to internal_setup() for keyword
 * parsing.  Returns 1 so the option is consumed.
 */
static int option_setup(char *str) {
	int ints[MAX_INT_PARAM];
	char *cur = str;
	int i = 1;

	while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
		ints[i++] = simple_strtoul(cur, NULL, 0);

		if ((cur = strchr(cur, ',')) != NULL) cur++;
	}

	ints[0] = i - 1;	/* number of integers actually parsed */
	internal_setup(cur, ints);
	return 1;
}
/*
 * SCSI mid-layer detect entry point: probe every candidate address in
 * io_port[] and register one Scsi_Host per responding board.  Returns
 * the number of boards found (also cached in num_boards).
 */
static int u14_34f_detect(struct scsi_host_template *tpnt) {
	unsigned int j = 0, k;

	tpnt->proc_name = "u14-34f";

	if(strlen(boot_options)) option_setup(boot_options);

#if defined(MODULE)
	/* io_port could have been modified when loading as a module */
	if(io_port[0] != SKIP) {
		setup_done = TRUE;
		io_port[MAX_INT_PARAM] = 0;	/* force-terminate the user-supplied list */
	}
#endif

	for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;

	for (k = 0; io_port[k]; k++) {
		if (io_port[k] == SKIP) continue;

		/* j advances only when a board is actually detected. */
		if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
	}

	num_boards = j;
	return j;
}
/*
 * Build the DMA view of mailbox i on board j: map the sense buffer,
 * then either map the command's scatter/gather list (plus the driver's
 * own sg_list array, which the board fetches by bus address) or record
 * a zero-length transfer.
 */
static void map_dma(unsigned int i, unsigned int j) {
	unsigned int data_len = 0;
	unsigned int k, pci_dir;
	int count;
	struct scatterlist *sg;
	struct mscp *cpp;
	struct scsi_cmnd *SCpnt;

	cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
	pci_dir = SCpnt->sc_data_direction;

	if (SCpnt->sense_buffer)
		cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

	cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

	if (scsi_bufflen(SCpnt)) {
		count = scsi_dma_map(SCpnt);
		BUG_ON(count < 0);

		/* Translate each mapped segment into the board's sg_list format. */
		scsi_for_each_sg(SCpnt, sg, count, k) {
			cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
			cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
			data_len += sg->length;
		}

		cpp->sg = TRUE;
		cpp->use_sg = scsi_sg_count(SCpnt);

		/* data_address points at the sg_list itself, not the data. */
		cpp->data_address =
			H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
					cpp->use_sg * sizeof(struct sg_list),
					pci_dir));
		cpp->data_len = H2DEV(data_len);

	} else {
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
	}
}
/*
 * Tear down all DMA mappings created by map_dma() for mailbox i on
 * board j: sense buffer, scatter/gather data and the sg_list array.
 */
static void unmap_dma(unsigned int i, unsigned int j) {
	unsigned int pci_dir;
	struct mscp *cpp;
	struct scsi_cmnd *SCpnt;

	cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
	pci_dir = SCpnt->sc_data_direction;

	if (DEV2H(cpp->sense_addr))
		pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
				DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);

	scsi_dma_unmap(SCpnt);

	/* Zero-length transfers were mapped bidirectional in map_dma(). */
	if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;

	if (DEV2H(cpp->data_address))
		pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address),
				DEV2H(cpp->data_len), pci_dir);
}
/*
 * Make the CPU view of mailbox i's buffers coherent after the board
 * has finished its DMA: sync the sense buffer, the scatter/gather
 * data and the sg_list array for CPU access.
 */
static void sync_dma(unsigned int i, unsigned int j) {
	unsigned int pci_dir;
	struct mscp *cpp;
	struct scsi_cmnd *SCpnt;

	cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
	pci_dir = SCpnt->sc_data_direction;

	if (DEV2H(cpp->sense_addr))
		pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
				DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);

	if (scsi_sg_count(SCpnt))
		pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
				scsi_sg_count(SCpnt), pci_dir);

	/* Zero-length transfers were mapped bidirectional in map_dma(). */
	if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;

	if (DEV2H(cpp->data_address))
		pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address),
				DEV2H(cpp->data_len), pci_dir);
}
/*
 * Record the data transfer direction of mailbox i on board j in
 * cpp->xdir (DTD_IN / DTD_OUT / DTD_NONE).  When the mid-layer reports
 * DMA_BIDIRECTIONAL, the direction is derived from the CDB opcode via
 * the two lookup tables below; unknown opcodes default to DTD_IN.
 */
static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
	unsigned int k;

	/* Opcodes known to move data towards the device. */
	static const unsigned char data_out_cmds[] = {
		0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
		0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
		0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
	};

	/* Opcodes known to move no data at all. */
	static const unsigned char data_none_cmds[] = {
		0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
		0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
		0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
	};

	struct mscp *cpp = &HD(j)->cp[i];
	struct scsi_cmnd *SCpnt = cpp->SCpnt;

	switch (SCpnt->sc_data_direction) {
	case DMA_FROM_DEVICE:
		cpp->xdir = DTD_IN;
		return;
	case DMA_TO_DEVICE:
		cpp->xdir = DTD_OUT;
		return;
	case DMA_NONE:
		cpp->xdir = DTD_NONE;
		return;
	case DMA_BIDIRECTIONAL:
		break;
	default:
		panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j));
	}

	/* Bidirectional: default to input, then consult the opcode tables. */
	cpp->xdir = DTD_IN;

	for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
		if (SCpnt->cmnd[0] == data_out_cmds[k]) {
			cpp->xdir = DTD_OUT;
			return;
		}

	for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
		if (SCpnt->cmnd[0] == data_none_cmds[k]) {
			cpp->xdir = DTD_NONE;
			return;
		}
}
/*
 * Queue a SCSI command to board j: find a free mailbox, fill its mscp
 * control packet, map DMA buffers and either defer it (linked commands)
 * or write it to the board's Outgoing Mail register immediately.
 * Returns 0 on success, 1 to ask the mid-layer to retry.
 */
static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
	unsigned int i, j, k;
	struct mscp *cpp;

	/* j is the board number */
	j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;

	if (SCpnt->host_scribble)
		panic("%s: qcomm, SCpnt %p already active.\n",
				BN(j), SCpnt);

	/* i is the mailbox number, look for the first free mailbox
	   starting from last_cp_used */
	i = HD(j)->last_cp_used + 1;

	for (k = 0; k < sh[j]->can_queue; k++, i++) {
		if (i >= sh[j]->can_queue) i = 0;	/* wrap around */

		if (HD(j)->cp_stat[i] == FREE) {
			HD(j)->last_cp_used = i;
			break;
		}
	}

	if (k == sh[j]->can_queue) {
		printk("%s: qcomm, no free mailbox.\n", BN(j));
		return 1;
	}

	/* Set pointer to control packet structure */
	cpp = &HD(j)->cp[i];

	/* The tail of the mscp (driver bookkeeping) is preserved. */
	memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
	SCpnt->scsi_done = done;
	cpp->cpp_index = i;

	/* host_scribble links the command back to its mailbox index. */
	SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;

	if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n",
			BN(j), i, SCpnt->device->channel, SCpnt->device->id,
			SCpnt->device->lun);

	cpp->opcode = OP_SCSI;
	cpp->channel = SCpnt->device->channel;
	cpp->target = SCpnt->device->id;
	cpp->lun = SCpnt->device->lun;
	cpp->SCpnt = SCpnt;
	cpp->cdb_len = SCpnt->cmd_len;
	memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);

	/* Use data transfer direction SCpnt->sc_data_direction */
	scsi_to_dev_dir(i, j);

	/* Map DMA buffers and SG list */
	map_dma(i, j);

	/* Linked-commands mode: mark READY and let flush_dev() batch it. */
	if (linked_comm && SCpnt->device->queue_depth > 2
			&& TLDEV(SCpnt->device->type)) {
		HD(j)->cp_stat[i] = READY;
		flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
		return 0;
	}

	if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
		unmap_dma(i, j);
		SCpnt->host_scribble = NULL;
		scmd_printk(KERN_INFO, SCpnt,
				"qcomm, adapter busy.\n");
		return 1;
	}

	/* Store pointer in OGM address bytes */
	outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);

	/* Issue OGM interrupt */
	outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);

	HD(j)->cp_stat[i] = IN_USE;
	return 0;
}

/* Wrap the locked variant with the mid-layer's standard locking. */
static DEF_SCSI_QCMD(u14_34f_queuecommand)
/*
 * Error-handler abort: try to cancel SCarg.  The outcome depends on the
 * state of its mailbox: READY/ABORTING commands (never sent to the
 * board) are completed with DID_ABORT; commands already IN_USE on the
 * board cannot be recalled and return FAILED.
 */
static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
	unsigned int i, j;

	j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;

	if (SCarg->host_scribble == NULL) {
		scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
		return SUCCESS;
	}

	/* host_scribble stores the mailbox index (see queuecommand). */
	i = *(unsigned int *)SCarg->host_scribble;
	scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);

	if (i >= sh[j]->can_queue)
		panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));

	if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
		printk("%s: abort, timeout error.\n", BN(j));
		return FAILED;
	}

	if (HD(j)->cp_stat[i] == FREE) {
		printk("%s: abort, mbox %d is free.\n", BN(j), i);
		return SUCCESS;
	}

	if (HD(j)->cp_stat[i] == IN_USE) {
		printk("%s: abort, mbox %d is in use.\n", BN(j), i);

		if (SCarg != HD(j)->cp[i].SCpnt)
			panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
					BN(j), i, SCarg, HD(j)->cp[i].SCpnt);

		if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
			printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);

		return FAILED;
	}

	if (HD(j)->cp_stat[i] == IN_RESET) {
		printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
		return FAILED;
	}

	if (HD(j)->cp_stat[i] == LOCKED) {
		printk("%s: abort, mbox %d is locked.\n", BN(j), i);
		return SUCCESS;
	}

	/* Never reached the board: complete it here with DID_ABORT. */
	if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
		unmap_dma(i, j);
		SCarg->result = DID_ABORT << 16;
		SCarg->host_scribble = NULL;
		HD(j)->cp_stat[i] = FREE;
		printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
		SCarg->scsi_done(SCarg);
		return SUCCESS;
	}

	panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
}
/*
 * Error-handler host reset: mark every active mailbox as ABORTING
 * (never sent) or IN_RESET (on the board), issue a board reset, wait
 * for the hardware to settle, then complete the affected commands with
 * DID_RESET.  Runs under the host lock except during the settle delay.
 */
static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
	unsigned int i, j, k, c, limit = 0;
	unsigned long time;
	int arg_done = FALSE;
	struct scsi_cmnd *SCpnt;

	j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
	scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");

	spin_lock_irq(sh[j]->host_lock);

	if (SCarg->host_scribble == NULL)
		printk("%s: reset, inactive.\n", BN(j));

	if (HD(j)->in_reset) {
		printk("%s: reset, exit, already in reset.\n", BN(j));
		spin_unlock_irq(sh[j]->host_lock);
		return FAILED;
	}

	if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
		printk("%s: reset, exit, timeout error.\n", BN(j));
		spin_unlock_irq(sh[j]->host_lock);
		return FAILED;
	}

	/* Force a redo on every target so stale state is discarded. */
	HD(j)->retries = 0;

	for (c = 0; c <= sh[j]->max_channel; c++)
		for (k = 0; k < sh[j]->max_id; k++) {
			HD(j)->target_redo[k][c] = TRUE;
			HD(j)->target_to[k][c] = 0;
		}

	/* Classify every non-free mailbox before resetting the board. */
	for (i = 0; i < sh[j]->can_queue; i++) {

		if (HD(j)->cp_stat[i] == FREE) continue;

		if (HD(j)->cp_stat[i] == LOCKED) {
			HD(j)->cp_stat[i] = FREE;
			printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
			continue;
		}

		if (!(SCpnt = HD(j)->cp[i].SCpnt))
			panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);

		if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
			HD(j)->cp_stat[i] = ABORTING;
			printk("%s: reset, mbox %d aborting.\n", BN(j), i);
		}

		else {
			HD(j)->cp_stat[i] = IN_RESET;
			printk("%s: reset, mbox %d in reset.\n", BN(j), i);
		}

		if (SCpnt->host_scribble == NULL)
			panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);

		if (*(unsigned int *)SCpnt->host_scribble != i)
			panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);

		if (SCpnt->scsi_done == NULL)
			panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);

		if (SCpnt == SCarg) arg_done = TRUE;
	}

	if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
		printk("%s: reset, cannot reset, timeout error.\n", BN(j));
		spin_unlock_irq(sh[j]->host_lock);
		return FAILED;
	}

	outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
	printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));

#if defined(DEBUG_RESET)
	do_trace = TRUE;
#endif

	HD(j)->in_reset = TRUE;

	/* Drop the lock while busy-waiting for the board to settle. */
	spin_unlock_irq(sh[j]->host_lock);
	time = jiffies;
	while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
	spin_lock_irq(sh[j]->host_lock);

	printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);

	/* Complete all commands caught by the reset. */
	for (i = 0; i < sh[j]->can_queue; i++) {

		if (HD(j)->cp_stat[i] == IN_RESET) {
			SCpnt = HD(j)->cp[i].SCpnt;
			unmap_dma(i, j);
			SCpnt->result = DID_RESET << 16;
			SCpnt->host_scribble = NULL;

			/* This mailbox is still waiting for its interrupt */
			HD(j)->cp_stat[i] = LOCKED;

			printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
		}

		else if (HD(j)->cp_stat[i] == ABORTING) {
			SCpnt = HD(j)->cp[i].SCpnt;
			unmap_dma(i, j);
			SCpnt->result = DID_RESET << 16;
			SCpnt->host_scribble = NULL;

			/* This mailbox was never queued to the adapter */
			HD(j)->cp_stat[i] = FREE;

			printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
		}

		else

			/* Any other mailbox has already been set free by interrupt */
			continue;

		SCpnt->scsi_done(SCpnt);
	}

	HD(j)->in_reset = FALSE;
	do_trace = FALSE;

	if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
	else printk("%s: reset, exit.\n", BN(j));

	spin_unlock_irq(sh[j]->host_lock);
	return SUCCESS;
}
/*
 * Report the BIOS disk geometry (heads/sectors/cylinders) for fdisk
 * compatibility.  With ext_tran enabled and no partition-table
 * geometry, fall back to the extended 255/63 translation.
 *
 * NOTE(review): j is hard-coded to 0, so the geometry always comes
 * from board 0's mapping mode even on multi-board setups — presumably
 * all boards share the same BIOS mapping; confirm before relying on it.
 */
static int u14_34f_bios_param(struct scsi_device *disk,
		struct block_device *bdev, sector_t capacity, int *dkinfo) {
	unsigned int j = 0;
	unsigned int size = capacity;

	dkinfo[0] = HD(j)->heads;
	dkinfo[1] = HD(j)->sectors;
	dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);

	if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) {
		dkinfo[0] = 255;
		dkinfo[1] = 63;
		dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
	}

#if defined (DEBUG_GEOMETRY)
	printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name,
			dkinfo[0], dkinfo[1], dkinfo[2]);
#endif

	return FALSE;
}
/*
 * Selection-sort the n sort keys in sk[] (ascending when rev is false,
 * descending when rev is true), applying the same permutation to the
 * companion array da[] so the two stay paired.
 *
 * Fix: bail out early when n < 2.  n is unsigned, so the original
 * bound "i < n - 1" wrapped to UINT_MAX for n == 0 and spun through
 * ~4G useless iterations; n < 2 also needs no sorting at all.
 */
static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
		unsigned int rev) {
	unsigned int i, j, k, y;
	unsigned long x;

	if (n < 2) return;

	for (i = 0; i < n - 1; i++) {
		k = i;

		/* Find the min (or max, when rev) of the unsorted tail. */
		for (j = k + 1; j < n; j++)
			if (rev) {
				if (sk[j] > sk[k]) k = j;
			}
			else {
				if (sk[j] < sk[k]) k = j;
			}

		if (k != i) {
			x = sk[k]; sk[k] = sk[i]; sk[i] = x;
			y = da[k]; da[k] = da[i]; da[i] = y;
		}
	}

	return;
}
/*
 * Elevator-style reordering of the n_ready queued commands indexed by
 * il[]: sorts them by start sector (ascending or descending depending
 * on the current head position cursec), unless already ordered.  If any
 * requests overlap, fall back to serial-number order and return TRUE so
 * the caller issues only one command.  The static counters accumulate
 * optional link statistics across calls.
 */
static int reorder(unsigned int j, unsigned long cursec,
		unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
	struct scsi_cmnd *SCpnt;
	struct mscp *cpp;
	unsigned int k, n;
	unsigned int rev = FALSE, s = TRUE, r = TRUE;
	unsigned int input_only = TRUE, overlap = FALSE;
	unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
	unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
	unsigned long ioseek = 0;

	static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
	static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
	static unsigned int readysorted = 0, revcount = 0;
	static unsigned long seeksorted = 0, seeknosort = 0;

	if (link_statistics && !(++flushcount % link_statistics))
		printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
				" av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
				ovlcount, readycount, readysorted, sortcount, revcount,
				seeknosort / (readycount + 1),
				seeksorted / (readycount + 1));

	if (n_ready <= 1) return FALSE;

	/* First pass: collect start sectors, min/max, total I/O size and
	   whether the batch is already sorted (s) or reverse-sorted (r). */
	for (n = 0; n < n_ready; n++) {
		k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

		if (!(cpp->xdir == DTD_IN)) input_only = FALSE;

		if (blk_rq_pos(SCpnt->request) < minsec)
			minsec = blk_rq_pos(SCpnt->request);
		if (blk_rq_pos(SCpnt->request) > maxsec)
			maxsec = blk_rq_pos(SCpnt->request);

		sl[n] = blk_rq_pos(SCpnt->request);
		ioseek += blk_rq_sectors(SCpnt->request);

		if (!n) continue;

		if (sl[n] < sl[n - 1]) s = FALSE;
		if (sl[n] > sl[n - 1]) r = FALSE;

		if (link_statistics) {
			if (sl[n] > sl[n - 1])
				seek += sl[n] - sl[n - 1];
			else
				seek += sl[n - 1] - sl[n];
		}
	}

	if (link_statistics) {
		if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
	}

	/* Head past the midpoint: prefer descending order. */
	if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;

	if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;

	/* Sort only when the batch is not already in the desired order. */
	if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);

	/* Second pass (writes present): detect overlapping requests. */
	if (!input_only) for (n = 0; n < n_ready; n++) {
		k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
		ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;

		if (!n) continue;

		if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
				|| (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
	}

	/* Overlaps must execute in submission order: sort by serial number. */
	if (overlap) sort(pl, il, n_ready, FALSE);

	if (link_statistics) {
		if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
		batchcount++; readycount += n_ready; seeknosort += seek / 1024;
		if (input_only) inputcount++;
		if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
		else seeksorted += (iseek + maxsec - minsec) / 1024;
		if (rev && !r) { revcount++; readysorted += n_ready; }
		if (!rev && !s) { sortcount++; readysorted += n_ready; }
	}

#if defined(DEBUG_LINKED_COMMANDS)
	if (link_statistics && (overlap || !(flushcount % link_statistics)))
		for (n = 0; n < n_ready; n++) {
			k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
			printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\
					" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
					(ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
					SCpnt->lun, k, flushcount, n_ready,
					blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
					cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
					YESNO(overlap), cpp->xdir);
		}
#endif

	return overlap;
}
/*
 * Issue the READY mailboxes belonging to dev on board j, in the order
 * chosen by reorder().  Bails out immediately if any command for this
 * device is still IN_USE on the board.  Mailboxes that cannot be
 * issued (adapter busy) are marked ABORTING for the error handler.
 */
static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j,
		unsigned int ihdlr) {
	struct scsi_cmnd *SCpnt;
	struct mscp *cpp;
	unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];

	/* Collect the READY mailboxes for this device. */
	for (k = 0; k < sh[j]->can_queue; k++) {

		if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;

		cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

		if (SCpnt->device != dev) continue;

		/* A command for this device is still on the board: wait. */
		if (HD(j)->cp_stat[k] == IN_USE) return;

		il[n_ready++] = k;
	}

	/* Overlapping requests: issue only the first one. */
	if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;

	for (n = 0; n < n_ready; n++) {
		k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

		if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
			scmd_printk(KERN_INFO, SCpnt,
					"%s, mbox %d, adapter"
					" busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
					k);
			HD(j)->cp_stat[k] = ABORTING;
			continue;
		}

		/* Hand the control packet to the board via the OGM register. */
		outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
		outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
		HD(j)->cp_stat[k] = IN_USE;
	}
}
/*
 * Interrupt service core for board j (called with the host lock held
 * by do_interrupt_handler): read the completed mailbox address from the
 * Incoming Mail register, validate its state, translate the adapter and
 * target status into a mid-layer result and complete the command.
 */
static irqreturn_t ihdlr(unsigned int j)
{
	struct scsi_cmnd *SCpnt;
	unsigned int i, k, c, status, tstatus, reg, ret;
	struct mscp *spp, *cpp;
	int irq = sh[j]->irq;

	/* Check if this board need to be serviced */
	if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;

	HD(j)->iocount++;

	if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
			HD(j)->iocount);

	/* Check if this board is still busy */
	if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
		outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
		printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
				BN(j), irq, reg, HD(j)->iocount);
		goto none;
	}

	/* The ICM register holds the bus address of the completed mscp. */
	ret = inl(sh[j]->io_port + REG_ICM);

	/* Clear interrupt pending flag */
	outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);

	/* Find the mailbox to be serviced on this board */
	for (i = 0; i < sh[j]->can_queue; i++)
		if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break;

	if (i >= sh[j]->can_queue)
		panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
				(void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr));

	cpp = &(HD(j)->cp[i]);
	spp = cpp;

#if defined(DEBUG_GENERATE_ABORTS)
	if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled;
#endif

	/* Validate the mailbox state before completing the command. */
	if (HD(j)->cp_stat[i] == IGNORE) {
		HD(j)->cp_stat[i] = FREE;
		goto handled;
	}
	else if (HD(j)->cp_stat[i] == LOCKED) {
		HD(j)->cp_stat[i] = FREE;
		printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
				HD(j)->iocount);
		goto handled;
	}
	else if (HD(j)->cp_stat[i] == FREE) {
		printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
				HD(j)->iocount);
		goto handled;
	}
	else if (HD(j)->cp_stat[i] == IN_RESET)
		printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
	else if (HD(j)->cp_stat[i] != IN_USE)
		panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
				BN(j), i, HD(j)->cp_stat[i]);

	HD(j)->cp_stat[i] = FREE;
	SCpnt = cpp->SCpnt;

	if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);

	if (SCpnt->host_scribble == NULL)
		panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
				SCpnt);

	if (*(unsigned int *)SCpnt->host_scribble != i)
		panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
				BN(j), i, *(unsigned int *)SCpnt->host_scribble);

	sync_dma(i, j);

	/* Linked commands: push any READY commands for this device now. */
	if (linked_comm && SCpnt->device->queue_depth > 2
			&& TLDEV(SCpnt->device->type))
		flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);

	tstatus = status_byte(spp->target_status);

#if defined(DEBUG_GENERATE_ERRORS)
	if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
		spp->adapter_status = 0x01;
#endif

	/* Translate adapter status into a mid-layer host status. */
	switch (spp->adapter_status) {
	case ASOK: /* status OK */

		/* Forces a reset if a disk drive keeps returning BUSY */
		if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
			status = DID_ERROR << 16;

		/* If there was a bus reset, redo operation on each target */
		else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
				&& HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)])
			status = DID_BUS_BUSY << 16;

		/* Works around a flaw in scsi.c */
		else if (tstatus == CHECK_CONDITION
				&& SCpnt->device->type == TYPE_DISK
				&& (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
			status = DID_BUS_BUSY << 16;

		else
			status = DID_OK << 16;

		if (tstatus == GOOD)
			HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE;

		if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
				(!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
				(SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
			scmd_printk(KERN_INFO, SCpnt,
					"ihdlr, target_status 0x%x, sense key 0x%x.\n",
					spp->target_status,
					SCpnt->sense_buffer[2]);

		HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;

		if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0;

		break;
	case ASST: /* Selection Time Out */

		if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1)
			status = DID_ERROR << 16;

		else {
			status = DID_TIME_OUT << 16;
			HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++;
		}

		break;

	/* Perform a limited number of internal retries */
	case 0x93: /* Unexpected bus free */
	case 0x94: /* Target bus phase sequence failure */
	case 0x96: /* Illegal SCSI command */
	case 0xa3: /* SCSI bus reset error */

		for (c = 0; c <= sh[j]->max_channel; c++)
			for (k = 0; k < sh[j]->max_id; k++)
				HD(j)->target_redo[k][c] = TRUE;

		/* fall through */
	case 0x92: /* Data over/under-run */

		if (SCpnt->device->type != TYPE_TAPE
				&& HD(j)->retries < MAX_INTERNAL_RETRIES) {

#if defined(DID_SOFT_ERROR)
			status = DID_SOFT_ERROR << 16;
#else
			status = DID_BUS_BUSY << 16;
#endif

			HD(j)->retries++;
			HD(j)->last_retried_pid = SCpnt->serial_number;
		}
		else
			status = DID_ERROR << 16;

		break;
	case 0x01: /* Invalid command */
	case 0x02: /* Invalid parameters */
	case 0x03: /* Invalid data list */
	case 0x84: /* SCSI bus abort error */
	case 0x9b: /* Auto request sense error */
	case 0x9f: /* Unexpected command complete message error */
	case 0xff: /* Invalid parameter in the S/G list */
	default:
		status = DID_ERROR << 16;
		break;
	}

	SCpnt->result = status | spp->target_status;

#if defined(DEBUG_INTERRUPT)
	if (SCpnt->result || do_trace)
#else
	if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
			(spp->adapter_status != ASOK &&
			spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
			do_trace || msg_byte(spp->target_status))
#endif
		scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
				" reg 0x%x, count %d.\n",
				i, spp->adapter_status, spp->target_status,
				reg, HD(j)->iocount);

	unmap_dma(i, j);

	/* Set the command state to inactive */
	SCpnt->host_scribble = NULL;

	SCpnt->scsi_done(SCpnt);

	if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
			HD(j)->iocount);

handled:
	return IRQ_HANDLED;
none:
	return IRQ_NONE;
}
/*
 * Top-level IRQ handler: derive the board number from the cookie (an
 * element of the sha[] array, passed at request_irq time), take that
 * board's host lock and run the real service routine ihdlr().
 */
static irqreturn_t do_interrupt_handler(int irq, void *shap) {
	unsigned int j;
	unsigned long spin_flags;
	irqreturn_t ret;

	/* Check if the interrupt must be processed by this handler */
	if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;

	spin_lock_irqsave(sh[j]->host_lock, spin_flags);
	ret = ihdlr(j);
	spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
	return ret;
}
/*
 * Release every resource acquired by port_detect() for the board that
 * owns shpnt: sg_lists, mscp DMA mappings, IRQ, DMA channel, the I/O
 * region and finally the Scsi_Host itself.
 */
static int u14_34f_release(struct Scsi_Host *shpnt) {
	unsigned int i, j;

	/* Locate the board matching this Scsi_Host. */
	for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);

	if (sh[j] == NULL)
		panic("%s: release, invalid Scsi_Host pointer.\n", driver_name);

	for (i = 0; i < sh[j]->can_queue; i++)
		kfree((&HD(j)->cp[i])->sglist);

	for (i = 0; i < sh[j]->can_queue; i++)
		pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr,
				sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);

	free_irq(sh[j]->irq, &sha[j]);

	if (sh[j]->dma_channel != NO_DMA)
		free_dma(sh[j]->dma_channel);

	release_region(sh[j]->io_port, sh[j]->n_io_port);
	scsi_unregister(sh[j]);
	return FALSE;
}
#include "scsi_module.c"
#ifndef MODULE
__setup("u14-34f=", option_setup);
#endif /* end MODULE */
| gpl-2.0 |
ytjiang/linux | drivers/net/team/team_mode_broadcast.c | 4062 | 1836 | /*
* drivers/net/team/team_mode_broadcast.c - Broadcast mode for team
* Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/if_team.h>
static bool bc_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *cur;
struct team_port *last = NULL;
struct sk_buff *skb2;
bool ret;
bool sum_ret = false;
list_for_each_entry_rcu(cur, &team->port_list, list) {
if (team_port_txable(cur)) {
if (last) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
ret = !team_dev_queue_xmit(team, last,
skb2);
if (!sum_ret)
sum_ret = ret;
}
}
last = cur;
}
}
if (last) {
ret = !team_dev_queue_xmit(team, last, skb);
if (!sum_ret)
sum_ret = ret;
}
return sum_ret;
}
/* Mode callbacks: only transmit needs broadcast-specific handling;
 * port enter / address change use the generic team helpers. */
static const struct team_mode_ops bc_mode_ops = {
	.transmit = bc_transmit,
	.port_enter = team_modeop_port_enter,
	.port_change_dev_addr = team_modeop_port_change_dev_addr,
};
/* Registration record for the "broadcast" team mode. */
static const struct team_mode bc_mode = {
	.kind = "broadcast",
	.owner = THIS_MODULE,
	.ops = &bc_mode_ops,
};
/* Register the broadcast mode with the team core on module load. */
static int __init bc_init_module(void)
{
	return team_mode_register(&bc_mode);
}

/* Unregister the broadcast mode on module unload. */
static void __exit bc_cleanup_module(void)
{
	team_mode_unregister(&bc_mode);
}

module_init(bc_init_module);
module_exit(bc_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Broadcast mode for team");
MODULE_ALIAS("team-mode-broadcast");
| gpl-2.0 |
elektroschmock/android_kernel_lge_hammerhead | arch/arm/mach-exynos/dev-audio.c | 4574 | 7760 | /* linux/arch/arm/mach-exynos4/dev-audio.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright (c) 2010 Samsung Electronics Co. Ltd
* Jaswinder Singh <jassi.brar@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <plat/gpio-cfg.h>
#include <plat/audio.h>
#include <mach/map.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/regs-audss.h>
/* Root clock source names for the I2Sv5 (I2S0) controller. */
static const char *rclksrc[] = {
	[0] = "busclk",
	[1] = "i2sclk",
};
/*
 * Route the I2S signals of controller pdev->id onto its GPIO bank.
 * Returns 0 on success, -EINVAL for an unknown controller id.
 */
static int exynos4_cfg_i2s(struct platform_device *pdev)
{
	/* configure GPIO for i2s port */
	if (pdev->id == 0)
		s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 7, S3C_GPIO_SFN(2));
	else if (pdev->id == 1)
		s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(2));
	else if (pdev->id == 2)
		s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(4));
	else {
		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
		return -EINVAL;
	}

	return 0;
}
/*
 * Platform data for I2S0 (I2Sv5): 6-channel primary DAI, secondary DAI
 * and RSTCLR quirks, internal DMA memory inside the audio subsystem.
 */
static struct s3c_audio_pdata i2sv5_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
					| QUIRK_NEED_RSTCLR,
			.src_clk = rclksrc,
			.idma_addr = EXYNOS4_AUDSS_INT_MEM,
		},
	},
};
/* MMIO window plus TX/RX/secondary-TX DMA request lines for I2S0. */
static struct resource exynos4_i2s0_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_I2S0,
		.end = EXYNOS4_PA_I2S0 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_I2S0_TX,
		.end = DMACH_I2S0_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_I2S0_RX,
		.end = DMACH_I2S0_RX,
		.flags = IORESOURCE_DMA,
	},
	[3] = {
		.start = DMACH_I2S0S_TX,
		.end = DMACH_I2S0S_TX,
		.flags = IORESOURCE_DMA,
	},
};

/* I2S0 platform device, bound to the "samsung-i2s" driver. */
struct platform_device exynos4_device_i2s0 = {
	.name = "samsung-i2s",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_i2s0_resource),
	.resource = exynos4_i2s0_resource,
	.dev = {
		.platform_data = &i2sv5_pdata,
	},
};
/* Root clock source names for the I2Sv3 controllers (I2S1/I2S2). */
static const char *rclksrc_v3[] = {
	[0] = "sclk_i2s",
	[1] = "no_such_clock",
};

/* Shared platform data for the simpler I2Sv3 controllers (no MUXPSR). */
static struct s3c_audio_pdata i2sv3_pdata = {
	.cfg_gpio = exynos4_cfg_i2s,
	.type = {
		.i2s = {
			.quirks = QUIRK_NO_MUXPSR,
			.src_clk = rclksrc_v3,
		},
	},
};
/* MMIO window plus TX/RX DMA request lines for I2S1. */
static struct resource exynos4_i2s1_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_I2S1,
		.end = EXYNOS4_PA_I2S1 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_I2S1_TX,
		.end = DMACH_I2S1_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_I2S1_RX,
		.end = DMACH_I2S1_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* I2S1 platform device (I2Sv3 feature set). */
struct platform_device exynos4_device_i2s1 = {
	.name = "samsung-i2s",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_i2s1_resource),
	.resource = exynos4_i2s1_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
/* MMIO window plus TX/RX DMA request lines for I2S2. */
static struct resource exynos4_i2s2_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_I2S2,
		.end = EXYNOS4_PA_I2S2 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_I2S2_TX,
		.end = DMACH_I2S2_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_I2S2_RX,
		.end = DMACH_I2S2_RX,
		.flags = IORESOURCE_DMA,
	},
};

/* I2S2 platform device (I2Sv3 feature set). */
struct platform_device exynos4_device_i2s2 = {
	.name = "samsung-i2s",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_i2s2_resource),
	.resource = exynos4_i2s2_resource,
	.dev = {
		.platform_data = &i2sv3_pdata,
	},
};
/* PCM Controller platform_devices */
static int exynos4_pcm_cfg_gpio(struct platform_device *pdev)
{
switch (pdev->id) {
case 0:
s3c_gpio_cfgpin_range(EXYNOS4_GPZ(0), 5, S3C_GPIO_SFN(3));
break;
case 1:
s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(3));
break;
case 2:
s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 5, S3C_GPIO_SFN(3));
break;
default:
printk(KERN_DEBUG "Invalid PCM Controller number!");
return -EINVAL;
}
return 0;
}
/* Platform data shared by all three PCM controllers */
static struct s3c_audio_pdata s3c_pcm_pdata = {
	.cfg_gpio = exynos4_pcm_cfg_gpio,
};

/* Register window and TX/RX DMA request channels for PCM0 */
static struct resource exynos4_pcm0_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_PCM0,
		.end = EXYNOS4_PA_PCM0 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_PCM0_TX,
		.end = DMACH_PCM0_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_PCM0_RX,
		.end = DMACH_PCM0_RX,
		.flags = IORESOURCE_DMA,
	},
};

struct platform_device exynos4_device_pcm0 = {
	.name = "samsung-pcm",
	.id = 0,
	.num_resources = ARRAY_SIZE(exynos4_pcm0_resource),
	.resource = exynos4_pcm0_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};
/* Register window and TX/RX DMA request channels for PCM1 */
static struct resource exynos4_pcm1_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_PCM1,
		.end = EXYNOS4_PA_PCM1 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_PCM1_TX,
		.end = DMACH_PCM1_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_PCM1_RX,
		.end = DMACH_PCM1_RX,
		.flags = IORESOURCE_DMA,
	},
};

struct platform_device exynos4_device_pcm1 = {
	.name = "samsung-pcm",
	.id = 1,
	.num_resources = ARRAY_SIZE(exynos4_pcm1_resource),
	.resource = exynos4_pcm1_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};
/* Register window and TX/RX DMA request channels for PCM2 */
static struct resource exynos4_pcm2_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_PCM2,
		.end = EXYNOS4_PA_PCM2 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_PCM2_TX,
		.end = DMACH_PCM2_TX,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_PCM2_RX,
		.end = DMACH_PCM2_RX,
		.flags = IORESOURCE_DMA,
	},
};

struct platform_device exynos4_device_pcm2 = {
	.name = "samsung-pcm",
	.id = 2,
	.num_resources = ARRAY_SIZE(exynos4_pcm2_resource),
	.resource = exynos4_pcm2_resource,
	.dev = {
		.platform_data = &s3c_pcm_pdata,
	},
};
/* AC97 Controller platform devices */
/* Route the 5 consecutive AC97 pins starting at GPC0(0) to special
 * function 4, reporting the result of the pin configuration.
 */
static int exynos4_ac97_cfg_gpio(struct platform_device *pdev)
{
	int ret;

	ret = s3c_gpio_cfgpin_range(EXYNOS4_GPC0(0), 5, S3C_GPIO_SFN(4));

	return ret;
}
/* Register window, the three DMA request channels (PCM out/in, mic in)
 * and the interrupt line for the AC97 controller.
 */
static struct resource exynos4_ac97_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_AC97,
		.end = EXYNOS4_PA_AC97 + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_AC97_PCMOUT,
		.end = DMACH_AC97_PCMOUT,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = DMACH_AC97_PCMIN,
		.end = DMACH_AC97_PCMIN,
		.flags = IORESOURCE_DMA,
	},
	[3] = {
		.start = DMACH_AC97_MICIN,
		.end = DMACH_AC97_MICIN,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.start = EXYNOS4_IRQ_AC97,
		.end = EXYNOS4_IRQ_AC97,
		.flags = IORESOURCE_IRQ,
	},
};

static struct s3c_audio_pdata s3c_ac97_pdata = {
	.cfg_gpio = exynos4_ac97_cfg_gpio,
};

/* 32-bit DMA mask for the AC97 device (storage the dev can point at) */
static u64 exynos4_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_ac97 = {
	.name = "samsung-ac97",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_ac97_resource),
	.resource = exynos4_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &exynos4_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
/* S/PDIF Controller platform_device */
/*
 * Configure the two S/PDIF pins starting at GPC1(0) for special
 * function 4.
 *
 * Propagate the result of the pin configuration instead of silently
 * discarding it, matching exynos4_ac97_cfg_gpio().
 */
static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
{
	return s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
}
/* Register window and single (TX-only) DMA request channel for S/PDIF */
static struct resource exynos4_spdif_resource[] = {
	[0] = {
		.start = EXYNOS4_PA_SPDIF,
		.end = EXYNOS4_PA_SPDIF + 0x100 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = DMACH_SPDIF,
		.end = DMACH_SPDIF,
		.flags = IORESOURCE_DMA,
	},
};

static struct s3c_audio_pdata samsung_spdif_pdata = {
	.cfg_gpio = exynos4_spdif_cfg_gpio,
};

/* 32-bit DMA mask for the S/PDIF device (storage the dev can point at) */
static u64 exynos4_spdif_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_spdif = {
	.name = "samsung-spdif",
	.id = -1,
	.num_resources = ARRAY_SIZE(exynos4_spdif_resource),
	.resource = exynos4_spdif_resource,
	.dev = {
		.platform_data = &samsung_spdif_pdata,
		.dma_mask = &exynos4_spdif_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
| gpl-2.0 |
aditisstillalive/android_kernel_lge_hammerhead | drivers/input/touchscreen/wm831x-ts.c | 4830 | 11780 | /*
* Touchscreen driver for WM831x PMICs
*
* Copyright 2011 Wolfson Microelectronics plc.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/pm.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
/*
* R16424 (0x4028) - Touch Control 1
*/
#define WM831X_TCH_ENA 0x8000 /* TCH_ENA */
#define WM831X_TCH_CVT_ENA 0x4000 /* TCH_CVT_ENA */
#define WM831X_TCH_SLPENA 0x1000 /* TCH_SLPENA */
#define WM831X_TCH_Z_ENA 0x0400 /* TCH_Z_ENA */
#define WM831X_TCH_Y_ENA 0x0200 /* TCH_Y_ENA */
#define WM831X_TCH_X_ENA 0x0100 /* TCH_X_ENA */
#define WM831X_TCH_DELAY_MASK 0x00E0 /* TCH_DELAY - [7:5] */
#define WM831X_TCH_DELAY_SHIFT 5 /* TCH_DELAY - [7:5] */
#define WM831X_TCH_DELAY_WIDTH 3 /* TCH_DELAY - [7:5] */
#define WM831X_TCH_RATE_MASK 0x001F /* TCH_RATE - [4:0] */
#define WM831X_TCH_RATE_SHIFT 0 /* TCH_RATE - [4:0] */
#define WM831X_TCH_RATE_WIDTH 5 /* TCH_RATE - [4:0] */
/*
* R16425 (0x4029) - Touch Control 2
*/
#define WM831X_TCH_PD_WK 0x2000 /* TCH_PD_WK */
#define WM831X_TCH_5WIRE 0x1000 /* TCH_5WIRE */
#define WM831X_TCH_PDONLY 0x0800 /* TCH_PDONLY */
#define WM831X_TCH_ISEL 0x0100 /* TCH_ISEL */
#define WM831X_TCH_RPU_MASK 0x000F /* TCH_RPU - [3:0] */
#define WM831X_TCH_RPU_SHIFT 0 /* TCH_RPU - [3:0] */
#define WM831X_TCH_RPU_WIDTH 4 /* TCH_RPU - [3:0] */
/*
* R16426-8 (0x402A-C) - Touch Data X/Y/X
*/
#define WM831X_TCH_PD 0x8000 /* TCH_PD1 */
#define WM831X_TCH_DATA_MASK 0x0FFF /* TCH_DATA - [11:0] */
#define WM831X_TCH_DATA_SHIFT 0 /* TCH_DATA - [11:0] */
#define WM831X_TCH_DATA_WIDTH 12 /* TCH_DATA - [11:0] */
/* Per-device state for one WM831x touchscreen instance */
struct wm831x_ts {
	struct input_dev *input_dev;	/* input device reported through */
	struct wm831x *wm831x;		/* parent PMIC core device */
	unsigned int data_irq;		/* touch data ready interrupt */
	unsigned int pd_irq;		/* pen down detect interrupt */
	bool pressure;			/* report pressure (Z) readings */
	bool pen_down;			/* pen currently believed to be down */
	struct work_struct pd_data_work; /* re-enables IRQs after PD<->DATA switch */
};
/*
 * Work item completing a switch between pen-down and data interrupt
 * modes: the IRQ handlers disable their own interrupt and schedule this
 * to enable the other one from process context.
 */
static void wm831x_pd_data_work(struct work_struct *work)
{
	struct wm831x_ts *ts = container_of(work, struct wm831x_ts,
					    pd_data_work);

	if (!ts->pen_down) {
		/* Pen lifted: go back to waiting for pen-down events */
		enable_irq(ts->pd_irq);
		dev_dbg(ts->wm831x->dev, "IRQ DATA->PD done\n");
	} else {
		/* Pen down: start taking data-ready interrupts */
		enable_irq(ts->data_irq);
		dev_dbg(ts->wm831x->dev, "IRQ PD->DATA done\n");
	}
}
/*
 * Threaded handler for the touch data-ready interrupt: acknowledge the
 * interrupt, read X/Y (and optionally Z) samples, report them to the
 * input layer, and switch back to pen-down detection when the readings
 * indicate the pen has been lifted.
 */
static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
{
	struct wm831x_ts *wm831x_ts = irq_data;
	struct wm831x *wm831x = wm831x_ts->wm831x;
	static int data_types[] = { ABS_X, ABS_Y, ABS_PRESSURE };
	u16 data[3];
	int count;
	int i, ret;

	/* Z (pressure) is only converted when enabled */
	if (wm831x_ts->pressure)
		count = 3;
	else
		count = 2;

	/* Ack the data interrupt before reading the samples */
	wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
			WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);

	ret = wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count,
			       data);
	if (ret != 0) {
		dev_err(wm831x->dev, "Failed to read touch data: %d\n",
			ret);
		return IRQ_NONE;
	}

	/*
	 * We get a pen down reading on every reading, report pen up if any
	 * individual reading does so.
	 */
	wm831x_ts->pen_down = true;
	for (i = 0; i < count; i++) {
		if (!(data[i] & WM831X_TCH_PD)) {
			wm831x_ts->pen_down = false;
			continue;
		}
		input_report_abs(wm831x_ts->input_dev, data_types[i],
				 data[i] & WM831X_TCH_DATA_MASK);
	}

	if (!wm831x_ts->pen_down) {
		/* Switch from data to pen down */
		dev_dbg(wm831x->dev, "IRQ DATA->PD\n");

		disable_irq_nosync(wm831x_ts->data_irq);

		/* Don't need data any more */
		wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
				WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
				WM831X_TCH_Z_ENA, 0);

		/* Flush any final samples that arrived while reading */
		wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
				WM831X_TCHDATA_EINT, WM831X_TCHDATA_EINT);
		wm831x_bulk_read(wm831x, WM831X_TOUCH_DATA_X, count, data);

		if (wm831x_ts->pressure)
			input_report_abs(wm831x_ts->input_dev,
					 ABS_PRESSURE, 0);

		input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);

		/* pd_irq is re-enabled from process context by the work */
		schedule_work(&wm831x_ts->pd_data_work);
	} else {
		input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
	}

	input_sync(wm831x_ts->input_dev);

	return IRQ_HANDLED;
}
/*
 * Threaded handler for the pen-down interrupt: enable the X/Y (and
 * optionally Z) conversions, acknowledge the pen-down interrupt, and
 * defer enabling of the data interrupt to the work item.
 */
static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
{
	struct wm831x_ts *wm831x_ts = irq_data;
	struct wm831x *wm831x = wm831x_ts->wm831x;
	int ena = 0;

	/* Already in data mode; nothing to do */
	if (wm831x_ts->pen_down)
		return IRQ_HANDLED;

	disable_irq_nosync(wm831x_ts->pd_irq);

	/* Start collecting data */
	if (wm831x_ts->pressure)
		ena |= WM831X_TCH_Z_ENA;

	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
			WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
			WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);

	/* Ack the pen-down interrupt */
	wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
			WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);

	wm831x_ts->pen_down = true;

	/* Switch from pen down to data */
	dev_dbg(wm831x->dev, "IRQ PD->DATA\n");
	schedule_work(&wm831x_ts->pd_data_work);

	return IRQ_HANDLED;
}
/*
 * Input core open callback: enable the touch controller.
 *
 * TCH_ENA is set first with all conversion sources cleared, then
 * CVT_ENA is set in a second write — presumably the hardware requires
 * the two-step enable sequence; confirm against the datasheet before
 * merging the writes.
 */
static int wm831x_ts_input_open(struct input_dev *idev)
{
	struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
	struct wm831x *wm831x = wm831x_ts->wm831x;

	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
			WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
			WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
			WM831X_TCH_Z_ENA, WM831X_TCH_ENA);
	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
			WM831X_TCH_CVT_ENA, WM831X_TCH_CVT_ENA);

	return 0;
}
/*
 * Input core close callback: shut the controller down and quiesce all
 * interrupt and work activity so the driver is back in its idle
 * (pen-detection-armed) state for the next open.
 */
static void wm831x_ts_input_close(struct input_dev *idev)
{
	struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
	struct wm831x *wm831x = wm831x_ts->wm831x;

	/* Shut the controller down, disabling all other functionality too */
	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
			WM831X_TCH_ENA | WM831X_TCH_X_ENA |
			WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0);

	/* Make sure any pending IRQs are done, the above will prevent
	 * new ones firing.
	 */
	synchronize_irq(wm831x_ts->data_irq);
	synchronize_irq(wm831x_ts->pd_irq);

	/* Make sure the IRQ completion work is quiesced */
	flush_work_sync(&wm831x_ts->pd_data_work);

	/* If we ended up with the pen down then make sure we revert back
	 * to pen detection state for the next time we start up.
	 */
	if (wm831x_ts->pen_down) {
		disable_irq(wm831x_ts->data_irq);
		enable_irq(wm831x_ts->pd_irq);
		wm831x_ts->pen_down = false;
	}
}
/*
 * Probe: allocate driver state, configure the touch controller from
 * platform data (wire mode, current select, sample rate), request the
 * pen-down and data interrupts (data IRQ left disabled until a pen-down
 * arrives), and register the input device.
 */
static __devinit int wm831x_ts_probe(struct platform_device *pdev)
{
	struct wm831x_ts *wm831x_ts;
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
	struct wm831x_touch_pdata *pdata = NULL;
	struct input_dev *input_dev;
	int error, irqf;

	if (core_pdata)
		pdata = core_pdata->touch;

	wm831x_ts = kzalloc(sizeof(struct wm831x_ts), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!wm831x_ts || !input_dev) {
		error = -ENOMEM;
		goto err_alloc;
	}

	wm831x_ts->wm831x = wm831x;
	wm831x_ts->input_dev = input_dev;
	INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work);

	/*
	 * If we have a direct IRQ use it, otherwise use the interrupt
	 * from the WM831x IRQ controller.
	 */
	if (pdata && pdata->data_irq)
		wm831x_ts->data_irq = pdata->data_irq;
	else
		wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");

	if (pdata && pdata->pd_irq)
		wm831x_ts->pd_irq = pdata->pd_irq;
	else
		wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");

	/* Default to reporting pressure when no platform data says otherwise */
	if (pdata)
		wm831x_ts->pressure = pdata->pressure;
	else
		wm831x_ts->pressure = true;

	/* Five wire touchscreens can't report pressure */
	if (pdata && pdata->fivewire) {
		wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
				WM831X_TCH_5WIRE, WM831X_TCH_5WIRE);

		/* Pressure measurements are not possible for five wire mode */
		WARN_ON(pdata->pressure && pdata->fivewire);
		wm831x_ts->pressure = false;
	} else {
		wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
				WM831X_TCH_5WIRE, 0);
	}

	/* Current select (ISEL): 200uA-class default, 400uA when requested */
	if (pdata) {
		switch (pdata->isel) {
		default:
			dev_err(&pdev->dev, "Unsupported ISEL setting: %d\n",
				pdata->isel);
			/* Fall through */
		case 200:
		case 0:
			wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
					WM831X_TCH_ISEL, 0);
			break;
		case 400:
			wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
					WM831X_TCH_ISEL, WM831X_TCH_ISEL);
			break;
		}
	}

	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_2,
			WM831X_TCH_PDONLY, 0);

	/* Default to 96 samples/sec */
	wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
			WM831X_TCH_RATE_MASK, 6);

	if (pdata && pdata->data_irqf)
		irqf = pdata->data_irqf;
	else
		irqf = IRQF_TRIGGER_HIGH;

	error = request_threaded_irq(wm831x_ts->data_irq,
				     NULL, wm831x_ts_data_irq,
				     irqf | IRQF_ONESHOT,
				     "Touchscreen data", wm831x_ts);
	if (error) {
		dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
			wm831x_ts->data_irq, error);
		goto err_alloc;
	}
	/* Data IRQ stays off until a pen-down switches us into data mode */
	disable_irq(wm831x_ts->data_irq);

	if (pdata && pdata->pd_irqf)
		irqf = pdata->pd_irqf;
	else
		irqf = IRQF_TRIGGER_HIGH;

	error = request_threaded_irq(wm831x_ts->pd_irq,
				     NULL, wm831x_ts_pen_down_irq,
				     irqf | IRQF_ONESHOT,
				     "Touchscreen pen down", wm831x_ts);
	if (error) {
		dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
			wm831x_ts->pd_irq, error);
		goto err_data_irq;
	}

	/* set up touch configuration */
	input_dev->name = "WM831x touchscreen";
	input_dev->phys = "wm831x";
	input_dev->open = wm831x_ts_input_open;
	input_dev->close = wm831x_ts_input_close;

	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(BTN_TOUCH, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_X, 0, 4095, 5, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 4095, 5, 0);
	if (wm831x_ts->pressure)
		input_set_abs_params(input_dev, ABS_PRESSURE, 0, 4095, 5, 0);

	input_set_drvdata(input_dev, wm831x_ts);
	input_dev->dev.parent = &pdev->dev;

	error = input_register_device(input_dev);
	if (error)
		goto err_pd_irq;

	platform_set_drvdata(pdev, wm831x_ts);
	return 0;

err_pd_irq:
	free_irq(wm831x_ts->pd_irq, wm831x_ts);
err_data_irq:
	free_irq(wm831x_ts->data_irq, wm831x_ts);
err_alloc:
	/* input_free_device() is safe here: the device was never registered */
	input_free_device(input_dev);
	kfree(wm831x_ts);

	return error;
}
/*
 * Remove: release IRQs before unregistering the input device so no
 * handler can report into a dead device, then free the state.
 */
static __devexit int wm831x_ts_remove(struct platform_device *pdev)
{
	struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);

	free_irq(wm831x_ts->pd_irq, wm831x_ts);
	free_irq(wm831x_ts->data_irq, wm831x_ts);
	input_unregister_device(wm831x_ts->input_dev);
	kfree(wm831x_ts);

	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Platform driver bound to the "wm831x-touch" cell of the WM831x MFD */
static struct platform_driver wm831x_ts_driver = {
	.driver = {
		.name = "wm831x-touch",
		.owner = THIS_MODULE,
	},
	.probe = wm831x_ts_probe,
	.remove = __devexit_p(wm831x_ts_remove),
};
module_platform_driver(wm831x_ts_driver);

/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x PMIC touchscreen driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-touch");
| gpl-2.0 |
Electrex/Electroactive-N5 | arch/arm/mach-prima2/prima2.c | 4830 | 1063 | /*
* Defines machines for CSR SiRFprimaII
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "common.h"
/* Bus nodes whose children are populated as platform devices */
static struct of_device_id sirfsoc_of_bus_ids[] __initdata = {
	{ .compatible = "simple-bus", },
	{},
};

/* Machine init: create platform devices from the device tree */
void __init sirfsoc_mach_init(void)
{
	of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
}
/* Device tree compatibles handled by this machine description */
static const char *prima2cb_dt_match[] __initdata = {
	"sirf,prima2-cb",
	NULL
};

/* Machine description for the SiRFprimaII evaluation board */
MACHINE_START(PRIMA2_EVB, "prima2cb")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.atag_offset = 0x100,
	.init_early = sirfsoc_of_clk_init,
	.map_io = sirfsoc_map_lluart,
	.init_irq = sirfsoc_of_irq_init,
	.timer = &sirfsoc_timer,
	.dma_zone_size = SZ_256M,
	.init_machine = sirfsoc_mach_init,
	.dt_compat = prima2cb_dt_match,
	.restart = sirfsoc_restart,
MACHINE_END
| gpl-2.0 |
ali-filth/android_kernel_samsung_arubaslim | drivers/staging/rts_pstor/rtsx_transport.c | 4830 | 19955 | /* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"
#include "debug.h"
/***********************************************************************
* Scatter-gather transfer buffer access routines
***********************************************************************/
/* Copy a buffer of length buflen to/from the srb's transfer buffer.
* (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
* points to a list of s-g entries and we ignore srb->request_bufflen.
* For non-scatter-gather transfers, srb->request_buffer points to the
* transfer buffer itself and srb->request_bufflen is the buffer's length.)
* Update the *index and *offset variables so that the next copy will
* pick up from where this one left off. */
/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
 * points to a list of s-g entries and we ignore srb->request_bufflen.
 * For non-scatter-gather transfers, srb->request_buffer points to the
 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off.
 *
 * Returns the number of bytes actually copied.
 */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather. We have to go through the list one entry
	 * at a time. Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately. If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *) scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages. Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE-1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {

				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {

				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry. For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
/* Store the contents of buffer into srb's transfer buffer and set the
* SCSI residue. */
/* Store the contents of buffer into srb's transfer buffer, starting at
 * the beginning, and set the SCSI residue for any shortfall. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int sg_index = 0;
	unsigned int sg_offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &sg_index, &sg_offset,
				  TO_XFER_BUF);

	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/* Fetch the contents of srb's transfer buffer into buffer, starting at
 * the beginning, and set the SCSI residue for any shortfall. */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int sg_index = 0;
	unsigned int sg_offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &sg_index, &sg_offset,
				  FROM_XFER_BUF);

	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
/***********************************************************************
* Transport routines
***********************************************************************/
/* Invoke the transport and basic error-handling/recovery methods
*
* This is used to send the message to the device and receive the response.
*/
/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 * Sets srb->result according to the outcome; on command failure the
 * cached sense data for the LUN is copied into srb's sense buffer.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		RTSX_DEBUGP("-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		RTSX_DEBUGP("-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically. Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset. If that fails, try a class-specific
	 * device reset. */
	/* NOTE(review): the recovery described above is not implemented;
	 * this label currently just returns. */
Handle_Errors:
	return;
}
/*
 * Append one command word to the host command buffer.  The 32-bit word
 * packs the command type (2 bits), register address (14 bits), mask and
 * data bytes; it is silently dropped if the buffer is full.
 */
void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cmd_buf = (u32 *)(chip->host_cmds_ptr);
	u32 word;

	word = ((u32)(cmd_type & 0x03) << 30) |
	       ((u32)(reg_addr & 0x3FFF) << 16) |
	       ((u32)mask << 8) |
	       (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cmd_buf[(chip->ci)++] = cpu_to_le32(word);
	spin_unlock_irq(&chip->rtsx->reg_lock);
}
/*
 * Kick off execution of the queued host command buffer without waiting
 * for completion: program the buffer base address, then write the
 * control register with the start bit (31), the hardware auto-response
 * bit (30) and the byte length of the queued commands.
 */
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	/* 1U: "1 << 31" overflows a signed int, which is undefined
	 * behaviour in C.
	 */
	u32 val = 1U << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}
/*
 * Send the queued host command buffer to the hardware and wait up to
 * @timeout milliseconds for the completion interrupt.
 *
 * @card selects which card's removal should abort the operation.
 * Returns 0 on success, -ETIMEDOUT on timeout or -EIO on a hardware
 * failure; on error the command engine is stopped via rtsx_stop_cmd().
 */
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	/* 1U: "1 << 31" overflows a signed int, which is undefined
	 * behaviour in C.
	 */
	u32 val = 1U << 31;
	long timeleft;
	int err = 0;

	/* Record which card-detect line should abort this transfer */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Append one DMA segment to the host scatter-gather table, splitting it
 * into chunks of at most 0x80000 bytes (SG_END is only kept on the
 * final chunk).  Entries beyond the table capacity are silently
 * dropped.  Note the do/while: a zero-length call would still emit one
 * entry — callers are expected to pass len > 0.
 */
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}

		/* entry layout: address (high 32), length, option flags */
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}
/*
 * Transfer @size bytes of a scatterlist via ADMA, resuming from the
 * position described by *index (sg entry) and *offset (bytes into that
 * entry) and updating both so a later call continues where this one
 * stopped.
 *
 * Returns 0 on success, -ETIMEDOUT/-EIO on failure (transfer is then
 * stopped via rtsx_stop_cmd()), -EIO/-ENXIO on bad arguments.
 */
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	/* Record which card-detect line should abort this transfer */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);

	/* Build the SG table starting at *index / *offset, clamped to resid */
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
			     (unsigned int)addr, len);
		RTSX_DEBUGP("*index = %d, *offset = %d\n", *index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* This entry covers the remainder of the transfer */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Entry fully consumed; advance to the next one */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid) {
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		} else {
			option = SG_VALID | SG_TRANS_DATA;
		}

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Transfer an entire scatterlist via ADMA, issuing it in batches of at
 * most HOST_SG_TBL_BUF_LEN/8 entries when the list exceeds the on-host
 * SG table capacity.
 *
 * Returns 0 on success, -ETIMEDOUT/-EIO on failure (transfer is then
 * stopped via rtsx_stop_cmd()), -EIO/-ENXIO on bad arguments.
 */
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	/* Record which card-detect line should abort this transfer */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	sg_ptr = sg;

	/* One iteration per full SG table plus one for the remainder */
	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8)) {
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		} else {
			sg_cnt = (HOST_SG_TBL_BUF_LEN / 8);
		}

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
				     (unsigned int)addr, len);

			if (j == (sg_cnt - 1)) {
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			} else {
				option = SG_VALID | SG_TRANS_DATA;
			}

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		RTSX_DEBUGP("SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* NOTE(review): plain pointer arithmetic here (unlike the
		 * sg_next() walk above) looks redundant after the inner loop
		 * already advanced sg_ptr, and would misbehave on chained
		 * scatterlists — confirm against upstream fixes. */
		sg_ptr += sg_cnt;
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
			RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Transfer a single contiguous kernel buffer to/from the device using
 * a streaming DMA mapping (no scatter-gather table).
 *
 * Returns 0 on success, -ETIMEDOUT/-EIO on failure (transfer is then
 * stopped via rtsx_stop_cmd()), -EIO/-ENXIO/-ENOMEM on bad arguments
 * or mapping failure.
 */
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	/* 1U: "1 << 31" overflows a signed int, which is undefined
	 * behaviour in C.
	 */
	u32 val = 1U << 31;
	long timeleft;

	if ((buf == NULL) || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE) {
		dir = HOST_TO_DEVICE;
	} else if (dma_dir == DMA_FROM_DEVICE) {
		dir = DEVICE_TO_HOST;
	} else {
		return -ENXIO;
	}

	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
	/* NOTE(review): dma_mapping_error() is the canonical check here;
	 * a NULL test happens to work on platforms where 0 is never a
	 * valid DMA address — confirm before changing. */
	if (!addr)
		return -ENOMEM;

	/* Record which card-detect line should abort this transfer */
	if (card == SD_CARD) {
		rtsx->check_card_cd = SD_EXIST;
	} else if (card == MS_CARD) {
		rtsx->check_card_cd = MS_EXIST;
	} else if (card == XD_CARD) {
		rtsx->check_card_cd = XD_EXIST;
	} else {
		rtsx->check_card_cd = 0;
	}

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		RTSX_DEBUGP("Timeout (%s %d)\n", __func__, __LINE__);
		RTSX_DEBUGP("chip->int_reg = 0x%x\n", chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
	} else if (rtsx->trans_result == TRANS_RESULT_OK) {
		err = 0;
	}
	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
/*
 * Transfer part of a request, either from a scatter-gather list
 * (use_sg != 0) or from a flat buffer.  On error, a detected delink
 * condition forces a full reinitialisation of all card slots.
 */
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int ret;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (!use_sg)
		ret = rtsx_transfer_buf(chip, card,
				buf, len, dma_dir, timeout);
	else
		ret = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);

	if ((ret < 0) && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return ret;
}
/*
 * Transfer a complete request, either via ADMA scatter-gather
 * (use_sg != 0) or from a flat buffer.  On error, a detected delink
 * condition forces a full reinitialisation of all card slots.
 */
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int ret;

	RTSX_DEBUGP("use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (!use_sg)
		ret = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	else
		ret = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);

	if ((ret < 0) && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return ret;
}
| gpl-2.0 |
ak-67/kernel_mediatek_wiko | drivers/net/wireless/orinoco/main.c | 5086 | 64907 | /* main.c - (formerly known as dldwd_cs.c, orinoco_cs.c and orinoco.c)
*
* A driver for Hermes or Prism 2 chipset based PCMCIA wireless
* adaptors, with Lucent/Agere, Intersil or Symbol firmware.
*
* Current maintainers (as of 29 September 2003) are:
* Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
* (C) Copyright David Gibson, IBM Corporation 2001-2003.
* Copyright (C) 2000 David Gibson, Linuxcare Australia.
* With some help from :
* Copyright (C) 2001 Jean Tourrilhes, HP Labs
* Copyright (C) 2001 Benjamin Herrenschmidt
*
* Based on dummy_cs.c 1.27 2000/06/12 21:27:25
*
* Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy
* AT fasta.fh-dortmund.de>
* http://www.stud.fh-dortmund.de/~andy/wvlan/
*
* The contents of this file are subject to the Mozilla Public License
* Version 1.1 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License
* at http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and
* limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds AT users.sourceforge.net>. Portions created by David
* A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights
* Reserved.
*
* Alternatively, the contents of this file may be used under the
* terms of the GNU General Public License version 2 (the "GPL"), in
* which case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the MPL, indicate your decision by
* deleting the provisions above and replace them with the notice and
* other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file
* under either the MPL or the GPL. */
/*
* TODO
* o Handle de-encapsulation within network layer, provide 802.11
* headers (patch from Thomas 'Dent' Mirlacher)
* o Fix possible races in SPY handling.
* o Disconnect wireless extensions from fundamental configuration.
* o (maybe) Software WEP support (patch from Stano Meduna).
* o (maybe) Use multiple Tx buffers - driver handling queue
* rather than firmware.
*/
/* Locking and synchronization:
*
* The basic principle is that everything is serialized through a
* single spinlock, priv->lock. The lock is used in user, bh and irq
* context, so when taken outside hardirq context it should always be
* taken with interrupts disabled. The lock protects both the
* hardware and the struct orinoco_private.
*
* Another flag, priv->hw_unavailable indicates that the hardware is
* unavailable for an extended period of time (e.g. suspended, or in
* the middle of a hard reset). This flag is protected by the
* spinlock. All code which touches the hardware should check the
* flag after taking the lock, and if it is set, give up on whatever
* they are doing and drop the lock again. The orinoco_lock()
* function handles this (it unlocks and returns -EBUSY if
* hw_unavailable is non-zero).
*/
#define DRIVER_NAME "orinoco"
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/suspend.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include "hermes_rid.h"
#include "hermes_dld.h"
#include "hw.h"
#include "scan.h"
#include "mic.h"
#include "fw.h"
#include "wext.h"
#include "cfg.h"
#include "main.h"
#include "orinoco.h"
/********************************************************************/
/* Module information */
/********************************************************************/
MODULE_AUTHOR("Pavel Roskin <proski@gnu.org> & "
"David Gibson <hermes@gibson.dropbear.id.au>");
MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based "
"and similar wireless cards");
MODULE_LICENSE("Dual MPL/GPL");
/* Level of debugging. Used in the macros in orinoco.h */
#ifdef ORINOCO_DEBUG
int orinoco_debug = ORINOCO_DEBUG;
EXPORT_SYMBOL(orinoco_debug);
module_param(orinoco_debug, int, 0644);
MODULE_PARM_DESC(orinoco_debug, "Debug level");
#endif
static bool suppress_linkstatus; /* = 0 */
module_param(suppress_linkstatus, bool, 0644);
MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
static int ignore_disconnect; /* = 0 */
module_param(ignore_disconnect, int, 0644);
MODULE_PARM_DESC(ignore_disconnect,
"Don't report lost link to the network layer");
int force_monitor; /* = 0 */
module_param(force_monitor, int, 0644);
MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
/********************************************************************/
/* Internal constants */
/********************************************************************/
/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
#define ORINOCO_MIN_MTU 256
#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
#define MAX_IRQLOOPS_PER_IRQ 10
#define MAX_IRQLOOPS_PER_JIFFY (20000 / HZ) /* Based on a guestimate of
* how many events the
* device could
* legitimately generate */
#define DUMMY_FID 0xFFFF
/*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? \
HERMES_MAX_MULTICAST : 0)*/
#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
| HERMES_EV_TX | HERMES_EV_TXEXC \
| HERMES_EV_WTERR | HERMES_EV_INFO \
| HERMES_EV_INFDROP)
/********************************************************************/
/* Data types */
/********************************************************************/
/* Beginning of the Tx descriptor, used in TxExc handling */
struct hermes_txexc_data {
	struct hermes_tx_descriptor desc;	/* hardware descriptor; desc.status holds the error bits */
	__le16 frame_ctl;			/* 802.11 frame control */
	__le16 duration_id;			/* 802.11 duration/ID */
	u8 addr1[ETH_ALEN];			/* 802.11 address 1 -- reported as dest in the TXDROP event */
} __packed;
/* Rx frame header except compatibility 802.3 header */
struct hermes_rx_descriptor {
	/* Control */
	__le16 status;		/* HERMES_RXSTAT_* flags (CRC, MIC, msgtype, key id) */
	__le32 time;
	u8 silence;		/* noise reading; fed to orinoco_spy_gather() */
	u8 signal;		/* signal level reading; fed to orinoco_spy_gather() */
	u8 rate;
	u8 rxflow;
	__le32 reserved;

	/* 802.11 header */
	__le16 frame_ctl;
	__le16 duration_id;
	u8 addr1[ETH_ALEN];	/* becomes the Ethernet dest in orinoco_rx() */
	u8 addr2[ETH_ALEN];	/* Ethernet source when !FROMDS */
	u8 addr3[ETH_ALEN];	/* Ethernet source when FROMDS */
	__le16 seq_ctl;
	u8 addr4[ETH_ALEN];

	/* Data length */
	__le16 data_len;	/* payload length, excluding any Michael MIC */
} __packed;
/* One received frame, queued from __orinoco_ev_rx() (IRQ context) to
 * the Rx tasklet, which frees both desc and the queue entry. */
struct orinoco_rx_data {
	struct hermes_rx_descriptor *desc;	/* kmalloc'd Rx descriptor */
	struct sk_buff *skb;			/* frame payload */
	struct list_head list;			/* link in priv->rx_list */
};
/* Buffered scan results awaiting later processing. */
struct orinoco_scan_data {
	void *buf;		/* raw scan-result buffer */
	size_t len;		/* length of buf in bytes */
	int type;		/* scan result format; exact values defined by the scan code -- not visible here */
	struct list_head list;	/* link in a pending-scan list */
};
/********************************************************************/
/* Function prototypes */
/********************************************************************/
static int __orinoco_set_multicast_list(struct net_device *dev);
static int __orinoco_up(struct orinoco_private *priv);
static int __orinoco_down(struct orinoco_private *priv);
static int __orinoco_commit(struct orinoco_private *priv);
/********************************************************************/
/* Internal helper functions */
/********************************************************************/
/*
 * Derive the Hermes port type and the IBSS-creation flag from the
 * configured wireless mode (priv->iw_mode).
 */
void set_port_type(struct orinoco_private *priv)
{
	switch (priv->iw_mode) {
	case NL80211_IFTYPE_STATION:
		priv->port_type = 1;
		priv->createibss = 0;
		break;

	case NL80211_IFTYPE_ADHOC:
		/* Port 3 is a pseudo-IBSS alternative; when preferred,
		 * the firmware does not create a real IBSS. */
		priv->port_type = priv->prefer_port3 ? 3 : priv->ibss_port;
		priv->createibss = priv->prefer_port3 ? 0 : 1;
		break;

	case NL80211_IFTYPE_MONITOR:
		priv->port_type = 3;
		priv->createibss = 0;
		break;

	default:
		printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n",
		       priv->ndev->name);
		break;
	}
}
/********************************************************************/
/* Device methods */
/********************************************************************/
/*
 * Bring the interface up.  Fails with -EBUSY while the hardware is
 * unavailable (orinoco_lock() refuses).
 */
int orinoco_open(struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	unsigned long flags;
	int rc;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	rc = __orinoco_up(priv);
	if (rc == 0)
		priv->open = 1;

	orinoco_unlock(priv, &flags);

	return rc;
}
EXPORT_SYMBOL(orinoco_open);
/*
 * Bring the interface down.  Uses orinoco_lock_irq() rather than
 * orinoco_lock() because close must succeed even when hw_unavailable
 * is set (e.g. after a PC Card removal).
 */
int orinoco_stop(struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	int rc;

	orinoco_lock_irq(priv);

	priv->open = 0;
	rc = __orinoco_down(priv);

	orinoco_unlock_irq(priv);

	return rc;
}
EXPORT_SYMBOL(orinoco_stop);
/* Return the driver-private statistics block for this device. */
struct net_device_stats *orinoco_get_stats(struct net_device *dev)
{
	struct orinoco_private *op = ndev_priv(dev);

	return &op->stats;
}
EXPORT_SYMBOL(orinoco_get_stats);
/*
 * Push the current multicast configuration to the hardware.  A no-op
 * (with a debug message) while the hardware is unavailable.
 */
void orinoco_set_multicast_list(struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	unsigned long flags;

	if (orinoco_lock(priv, &flags)) {
		printk(KERN_DEBUG "%s: orinoco_set_multicast_list() "
		       "called when hw_unavailable\n", dev->name);
		return;
	}

	__orinoco_set_multicast_list(dev);
	orinoco_unlock(priv, &flags);
}
EXPORT_SYMBOL(orinoco_set_multicast_list);
/*
 * Validate and apply a new MTU.  Rejects values outside the
 * [ORINOCO_MIN_MTU, ORINOCO_MAX_MTU] range and values that would not
 * fit in the card's frame buffer once encapsulation and the 802.11
 * header are added.
 */
int orinoco_change_mtu(struct net_device *dev, int new_mtu)
{
	struct orinoco_private *priv = ndev_priv(dev);

	if (new_mtu < ORINOCO_MIN_MTU || new_mtu > ORINOCO_MAX_MTU)
		return -EINVAL;

	/* MTU + encapsulation + header length */
	if ((new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
	    (priv->nicbuf_size - ETH_HLEN))
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL(orinoco_change_mtu);
/********************************************************************/
/* Tx path */
/********************************************************************/
/* Add encapsulation and MIC to the existing SKB.
* The main xmit routine will then send the whole lot to the card.
* Need 8 bytes headroom
* Need 8 bytes tailroom
*
* With encapsulated ethernet II frame
* --------
* 803.3 header (14 bytes)
* dst[6]
* -------- src[6]
* 803.3 header (14 bytes) len[2]
* dst[6] 803.2 header (8 bytes)
* src[6] encaps[6]
* len[2] <- leave alone -> len[2]
* -------- -------- <-- 0
* Payload Payload
* ... ...
*
* -------- --------
* MIC (8 bytes)
* --------
*
* returns 0 on success, -ENOMEM on error.
*/
int orinoco_process_xmit_skb(struct sk_buff *skb,
			     struct net_device *dev,
			     struct orinoco_private *priv,
			     int *tx_control,
			     u8 *mic_buf)
{
	struct orinoco_tkip_key *key;
	struct ethhdr *eh;
	int do_mic;

	key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;

	/* A MIC is appended only when TKIP is in use and a Tx key is
	 * actually installed. */
	do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
		  (key != NULL));

	if (do_mic)
		/* Tell the firmware which key index to use and that a
		 * Michael MIC follows the payload. */
		*tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
			HERMES_TXCTRL_MIC;

	eh = (struct ethhdr *)skb->data;

	/* Encapsulate Ethernet-II frames: an h_proto greater than
	 * ETH_DATA_LEN is an EtherType, not an 802.3 length. */
	if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
		struct header_struct {
			struct ethhdr eth;	/* 802.3 header */
			u8 encap[6];		/* 802.2 header */
		} __packed hdr;
		/* New 802.3 length field: payload + SNAP header, minus
		 * the two MAC addresses already counted in skb->len. */
		int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);

		if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
			if (net_ratelimit())
				printk(KERN_ERR
				       "%s: Not enough headroom for 802.2 headers %d\n",
				       dev->name, skb_headroom(skb));
			return -ENOMEM;
		}

		/* Fill in new header */
		memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
		hdr.eth.h_proto = htons(len);
		memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));

		/* Make room for the new header, and copy it in */
		eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
		memcpy(eh, &hdr, sizeof(hdr));
	}

	/* Calculate Michael MIC over the payload (everything after the
	 * 802.3 header), writing the result into mic_buf. */
	if (do_mic) {
		size_t len = skb->len - ETH_HLEN;
		u8 *mic = &mic_buf[0];

		/* Have to write to an even address, so copy the spare
		 * byte across */
		if (skb->len % 2) {
			*mic = skb->data[skb->len - 1];
			mic++;
		}

		orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
			    eh->h_dest, eh->h_source, 0 /* priority */,
			    skb->data + ETH_HLEN,
			    len, mic);
	}

	return 0;
}
EXPORT_SYMBOL(orinoco_process_xmit_skb);
/*
 * Hard-start-xmit: encapsulate the frame (and MIC it if TKIP is
 * active), write descriptor + payload to the card through the BAP,
 * then kick the Tx command.  Returns NETDEV_TX_OK when the skb has
 * been consumed, NETDEV_TX_BUSY when the caller should retry.
 */
static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	struct hermes *hw = &priv->hw;
	int err = 0;
	u16 txfid = priv->txfid;
	int tx_control;
	unsigned long flags;
	/* +1 byte so the MIC can be shifted to start on an even
	 * address (see orinoco_process_xmit_skb()). */
	u8 mic_buf[MICHAEL_MIC_LEN + 1];

	if (!netif_running(dev)) {
		printk(KERN_ERR "%s: Tx on stopped device!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (netif_queue_stopped(dev)) {
		printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (orinoco_lock(priv, &flags) != 0) {
		printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (!netif_carrier_ok(dev) ||
	    (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
		/* Oops, the firmware hasn't established a connection,
		   silently drop the packet (this seems to be the
		   safest approach). */
		goto drop;
	}

	/* Check packet length */
	if (skb->len < ETH_HLEN)
		goto drop;

	/* Ask the firmware for both Tx-complete and Tx-exception
	 * events for this frame. */
	tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;

	err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
				       &mic_buf[0]);
	if (err)
		goto drop;

	if (priv->has_alt_txcntl) {
		/* WPA enabled firmwares have tx_cntl at the end of
		 * the 802.11 header.  So write zeroed descriptor and
		 * 802.11 header at the same time
		 */
		char desc[HERMES_802_3_OFFSET];
		__le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];

		memset(&desc, 0, sizeof(desc));

		*txcntl = cpu_to_le16(tx_control);
		err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
					  txfid, 0);
		if (err) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error %d writing Tx "
				       "descriptor to BAP\n", dev->name, err);
			goto busy;
		}
	} else {
		struct hermes_tx_descriptor desc;

		memset(&desc, 0, sizeof(desc));

		desc.tx_control = cpu_to_le16(tx_control);
		err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
					  txfid, 0);
		if (err) {
			if (net_ratelimit())
				printk(KERN_ERR "%s: Error %d writing Tx "
				       "descriptor to BAP\n", dev->name, err);
			goto busy;
		}

		/* Clear the 802.11 header and data length fields - some
		 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
		 * if this isn't done. */
		hermes_clear_words(hw, HERMES_DATA0,
				   HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
	}

	/* Write the 802.3 frame itself after the descriptor area. */
	err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
				  txfid, HERMES_802_3_OFFSET);
	if (err) {
		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
		       dev->name, err);
		goto busy;
	}

	if (tx_control & HERMES_TXCTRL_MIC) {
		size_t offset = HERMES_802_3_OFFSET + skb->len;
		size_t len = MICHAEL_MIC_LEN;

		/* BAP writes must start on an even address: back up one
		 * byte and write one extra (mic_buf already carries the
		 * displaced payload byte at its start). */
		if (offset % 2) {
			offset--;
			len++;
		}
		err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
					  txfid, offset);
		if (err) {
			printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
			       dev->name, err);
			goto busy;
		}
	}

	/* Finally, we actually initiate the send */
	netif_stop_queue(dev);

	err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
				txfid, NULL);
	if (err) {
		/* Command failed: re-enable the queue and retry later. */
		netif_start_queue(dev);
		if (net_ratelimit())
			printk(KERN_ERR "%s: Error %d transmitting packet\n",
			       dev->name, err);
		goto busy;
	}

	stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
	goto ok;

 drop:
	stats->tx_errors++;
	stats->tx_dropped++;

 ok:
	orinoco_unlock(priv, &flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

 busy:
	/* -EIO from the hardware warrants a full reset (done from
	 * process context); the skb is NOT freed on this path. */
	if (err == -EIO)
		schedule_work(&priv->reset_work);
	orinoco_unlock(priv, &flags);
	return NETDEV_TX_BUSY;
}
/*
 * Allocation-complete event: the firmware finished allocating a FID.
 * Only the driver's single Tx FID is expected; anything else (other
 * than the dummy ack value) is logged and ignored.
 */
static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw)
{
	struct orinoco_private *priv = ndev_priv(dev);
	u16 alloc_fid = hermes_read_regn(hw, ALLOCFID);

	if (alloc_fid != priv->txfid) {
		if (alloc_fid != DUMMY_FID)
			printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n",
			       dev->name, alloc_fid);
		return;
	}

	/* Acknowledge by parking the register on the dummy FID. */
	hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
}
/*
 * Tx-complete event: count the frame, restart the queue (stopped in
 * orinoco_xmit()), and acknowledge the event register.
 */
static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
{
	struct orinoco_private *priv = ndev_priv(dev);

	priv->stats.tx_packets++;
	netif_wake_queue(dev);

	hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
}
/*
 * Tx-exception event: a frame the firmware failed to transmit.  Reads
 * back the start of the Tx descriptor (status + addr1), bumps
 * tx_errors, and for retry/lifetime failures emits an IWEVTXDROP
 * wireless event so user space learns the peer is unreachable.
 */
static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	u16 fid = hermes_read_regn(hw, TXCOMPLFID);
	u16 status;
	struct hermes_txexc_data hdr;
	int err = 0;

	if (fid == DUMMY_FID)
		return; /* Nothing's really happened */

	/* Read part of the frame header - we need status and addr1 */
	err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
				 sizeof(struct hermes_txexc_data),
				 fid, 0);

	/* Acknowledge the event before acting on the descriptor data. */
	hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
	stats->tx_errors++;

	if (err) {
		printk(KERN_WARNING "%s: Unable to read descriptor on Tx error "
		       "(FID=%04X error %d)\n",
		       dev->name, fid, err);
		return;
	}

	DEBUG(1, "%s: Tx error, err %d (FID=%04X)\n", dev->name,
	      err, fid);

	/* We produce a TXDROP event only for retry or lifetime
	 * exceeded, because that's the only status that really mean
	 * that this particular node went away.
	 * Other errors means that *we* screwed up. - Jean II */
	status = le16_to_cpu(hdr.desc.status);
	if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
		union iwreq_data wrqu;

		/* Copy 802.11 dest address.
		 * We use the 802.11 header because the frame may
		 * not be 802.3 or may be mangled...
		 * In Ad-Hoc mode, it will be the node address.
		 * In managed mode, it will be most likely the AP addr
		 * User space will figure out how to convert it to
		 * whatever it needs (IP address or else).
		 * - Jean II */
		memcpy(wrqu.addr.sa_data, hdr.addr1, ETH_ALEN);
		wrqu.addr.sa_family = ARPHRD_ETHER;

		/* Send event to user space */
		wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
	}

	/* The Tx FID is free again; let the stack queue more frames. */
	netif_wake_queue(dev);
}
/*
 * Netdev Tx watchdog: log the relevant Hermes registers, count the
 * error, and schedule a full device reset from process context.
 */
void orinoco_tx_timeout(struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct hermes *hw = &priv->hw;

	printk(KERN_WARNING "%s: Tx timeout! "
	       "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n",
	       dev->name, hermes_read_regn(hw, ALLOCFID),
	       hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT));

	priv->stats.tx_errors++;

	schedule_work(&priv->reset_work);
}
/********************************************************************/
/* Rx path (data frames) */
/********************************************************************/
/* Does the frame have a SNAP header indicating it should be
* de-encapsulated to Ethernet-II? */
static inline int is_ethersnap(void *_hdr)
{
	u8 *hdr = _hdr;

	/* De-encapsulate every frame carrying a SNAP header: the first
	 * five bytes must match encaps_hdr (SSAP=DSAP=0xaa, CTRL=0x03,
	 * OUI 00:00), and the final OUI byte must be 0x00 or 0xf8 --
	 * different APs use different OUIs for some reason. */
	if (memcmp(hdr, &encaps_hdr, 5) != 0)
		return 0;

	return (hdr[5] == 0x00) || (hdr[5] == 0xf8);
}
/*
 * Feed one frame's signal/noise readings into the wireless-extensions
 * spy machinery for the given source MAC.
 */
static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
				      int level, int noise)
{
	struct iw_quality wq;

	wq.qual = (level > noise) ? (level - noise) : 0;
	/* 0x95 offset converts the raw readings for dBm reporting
	 * (IW_QUAL_DBM is set below). */
	wq.level = level - 0x95;
	wq.noise = noise - 0x95;
	wq.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;

	wireless_spy_update(dev, mac, &wq);
}
static void orinoco_stat_gather(struct net_device *dev,
				struct sk_buff *skb,
				struct hermes_rx_descriptor *desc)
{
	struct orinoco_private *priv = ndev_priv(dev);

	/* Spy support compares the source MAC against every entry in
	 * the spy list, which gets expensive with heavy Rx traffic
	 * (e.g. infrastructure mode) -- so only do it when the user
	 * has actually configured spy addresses.  When WIRELESS_SPY is
	 * compiled out, SPY_NUMBER() lets the compiler drop the whole
	 * body. - Jean II */
	if (!SPY_NUMBER(priv))
		return;

	orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN,
			   desc->signal, desc->silence);
}
/*
* orinoco_rx_monitor - handle received monitor frames.
*
* Arguments:
* dev network device
* rxfid received FID
* desc rx descriptor of the frame
*
* Call context: interrupt
*/
static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
			       struct hermes_rx_descriptor *desc)
{
	u32 hdrlen = 30;	/* return full header by default */
	u32 datalen = 0;
	u16 fc;
	int err;
	int len;
	struct sk_buff *skb;
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	struct hermes *hw = &priv->hw;

	len = le16_to_cpu(desc->data_len);

	/* Determine the size of the header and the data */
	fc = le16_to_cpu(desc->frame_ctl);
	switch (fc & IEEE80211_FCTL_FTYPE) {
	case IEEE80211_FTYPE_DATA:
		/* 4-address (ToDS+FromDS) data frames carry the full
		 * 30-byte header; other data frames use 24 bytes. */
		if ((fc & IEEE80211_FCTL_TODS)
		    && (fc & IEEE80211_FCTL_FROMDS))
			hdrlen = 30;
		else
			hdrlen = 24;
		datalen = len;
		break;
	case IEEE80211_FTYPE_MGMT:
		hdrlen = 24;
		datalen = len;
		break;
	case IEEE80211_FTYPE_CTL:
		/* Control frames carry no payload; header length
		 * depends on the subtype. */
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_PSPOLL:
		case IEEE80211_STYPE_RTS:
		case IEEE80211_STYPE_CFEND:
		case IEEE80211_STYPE_CFENDACK:
			hdrlen = 16;
			break;
		case IEEE80211_STYPE_CTS:
		case IEEE80211_STYPE_ACK:
			hdrlen = 10;
			break;
		}
		break;
	default:
		/* Unknown frame type */
		break;
	}

	/* sanity check the length */
	if (datalen > IEEE80211_MAX_DATA_LEN + 12) {
		printk(KERN_DEBUG "%s: oversized monitor frame, "
		       "data length = %d\n", dev->name, datalen);
		stats->rx_length_errors++;
		goto update_stats;
	}

	skb = dev_alloc_skb(hdrlen + datalen);
	if (!skb) {
		printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n",
		       dev->name);
		goto update_stats;
	}

	/* Copy the 802.11 header to the skb */
	memcpy(skb_put(skb, hdrlen), &(desc->frame_ctl), hdrlen);
	skb_reset_mac_header(skb);

	/* If any, copy the data from the card to the skb */
	if (datalen > 0) {
		/* Read length is rounded up to an even number of
		 * bytes: the card's IO granularity is 16 bits. */
		err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
					 ALIGN(datalen, 2), rxfid,
					 HERMES_802_2_OFFSET);
		if (err) {
			printk(KERN_ERR "%s: error %d reading monitor frame\n",
			       dev->name, err);
			goto drop;
		}
	}

	skb->dev = dev;
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = cpu_to_be16(ETH_P_802_2);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	netif_rx(skb);
	return;

 drop:
	dev_kfree_skb_irq(skb);
 update_stats:
	stats->rx_errors++;
	stats->rx_dropped++;
}
/*
 * Rx event handler (interrupt context).  Reads the Rx descriptor and
 * frame data from the card, performs sanity checks, and queues the
 * frame on priv->rx_list for the Rx tasklet.  Monitor-mode frames are
 * handled immediately by orinoco_rx_monitor() instead.
 */
void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	struct iw_statistics *wstats = &priv->wstats;
	struct sk_buff *skb = NULL;
	u16 rxfid, status;
	int length;
	struct hermes_rx_descriptor *desc;
	struct orinoco_rx_data *rx_data;
	int err;

	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
	if (!desc) {
		printk(KERN_WARNING
		       "%s: Can't allocate space for RX descriptor\n",
		       dev->name);
		goto update_stats;
	}

	rxfid = hermes_read_regn(hw, RXFID);

	err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
				 rxfid, 0);
	if (err) {
		printk(KERN_ERR "%s: error %d reading Rx descriptor. "
		       "Frame dropped.\n", dev->name, err);
		goto update_stats;
	}

	status = le16_to_cpu(desc->status);

	if (status & HERMES_RXSTAT_BADCRC) {
		DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
		      dev->name);
		stats->rx_crc_errors++;
		goto update_stats;
	}

	/* Handle frames in monitor mode */
	if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
		orinoco_rx_monitor(dev, rxfid, desc);
		goto out;
	}

	if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
		DEBUG(1, "%s: Undecryptable frame on Rx. Frame dropped.\n",
		      dev->name);
		wstats->discard.code++;
		goto update_stats;
	}

	length = le16_to_cpu(desc->data_len);

	/* Sanity checks */
	if (length < 3) { /* No for even an 802.2 LLC header */
		/* At least on Symbol firmware with PCF we get quite a
		   lot of these legitimately - Poll frames with no
		   data. */
		goto out;
	}
	if (length > IEEE80211_MAX_DATA_LEN) {
		printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
		       dev->name, length);
		stats->rx_length_errors++;
		goto update_stats;
	}

	/* Payload size does not include Michael MIC. Increase payload
	 * size to read it together with the data. */
	if (status & HERMES_RXSTAT_MIC)
		length += MICHAEL_MIC_LEN;

	/* We need space for the packet data itself, plus an ethernet
	   header, plus 2 bytes so we can align the IP header on a
	   32bit boundary, plus 1 byte so we can read in odd length
	   packets from the card, which has an IO granularity of 16
	   bits */
	skb = dev_alloc_skb(length + ETH_HLEN + 2 + 1);
	if (!skb) {
		printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
		       dev->name);
		goto update_stats;
	}

	/* We'll prepend the header, so reserve space for it.  The worst
	   case is no decapsulation, when 802.3 header is prepended and
	   nothing is removed.  2 is for aligning the IP header. */
	skb_reserve(skb, ETH_HLEN + 2);

	err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
				 ALIGN(length, 2), rxfid,
				 HERMES_802_2_OFFSET);
	if (err) {
		printk(KERN_ERR "%s: error %d reading frame. "
		       "Frame dropped.\n", dev->name, err);
		goto drop;
	}

	/* Add desc and skb to rx queue */
	rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
	if (!rx_data)
		goto drop;

	/* Ownership of desc and skb passes to the Rx tasklet, which
	 * frees both after processing -- hence no kfree(desc) on this
	 * success path. */
	rx_data->desc = desc;
	rx_data->skb = skb;
	list_add_tail(&rx_data->list, &priv->rx_list);
	tasklet_schedule(&priv->rx_tasklet);

	return;

drop:
	dev_kfree_skb_irq(skb);
update_stats:
	stats->rx_errors++;
	stats->rx_dropped++;
out:
	kfree(desc);
}
EXPORT_SYMBOL(__orinoco_ev_rx);
/*
 * Process one data frame: verify the Michael MIC when present (TKIP),
 * de-encapsulate SNAP/802.2 frames back to Ethernet-II where
 * appropriate, rebuild the Ethernet addresses from the 802.11 header,
 * and hand the frame to the network stack.
 *
 * Called from the Rx tasklet with the driver lock held.
 */
static void orinoco_rx(struct net_device *dev,
		       struct hermes_rx_descriptor *desc,
		       struct sk_buff *skb)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	u16 status, fc;
	int length;
	struct ethhdr *hdr;

	status = le16_to_cpu(desc->status);
	length = le16_to_cpu(desc->data_len);
	fc = le16_to_cpu(desc->frame_ctl);

	/* Calculate and check MIC */
	if (status & HERMES_RXSTAT_MIC) {
		struct orinoco_tkip_key *key;
		int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
			      HERMES_MIC_KEY_ID_SHIFT);
		u8 mic[MICHAEL_MIC_LEN];
		u8 *rxmic;
		/* Source is addr3 for frames from the distribution
		 * system, addr2 otherwise. */
		u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
			desc->addr3 : desc->addr2;

		/* Extract Michael MIC from payload */
		rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
		skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
		length -= MICHAEL_MIC_LEN;

		key = (struct orinoco_tkip_key *) priv->keys[key_id].key;
		if (!key) {
			printk(KERN_WARNING "%s: Received encrypted frame from "
			       "%pM using key %i, but key is not installed\n",
			       dev->name, src, key_id);
			goto drop;
		}

		orinoco_mic(priv->rx_tfm_mic, key->rx_mic, desc->addr1, src,
			    0, /* priority or QoS? */
			    skb->data, skb->len, &mic[0]);

		if (memcmp(mic, rxmic,
			   MICHAEL_MIC_LEN)) {
			union iwreq_data wrqu;
			struct iw_michaelmicfailure wxmic;

			printk(KERN_WARNING "%s: "
			       "Invalid Michael MIC in data frame from %pM, "
			       "using key %i\n",
			       dev->name, src, key_id);

			/* TODO: update stats */

			/* Notify userspace */
			memset(&wxmic, 0, sizeof(wxmic));
			wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
			/* Multicast bit of addr1 distinguishes group
			 * from pairwise key failures. */
			wxmic.flags |= (desc->addr1[0] & 1) ?
				IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
			wxmic.src_addr.sa_family = ARPHRD_ETHER;
			memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);

			(void) orinoco_hw_get_tkip_iv(priv, key_id,
						      &wxmic.tsc[0]);

			memset(&wrqu, 0, sizeof(wrqu));
			wrqu.data.length = sizeof(wxmic);
			wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
					    (char *) &wxmic);
			goto drop;
		}
	}

	/* Handle decapsulation
	 * In most cases, the firmware tell us about SNAP frames.
	 * For some reason, the SNAP frames sent by LinkSys APs
	 * are not properly recognised by most firmwares.
	 * So, check ourselves */
	if (length >= ENCAPS_OVERHEAD &&
	    (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) ||
	     ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) ||
	     is_ethersnap(skb->data))) {
		/* These indicate a SNAP within 802.2 LLC within
		   802.11 frame which we'll need to de-encapsulate to
		   the original EthernetII frame. */
		hdr = (struct ethhdr *)skb_push(skb,
						ETH_HLEN - ENCAPS_OVERHEAD);
	} else {
		/* 802.3 frame - prepend 802.3 header as is */
		hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
		hdr->h_proto = htons(length);
	}

	/* Rebuild the Ethernet addresses from the 802.11 header. */
	memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
	if (fc & IEEE80211_FCTL_FROMDS)
		memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
	else
		memcpy(hdr->h_source, desc->addr2, ETH_ALEN);

	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;
	if (fc & IEEE80211_FCTL_TODS)
		skb->pkt_type = PACKET_OTHERHOST;

	/* Process the wireless stats if needed */
	orinoco_stat_gather(dev, skb, desc);

	/* Pass the packet to the networking stack */
	netif_rx(skb);
	stats->rx_packets++;
	stats->rx_bytes += length;

	return;

 drop:
	dev_kfree_skb(skb);
	stats->rx_errors++;
	stats->rx_dropped++;
}
/*
 * Rx tasklet: drain priv->rx_list, processing each queued frame with
 * orinoco_rx() and freeing the descriptor and queue entry afterwards.
 */
static void orinoco_rx_isr_tasklet(unsigned long data)
{
	struct orinoco_private *priv = (struct orinoco_private *) data;
	struct net_device *dev = priv->ndev;
	struct orinoco_rx_data *entry, *next;
	unsigned long flags;

	/* Both orinoco_rx() and priv->rx_list are protected by the
	 * driver lock, so hold it over the whole drain.  If the lock
	 * cannot be taken, the card has been unplugged -- just bail. */
	if (orinoco_lock(priv, &flags) != 0)
		return;

	list_for_each_entry_safe(entry, next, &priv->rx_list, list) {
		struct hermes_rx_descriptor *rx_desc = entry->desc;
		struct sk_buff *frame = entry->skb;

		list_del(&entry->list);
		kfree(entry);

		orinoco_rx(dev, rx_desc, frame);

		kfree(rx_desc);
	}

	orinoco_unlock(priv, &flags);
}
/********************************************************************/
/* Rx path (info frames) */
/********************************************************************/
/* Log a human-readable description of a firmware link-status code,
 * unless the user asked us to keep quiet via 'suppress_linkstatus'. */
static void print_linkstatus(struct net_device *dev, u16 status)
{
	static const struct {
		u16 status;
		const char *name;
	} status_names[] = {
		{ HERMES_LINKSTATUS_NOT_CONNECTED,   "Not Connected" },
		{ HERMES_LINKSTATUS_CONNECTED,       "Connected" },
		{ HERMES_LINKSTATUS_DISCONNECTED,    "Disconnected" },
		{ HERMES_LINKSTATUS_AP_CHANGE,       "AP Changed" },
		{ HERMES_LINKSTATUS_AP_OUT_OF_RANGE, "AP Out of Range" },
		{ HERMES_LINKSTATUS_AP_IN_RANGE,     "AP In Range" },
		{ HERMES_LINKSTATUS_ASSOC_FAILED,    "Association Failed" },
	};
	const char *s = "UNKNOWN";
	size_t i;

	if (suppress_linkstatus)
		return;

	for (i = 0; i < ARRAY_SIZE(status_names); i++) {
		if (status_names[i].status == status) {
			s = status_names[i].name;
			break;
		}
	}

	printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
	       dev->name, s, status);
}
/* Search scan results for requested BSSID, join it if found.
 * Runs from the join_work work item in process context; reads the scan
 * results table from the firmware only to discover the channel of the
 * requested AP, then issues a join request record. */
static void orinoco_join_ap(struct work_struct *work)
{
	struct orinoco_private *priv =
		container_of(work, struct orinoco_private, join_work);
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int err;
	unsigned long flags;
	/* Wire format of the firmware join request: BSSID + channel */
	struct join_req {
		u8 bssid[ETH_ALEN];
		__le16 channel;
	} __packed req;
	/* Scan atoms are compared only up to the 'atim' field */
	const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
	struct prism2_scan_apinfo *atom = NULL;
	int offset = 4;		/* skip the scan-result table header */
	int found = 0;
	u8 *buf;
	u16 len;

	/* Allocate buffer for scan results */
	buf = kmalloc(MAX_SCAN_LEN, GFP_KERNEL);
	if (!buf)
		return;

	if (orinoco_lock(priv, &flags) != 0)
		goto fail_lock;

	/* Sanity checks in case user changed something in the meantime */
	if (!priv->bssid_fixed)
		goto out;

	if (strlen(priv->desired_essid) == 0)
		goto out;

	/* Read scan results from the firmware */
	err = hw->ops->read_ltv(hw, USER_BAP,
				HERMES_RID_SCANRESULTSTABLE,
				MAX_SCAN_LEN, &len, buf);
	if (err) {
		printk(KERN_ERR "%s: Cannot read scan results\n",
		       dev->name);
		goto out;
	}

	len = HERMES_RECLEN_TO_BYTES(len);

	/* Go through the scan results looking for the channel of the AP
	 * we were requested to join */
	for (; offset + atom_len <= len; offset += atom_len) {
		atom = (struct prism2_scan_apinfo *) (buf + offset);
		if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) {
			found = 1;
			break;
		}
	}

	if (!found) {
		DEBUG(1, "%s: Requested AP not found in scan results\n",
		      dev->name);
		goto out;
	}

	memcpy(req.bssid, priv->desired_bssid, ETH_ALEN);
	req.channel = atom->channel;	/* both are little-endian */
	err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST,
				  &req);
	if (err)
		printk(KERN_ERR "%s: Error issuing join request\n", dev->name);

 out:
	orinoco_unlock(priv, &flags);

 fail_lock:
	kfree(buf);
}
/* Report the firmware's current BSSID to user space as a SIOCGIWAP
 * wireless event.  Silently does nothing if the read fails. */
static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	union iwreq_data wrqu;

	/* Fetch the BSSID straight into the event payload */
	if (hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
			      ETH_ALEN, NULL, wrqu.ap_addr.sa_data) != 0)
		return;

	wrqu.ap_addr.sa_family = ARPHRD_ETHER;

	/* Send event to user space */
	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
/* Push the WPA IE from the last association request up to user space
 * as an IWEVASSOCREQIE wireless event (WPA-capable firmware only). */
static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	union iwreq_data wrqu;
	u8 buf[88];
	u8 *ie;
	int rem;

	if (!priv->has_wpa)
		return;

	if (hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
			      sizeof(buf), NULL, &buf) != 0)
		return;

	ie = orinoco_get_wpa_ie(buf, sizeof(buf));
	if (!ie)
		return;

	/* IE header (2 bytes) + payload, clamped to the buffer tail */
	rem = sizeof(buf) - (ie - buf);
	wrqu.data.length = ie[1] + 2;
	if (wrqu.data.length > rem)
		wrqu.data.length = rem;

	if (wrqu.data.length)
		/* Send event to user space */
		wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
}
/* Push the WPA IE from the last association response up to user space
 * as an IWEVASSOCRESPIE wireless event (WPA-capable firmware only). */
static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	union iwreq_data wrqu;
	u8 buf[88];	/* TODO: verify max size or IW_GENERIC_IE_MAX */
	u8 *ie;
	int rem;

	if (!priv->has_wpa)
		return;

	if (hw->ops->read_ltv(hw, USER_BAP,
			      HERMES_RID_CURRENT_ASSOC_RESP_INFO,
			      sizeof(buf), NULL, &buf) != 0)
		return;

	ie = orinoco_get_wpa_ie(buf, sizeof(buf));
	if (!ie)
		return;

	/* IE header (2 bytes) + payload, clamped to the buffer tail */
	rem = sizeof(buf) - (ie - buf);
	wrqu.data.length = ie[1] + 2;
	if (wrqu.data.length > rem)
		wrqu.data.length = rem;

	if (wrqu.data.length)
		/* Send event to user space */
		wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
}
/*
 * Work item: report the association request/response IEs and the new
 * BSSID to user space.  Runs in process context because the helpers
 * read LTV records over the USER_BAP.
 */
static void orinoco_send_wevents(struct work_struct *work)
{
	struct orinoco_private *priv =
		container_of(work, struct orinoco_private, wevent_work);
	unsigned long flags;

	/* If the lock fails the card has been unplugged; nothing to report */
	if (orinoco_lock(priv, &flags) != 0)
		return;

	orinoco_send_assocreqie_wevent(priv);
	orinoco_send_assocrespie_wevent(priv);
	orinoco_send_bssid_wevent(priv);

	orinoco_unlock(priv, &flags);
}
static void qbuf_scan(struct orinoco_private *priv, void *buf,
int len, int type)
{
struct orinoco_scan_data *sd;
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
if (!sd) {
printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
return;
}
sd->buf = buf;
sd->len = len;
sd->type = type;
spin_lock_irqsave(&priv->scan_lock, flags);
list_add_tail(&sd->list, &priv->scan_list);
spin_unlock_irqrestore(&priv->scan_lock, flags);
schedule_work(&priv->process_scan);
}
static void qabort_scan(struct orinoco_private *priv)
{
struct orinoco_scan_data *sd;
unsigned long flags;
sd = kmalloc(sizeof(*sd), GFP_ATOMIC);
if (!sd) {
printk(KERN_ERR "%s: failed to alloc memory\n", __func__);
return;
}
sd->len = -1; /* Abort */
spin_lock_irqsave(&priv->scan_lock, flags);
list_add_tail(&sd->list, &priv->scan_list);
spin_unlock_irqrestore(&priv->scan_lock, flags);
schedule_work(&priv->process_scan);
}
/*
 * Work item that consumes priv->scan_list.  Entries with len > 0 carry
 * scan result payloads; len == 0 marks scan completion and len < 0 an
 * abort (see qbuf_scan()/qabort_scan()).
 */
static void orinoco_process_scan_results(struct work_struct *work)
{
	struct orinoco_private *priv =
		container_of(work, struct orinoco_private, process_scan);
	struct orinoco_scan_data *sd, *temp;
	unsigned long flags;
	void *buf;
	int len;
	int type;

	spin_lock_irqsave(&priv->scan_lock, flags);
	list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
		/* Copy out and unlink under the lock... */
		buf = sd->buf;
		len = sd->len;
		type = sd->type;

		list_del(&sd->list);
		/* ...then drop the lock while reporting, since the
		 * add/report helpers must not run with it held.
		 * NOTE(review): the walk continues from 'temp' after
		 * reacquiring the lock - this presumably relies on this
		 * worker being the only remover of entries; confirm. */
		spin_unlock_irqrestore(&priv->scan_lock, flags);

		kfree(sd);

		if (len > 0) {
			if (type == HERMES_INQ_CHANNELINFO)
				orinoco_add_extscan_result(priv, buf, len);
			else
				orinoco_add_hostscan_results(priv, buf, len);

			kfree(buf);
		} else {
			/* Either abort or complete the scan */
			orinoco_scan_done(priv, (len < 0));
		}

		spin_lock_irqsave(&priv->scan_lock, flags);
	}
	spin_unlock_irqrestore(&priv->scan_lock, flags);
}
/*
 * Handle a Hermes "info" event: read the pseudo-frame the firmware
 * queued (tallies, link status, scan results, ...) and dispatch on its
 * type.  Runs from the interrupt handler with the driver lock held.
 */
void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
{
	struct orinoco_private *priv = ndev_priv(dev);
	u16 infofid;
	struct {
		__le16 len;
		__le16 type;
	} __packed info;
	int len, type;
	int err;

	/* This is an answer to an INQUIRE command that we did earlier,
	 * or an information "event" generated by the card
	 * The controller return to us a pseudo frame containing
	 * the information in question - Jean II */
	infofid = hermes_read_regn(hw, INFOFID);

	/* Read the info frame header - don't try too hard */
	err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
				 infofid, 0);
	if (err) {
		printk(KERN_ERR "%s: error %d reading info frame. "
		       "Frame dropped.\n", dev->name, err);
		return;
	}

	len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len));
	type = le16_to_cpu(info.type);

	switch (type) {
	case HERMES_INQ_TALLIES: {
		/* Firmware statistics counters: fold them into the
		 * wireless-extensions iw_statistics block */
		struct hermes_tallies_frame tallies;
		struct iw_statistics *wstats = &priv->wstats;

		if (len > sizeof(tallies)) {
			printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n",
			       dev->name, len);
			len = sizeof(tallies);
		}

		err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
					 infofid, sizeof(info));
		if (err)
			break;

		/* Increment our various counters */
		/* wstats->discard.nwid - no wrong BSSID stuff */
		wstats->discard.code +=
			le16_to_cpu(tallies.RxWEPUndecryptable);
		if (len == sizeof(tallies))
			wstats->discard.code +=
				le16_to_cpu(tallies.RxDiscards_WEPICVError) +
				le16_to_cpu(tallies.RxDiscards_WEPExcluded);
		wstats->discard.misc +=
			le16_to_cpu(tallies.TxDiscardsWrongSA);
		wstats->discard.fragment +=
			le16_to_cpu(tallies.RxMsgInBadMsgFragments);
		wstats->discard.retries +=
			le16_to_cpu(tallies.TxRetryLimitExceeded);
		/* wstats->miss.beacon - no match */
	}
	break;
	case HERMES_INQ_LINKSTATUS: {
		struct hermes_linkstatus linkstatus;
		u16 newstatus;
		int connected;

		if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
			break;

		if (len != sizeof(linkstatus)) {
			printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n",
			       dev->name, len);
			break;
		}

		err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
					 infofid, sizeof(info));
		if (err)
			break;
		newstatus = le16_to_cpu(linkstatus.linkstatus);

		/* Symbol firmware uses "out of range" to signal that
		 * the hostscan frame can be requested. */
		if (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE &&
		    priv->firmware_type == FIRMWARE_TYPE_SYMBOL &&
		    priv->has_hostscan && priv->scan_request) {
			hermes_inquire(hw, HERMES_INQ_HOSTSCAN_SYMBOL);
			break;
		}

		connected = (newstatus == HERMES_LINKSTATUS_CONNECTED)
			|| (newstatus == HERMES_LINKSTATUS_AP_CHANGE)
			|| (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE);

		if (connected)
			netif_carrier_on(dev);
		else if (!ignore_disconnect)
			netif_carrier_off(dev);

		if (newstatus != priv->last_linkstatus) {
			priv->last_linkstatus = newstatus;
			print_linkstatus(dev, newstatus);
			/* The info frame contains only one word which is the
			 * status (see hermes.h). The status is pretty boring
			 * in itself, that's why we export the new BSSID...
			 * Jean II */
			schedule_work(&priv->wevent_work);
		}
	}
	break;
	case HERMES_INQ_SCAN:
		/* Intersil firmware with a fixed BSSID: a firmware-initiated
		 * scan finished, so try to join the requested AP */
		if (!priv->scan_request && priv->bssid_fixed &&
		    priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
			schedule_work(&priv->join_work);
			break;
		}
		/* fall through */
	case HERMES_INQ_HOSTSCAN:
	case HERMES_INQ_HOSTSCAN_SYMBOL: {
		/* Result of a scanning. Contains information about
		 * cells in the vicinity - Jean II */
		unsigned char *buf;

		/* Sanity check */
		if (len > 4096) {
			printk(KERN_WARNING "%s: Scan results too large (%d bytes)\n",
			       dev->name, len);
			qabort_scan(priv);
			break;
		}

		/* Allocate buffer for results */
		buf = kmalloc(len, GFP_ATOMIC);
		if (buf == NULL) {
			/* No memory, so can't printk()... */
			qabort_scan(priv);
			break;
		}

		/* Read scan data */
		err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
					 infofid, sizeof(info));
		if (err) {
			kfree(buf);
			qabort_scan(priv);
			break;
		}

#ifdef ORINOCO_DEBUG
		{
			int i;

			/* NOTE(review): this dump reads (len * 2) bytes from
			 * a buffer that is only len bytes long - looks like
			 * an out-of-bounds read in debug builds; confirm
			 * whether len was once counted in 16-bit words. */
			printk(KERN_DEBUG "Scan result [%02X", buf[0]);
			for (i = 1; i < (len * 2); i++)
				printk(":%02X", buf[i]);
			printk("]\n");
		}
#endif	/* ORINOCO_DEBUG */

		/* Hand the buffer (ownership included) to the worker */
		qbuf_scan(priv, buf, len, type);
	}
	break;
	case HERMES_INQ_CHANNELINFO:
	{
		struct agere_ext_scan_info *bss;

		if (!priv->scan_request) {
			printk(KERN_DEBUG "%s: Got chaninfo without scan, "
			       "len=%d\n", dev->name, len);
			break;
		}

		/* An empty result indicates that the scan is complete */
		if (len == 0) {
			qbuf_scan(priv, NULL, len, type);
			break;
		}

		/* Sanity check */
		else if (len < (offsetof(struct agere_ext_scan_info,
					 data) + 2)) {
			/* Drop this result now so we don't have to
			 * keep checking later */
			printk(KERN_WARNING
			       "%s: Ext scan results too short (%d bytes)\n",
			       dev->name, len);
			break;
		}

		bss = kmalloc(len, GFP_ATOMIC);
		if (bss == NULL)
			break;

		/* Read scan data */
		err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
					 infofid, sizeof(info));
		if (err)
			kfree(bss);
		else
			qbuf_scan(priv, bss, len, type);

		break;
	}
	case HERMES_INQ_SEC_STAT_AGERE:
		/* Security status (Agere specific) */
		/* Ignore this frame for now */
		if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
			break;
		/* fall through */
	default:
		printk(KERN_DEBUG "%s: Unknown information frame received: "
		       "type 0x%04x, length %d\n", dev->name, type, len);
		/* We don't actually do anything about it */
		break;
	}
}
EXPORT_SYMBOL(__orinoco_ev_info);
/* The firmware dropped an info frame; just note it (rate-limited) */
static void __orinoco_ev_infdrop(struct net_device *dev, struct hermes *hw)
{
	if (!net_ratelimit())
		return;

	printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
}
/********************************************************************/
/* Internal hardware control routines */
/********************************************************************/
/*
 * Program the current configuration into the firmware and (re)enable
 * the MAC port and TX queue.  Returns 0 or a negative error.  Caller
 * must hold the driver lock and ensure the hardware is available.
 */
static int __orinoco_up(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int err;

	netif_carrier_off(dev); /* just to make sure */

	err = __orinoco_commit(priv);
	if (err) {
		printk(KERN_ERR "%s: Error %d configuring card\n",
		       dev->name, err);
		return err;
	}

	/* Fire things up again */
	hermes_set_irqmask(hw, ORINOCO_INTEN);
	err = hermes_enable_port(hw, 0);
	if (err) {
		printk(KERN_ERR "%s: Error %d enabling MAC port\n",
		       dev->name, err);
		return err;
	}

	netif_start_queue(dev);

	return 0;
}
/*
 * Quiesce the interface: stop the TX queue, disable the MAC port and
 * mask/ack interrupts (only if the hardware is still reachable), then
 * abort any scan in progress.  Caller must hold the driver lock.
 */
static int __orinoco_down(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int err;

	netif_stop_queue(dev);

	if (!priv->hw_unavailable) {
		if (!priv->broken_disableport) {
			err = hermes_disable_port(hw, 0);
			if (err) {
				/* Some firmwares (e.g. Intersil 1.3.x) seem
				 * to have problems disabling the port, oh
				 * well, too bad. */
				printk(KERN_WARNING "%s: Error %d disabling MAC port\n",
				       dev->name, err);
				priv->broken_disableport = 1;
			}
		}
		hermes_set_irqmask(hw, 0);
		hermes_write_regn(hw, EVACK, 0xffff);
	}

	orinoco_scan_done(priv, true);

	/* firmware will have to reassociate */
	netif_carrier_off(dev);
	priv->last_linkstatus = 0xffff;

	return 0;
}
/* Re-initialize the firmware after a reset: run the chip init,
 * re-download firmware if we're configured to, then reallocate the
 * transmit FID.  Returns 0 or the first error encountered. */
static int orinoco_reinit_firmware(struct orinoco_private *priv)
{
	struct hermes *hw = &priv->hw;
	int err;

	err = hw->ops->init(hw);
	if (err)
		return err;

	if (priv->do_fw_download) {
		err = orinoco_download(priv);
		if (err) {
			/* Don't keep retrying the download on later resets */
			priv->do_fw_download = 0;
			return err;
		}
	}

	return orinoco_hw_allocate_fid(priv);
}
static int
__orinoco_set_multicast_list(struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	int promisc = 0;
	int mc_count = 0;

	/* The Hermes doesn't seem to have an allmulti mode, so we go
	 * into promiscuous mode and let the upper levels deal.  The
	 * same applies when the multicast list is too long for the
	 * hardware filter. */
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    (netdev_mc_count(dev) > MAX_MULTICAST(priv)))
		promisc = 1;
	else
		mc_count = netdev_mc_count(dev);

	return __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc);
}
/* This must be called from user context, without locks held - use
 * schedule_work().
 *
 * Full device reset: mask interrupts, mark the hardware unavailable,
 * optionally run the bus driver's hard_reset hook, re-initialize the
 * firmware and finally bring the interface back up if it was open.
 * On unrecoverable failure the device is detached entirely. */
void orinoco_reset(struct work_struct *work)
{
	struct orinoco_private *priv =
		container_of(work, struct orinoco_private, reset_work);
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int err;
	unsigned long flags;

	if (orinoco_lock(priv, &flags) != 0)
		/* When the hardware becomes available again, whatever
		 * detects that is responsible for re-initializing
		 * it. So no need for anything further */
		return;

	netif_stop_queue(dev);

	/* Shut off interrupts. Depending on what state the hardware
	 * is in, this might not work, but we'll try anyway */
	hermes_set_irqmask(hw, 0);
	hermes_write_regn(hw, EVACK, 0xffff);

	/* Keep everyone else off the hardware while we reset it */
	priv->hw_unavailable++;
	priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */
	netif_carrier_off(dev);

	orinoco_unlock(priv, &flags);

	/* Scanning support: Notify scan cancellation */
	orinoco_scan_done(priv, true);

	if (priv->hard_reset) {
		err = (*priv->hard_reset)(priv);
		if (err) {
			printk(KERN_ERR "%s: orinoco_reset: Error %d "
			       "performing hard reset\n", dev->name, err);
			goto disable;
		}
	}

	err = orinoco_reinit_firmware(priv);
	if (err) {
		printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
		       dev->name, err);
		goto disable;
	}

	/* This has to be called from user context */
	orinoco_lock_irq(priv);

	priv->hw_unavailable--;

	/* priv->open or priv->hw_unavailable might have changed while
	 * we dropped the lock */
	if (priv->open && (!priv->hw_unavailable)) {
		err = __orinoco_up(priv);
		if (err) {
			printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
			       dev->name, err);
		} else
			dev->trans_start = jiffies;
	}

	orinoco_unlock_irq(priv);

	return;
 disable:
	hermes_set_irqmask(hw, 0);
	netif_device_detach(dev);
	printk(KERN_ERR "%s: Device has been disabled!\n", dev->name);
}
/*
 * Reprogram the firmware RIDs from the current configuration and
 * refresh the multicast filter.  Does not enable the port; the caller
 * (orinoco_commit() or __orinoco_up()) does that afterwards.
 */
static int __orinoco_commit(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	int err = 0;

	/* If we've called commit, we are reconfiguring or bringing the
	 * interface up. Maintaining countermeasures across this would
	 * be confusing, so note that we've disabled them. The port will
	 * be enabled later in orinoco_commit or __orinoco_up. */
	priv->tkip_cm_active = 0;

	err = orinoco_hw_program_rids(priv);

	/* FIXME: what about netif_tx_lock */
	(void) __orinoco_set_multicast_list(dev);

	return err;
}
/* Ensures configuration changes are applied. May result in a reset.
 * The caller should hold priv->lock
 */
int orinoco_commit(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int err;

	/* If the port can't be disabled cleanly, reconfiguration has to
	 * go through a full reset instead */
	if (priv->broken_disableport) {
		schedule_work(&priv->reset_work);
		return 0;
	}

	err = hermes_disable_port(hw, 0);
	if (err) {
		printk(KERN_WARNING "%s: Unable to disable port "
		       "while reconfiguring card\n", dev->name);
		priv->broken_disableport = 1;
		goto out;
	}

	err = __orinoco_commit(priv);
	if (err) {
		printk(KERN_WARNING "%s: Unable to reconfigure card\n",
		       dev->name);
		goto out;
	}

	err = hermes_enable_port(hw, 0);
	if (err) {
		printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n",
		       dev->name);
		goto out;
	}

 out:
	/* Any failure above falls back to a scheduled reset, which is
	 * reported to the caller as success */
	if (err) {
		printk(KERN_WARNING "%s: Resetting instead...\n", dev->name);
		schedule_work(&priv->reset_work);
		err = 0;
	}
	return err;
}
/********************************************************************/
/* Interrupt handler */
/********************************************************************/
/* Firmware auxiliary timer tick event - nothing to do but log it */
static void __orinoco_ev_tick(struct net_device *dev, struct hermes *hw)
{
	printk(KERN_DEBUG "%s: TICK\n", dev->name);
}
static void __orinoco_ev_wterr(struct net_device *dev, struct hermes *hw)
{
	/* This seems to happen a fair bit under load, but ignoring it
	   seems to work fine...*/
	printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n",
	       dev->name);
}
/*
 * Main interrupt handler.  Reads EVSTAT, dispatches each pending event
 * to its handler, acknowledges the batch and re-samples, bounded both
 * per invocation (MAX_IRQLOOPS_PER_IRQ) and per jiffy
 * (MAX_IRQLOOPS_PER_JIFFY) to catch a stuck interrupt line.
 */
irqreturn_t orinoco_interrupt(int irq, void *dev_id)
{
	struct orinoco_private *priv = dev_id;
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	int count = MAX_IRQLOOPS_PER_IRQ;
	u16 evstat, events;
	/* These are used to detect a runaway interrupt situation.
	 *
	 * If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy,
	 * we panic and shut down the hardware
	 */
	/* jiffies value the last time we were called */
	static int last_irq_jiffy; /* = 0 */
	static int loops_this_jiffy; /* = 0 */
	unsigned long flags;

	if (orinoco_lock(priv, &flags) != 0) {
		/* If hw is unavailable - we don't know if the irq was
		 * for us or not */
		return IRQ_HANDLED;
	}

	evstat = hermes_read_regn(hw, EVSTAT);
	events = evstat & hw->inten;
	if (!events) {
		orinoco_unlock(priv, &flags);
		return IRQ_NONE;
	}

	if (jiffies != last_irq_jiffy)
		loops_this_jiffy = 0;
	last_irq_jiffy = jiffies;

	while (events && count--) {
		if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) {
			printk(KERN_WARNING "%s: IRQ handler is looping too "
			       "much! Resetting.\n", dev->name);
			/* Disable interrupts for now */
			hermes_set_irqmask(hw, 0);
			schedule_work(&priv->reset_work);
			break;
		}

		/* Check the card hasn't been removed */
		if (!hermes_present(hw)) {
			DEBUG(0, "orinoco_interrupt(): card removed\n");
			break;
		}

		/* Dispatch each pending event to its handler */
		if (events & HERMES_EV_TICK)
			__orinoco_ev_tick(dev, hw);
		if (events & HERMES_EV_WTERR)
			__orinoco_ev_wterr(dev, hw);
		if (events & HERMES_EV_INFDROP)
			__orinoco_ev_infdrop(dev, hw);
		if (events & HERMES_EV_INFO)
			__orinoco_ev_info(dev, hw);
		if (events & HERMES_EV_RX)
			__orinoco_ev_rx(dev, hw);
		if (events & HERMES_EV_TXEXC)
			__orinoco_ev_txexc(dev, hw);
		if (events & HERMES_EV_TX)
			__orinoco_ev_tx(dev, hw);
		if (events & HERMES_EV_ALLOC)
			__orinoco_ev_alloc(dev, hw);

		/* Acknowledge the whole batch, then look for more */
		hermes_write_regn(hw, EVACK, evstat);
		evstat = hermes_read_regn(hw, EVSTAT);
		events = evstat & hw->inten;
	}

	orinoco_unlock(priv, &flags);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(orinoco_interrupt);
/********************************************************************/
/* Power management */
/********************************************************************/
#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT)
/*
 * PM notifier: cache the firmware images before suspend/hibernation
 * (the filesystem may not be available when we resume) and release
 * them again afterwards.  Only relevant when this driver downloads
 * firmware itself.
 */
static int orinoco_pm_notifier(struct notifier_block *notifier,
			       unsigned long pm_event,
			       void *unused)
{
	struct orinoco_private *priv = container_of(notifier,
						    struct orinoco_private,
						    pm_notifier);

	/* All we need to do is cache the firmware before suspend, and
	 * release it when we come out.
	 *
	 * Only need to do this if we're downloading firmware. */
	if (!priv->do_fw_download)
		return NOTIFY_DONE;

	switch (pm_event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		orinoco_cache_fw(priv, 0);
		break;

	case PM_POST_RESTORE:
		/* Restore from hibernation failed. We need to clean
		 * up in exactly the same way, so fall through. */
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		orinoco_uncache_fw(priv);
		break;

	case PM_RESTORE_PREPARE:
	default:
		break;
	}

	return NOTIFY_DONE;
}
/* Hook orinoco_pm_notifier() into the PM notifier chain */
static void orinoco_register_pm_notifier(struct orinoco_private *priv)
{
	priv->pm_notifier.notifier_call = orinoco_pm_notifier;
	register_pm_notifier(&priv->pm_notifier);
}
/* Remove our PM notifier again (see free_orinocodev()) */
static void orinoco_unregister_pm_notifier(struct orinoco_private *priv)
{
	unregister_pm_notifier(&priv->pm_notifier);
}
#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
#define orinoco_register_pm_notifier(priv) do { } while (0)
#define orinoco_unregister_pm_notifier(priv) do { } while (0)
#endif
/********************************************************************/
/* Initialization */
/********************************************************************/
/*
 * One-time hardware/firmware initialization shared by all bus front
 * ends: probe firmware capabilities, optionally download firmware, set
 * the default configuration and register the wiphy.  On success the
 * hardware is marked available (hw_unavailable decremented).
 */
int orinoco_init(struct orinoco_private *priv)
{
	struct device *dev = priv->dev;
	struct wiphy *wiphy = priv_to_wiphy(priv);
	struct hermes *hw = &priv->hw;
	int err = 0;

	/* No need to lock, the hw_unavailable flag is already set in
	 * alloc_orinocodev() */
	priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;

	/* Initialize the firmware */
	err = hw->ops->init(hw);
	if (err != 0) {
		dev_err(dev, "Failed to initialize firmware (err = %d)\n",
			err);
		goto out;
	}

	err = determine_fw_capabilities(priv, wiphy->fw_version,
					sizeof(wiphy->fw_version),
					&wiphy->hw_version);
	if (err != 0) {
		dev_err(dev, "Incompatible firmware, aborting\n");
		goto out;
	}

	if (priv->do_fw_download) {
#ifdef CONFIG_HERMES_CACHE_FW_ON_INIT
		orinoco_cache_fw(priv, 0);
#endif

		err = orinoco_download(priv);
		if (err)
			/* Fall back to whatever firmware is on the card */
			priv->do_fw_download = 0;

		/* Check firmware version again */
		err = determine_fw_capabilities(priv, wiphy->fw_version,
						sizeof(wiphy->fw_version),
						&wiphy->hw_version);
		if (err != 0) {
			dev_err(dev, "Incompatible firmware, aborting\n");
			goto out;
		}
	}

	if (priv->has_port3)
		dev_info(dev, "Ad-hoc demo mode supported\n");
	if (priv->has_ibss)
		dev_info(dev, "IEEE standard IBSS ad-hoc mode supported\n");
	if (priv->has_wep)
		dev_info(dev, "WEP supported, %s-bit key\n",
			 priv->has_big_wep ? "104" : "40");
	if (priv->has_wpa) {
		dev_info(dev, "WPA-PSK supported\n");
		if (orinoco_mic_init(priv)) {
			dev_err(dev, "Failed to setup MIC crypto algorithm. "
				"Disabling WPA support\n");
			priv->has_wpa = 0;
		}
	}

	err = orinoco_hw_read_card_settings(priv, wiphy->perm_addr);
	if (err)
		goto out;

	err = orinoco_hw_allocate_fid(priv);
	if (err) {
		dev_err(dev, "Failed to allocate NIC buffer!\n");
		goto out;
	}

	/* Set up the default configuration */
	priv->iw_mode = NL80211_IFTYPE_STATION;
	/* By default use IEEE/IBSS ad-hoc mode if we have it */
	priv->prefer_port3 = priv->has_port3 && (!priv->has_ibss);
	set_port_type(priv);
	priv->channel = 0; /* use firmware default */

	priv->promiscuous = 0;
	priv->encode_alg = ORINOCO_ALG_NONE;
	priv->tx_key = 0;
	priv->wpa_enabled = 0;
	priv->tkip_cm_active = 0;
	priv->key_mgmt = 0;
	priv->wpa_ie_len = 0;
	priv->wpa_ie = NULL;

	if (orinoco_wiphy_register(wiphy)) {
		err = -ENODEV;
		goto out;
	}

	/* Make the hardware available, as long as it hasn't been
	 * removed elsewhere (e.g. by PCMCIA hot unplug) */
	orinoco_lock_irq(priv);
	priv->hw_unavailable--;
	orinoco_unlock_irq(priv);

	dev_dbg(dev, "Ready\n");

 out:
	return err;
}
EXPORT_SYMBOL(orinoco_init);
/* Default net_device callbacks, used when the bus driver does not
 * supply its own set (see orinoco_if_add()) */
static const struct net_device_ops orinoco_netdev_ops = {
	.ndo_open		= orinoco_open,
	.ndo_stop		= orinoco_stop,
	.ndo_start_xmit		= orinoco_xmit,
	.ndo_set_rx_mode	= orinoco_set_multicast_list,
	.ndo_change_mtu		= orinoco_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= orinoco_tx_timeout,
	.ndo_get_stats		= orinoco_get_stats,
};
/* Allocate private data.
*
* This driver has a number of structures associated with it
* netdev - Net device structure for each network interface
* wiphy - structure associated with wireless phy
* wireless_dev (wdev) - structure for each wireless interface
* hw - structure for hermes chip info
* card - card specific structure for use by the card driver
* (airport, orinoco_cs)
* priv - orinoco private data
* device - generic linux device structure
*
* +---------+ +---------+
* | wiphy | | netdev |
* | +-------+ | +-------+
* | | priv | | | wdev |
* | | +-----+ +-+-------+
* | | | hw |
* | +-+-----+
* | | card |
* +-+-------+
*
* priv has a link to netdev and device
* wdev has a link to wiphy
*/
/*
 * Allocate the wiphy together with the driver-private data (and the
 * bus driver's card-specific area of 'sizeof_card' bytes appended
 * after it), and initialize all software state.  Returns NULL on
 * allocation failure.  Hardware is left marked unavailable until
 * orinoco_init() has run.
 */
struct orinoco_private
*alloc_orinocodev(int sizeof_card,
		  struct device *device,
		  int (*hard_reset)(struct orinoco_private *),
		  int (*stop_fw)(struct orinoco_private *, int))
{
	struct orinoco_private *priv;
	struct wiphy *wiphy;

	/* allocate wiphy
	 * NOTE: We only support a single virtual interface
	 * but this may change when monitor mode is added
	 */
	wiphy = wiphy_new(&orinoco_cfg_ops,
			  sizeof(struct orinoco_private) + sizeof_card);
	if (!wiphy)
		return NULL;

	priv = wiphy_priv(wiphy);
	priv->dev = device;

	/* The card-specific area lives directly after the private data */
	if (sizeof_card)
		priv->card = (void *)((unsigned long)priv
				      + sizeof(struct orinoco_private));
	else
		priv->card = NULL;

	orinoco_wiphy_init(wiphy);

#ifdef WIRELESS_SPY
	priv->wireless_data.spy_data = &priv->spy_data;
#endif

	/* Set up default callbacks */
	priv->hard_reset = hard_reset;
	priv->stop_fw = stop_fw;

	spin_lock_init(&priv->lock);
	priv->open = 0;
	priv->hw_unavailable = 1; /* orinoco_init() must clear this
				   * before anything else touches the
				   * hardware */
	INIT_WORK(&priv->reset_work, orinoco_reset);
	INIT_WORK(&priv->join_work, orinoco_join_ap);
	INIT_WORK(&priv->wevent_work, orinoco_send_wevents);

	INIT_LIST_HEAD(&priv->rx_list);
	tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
		     (unsigned long) priv);

	spin_lock_init(&priv->scan_lock);
	INIT_LIST_HEAD(&priv->scan_list);
	INIT_WORK(&priv->process_scan, orinoco_process_scan_results);

	priv->last_linkstatus = 0xffff;

#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
	priv->cached_pri_fw = NULL;
	priv->cached_fw = NULL;
#endif

	/* Register PM notifiers */
	orinoco_register_pm_notifier(priv);

	return priv;
}
EXPORT_SYMBOL(alloc_orinocodev);
EXPORT_SYMBOL(alloc_orinocodev);
/* We can only support a single interface. We provide a separate
* function to set it up to distinguish between hardware
* initialisation and interface setup.
*
* The base_addr and irq parameters are passed on to netdev for use
* with SIOCGIFMAP.
*/
/*
 * Allocate, configure and register the net_device for this card.
 * Returns 0 on success or a negative error (the net_device is freed on
 * failure).  Fix: corrected the "Registerred" typo in the debug
 * message.
 */
int orinoco_if_add(struct orinoco_private *priv,
		   unsigned long base_addr,
		   unsigned int irq,
		   const struct net_device_ops *ops)
{
	struct wiphy *wiphy = priv_to_wiphy(priv);
	struct wireless_dev *wdev;
	struct net_device *dev;
	int ret;

	dev = alloc_etherdev(sizeof(struct wireless_dev));
	if (!dev)
		return -ENOMEM;

	/* Initialise wireless_dev */
	wdev = netdev_priv(dev);
	wdev->wiphy = wiphy;
	wdev->iftype = NL80211_IFTYPE_STATION;

	/* Setup / override net_device fields */
	dev->ieee80211_ptr = wdev;
	dev->watchdog_timeo = HZ; /* 1 second timeout */
	dev->wireless_handlers = &orinoco_handler_def;
#ifdef WIRELESS_SPY
	dev->wireless_data = &priv->wireless_data;
#endif
	/* Default to standard ops if not set */
	if (ops)
		dev->netdev_ops = ops;
	else
		dev->netdev_ops = &orinoco_netdev_ops;

	/* we use the default eth_mac_addr for setting the MAC addr */

	/* Reserve space in skb for the SNAP header */
	dev->needed_headroom = ENCAPS_OVERHEAD;

	netif_carrier_off(dev);

	memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
	memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);

	dev->base_addr = base_addr;
	dev->irq = irq;

	SET_NETDEV_DEV(dev, priv->dev);
	ret = register_netdev(dev);
	if (ret)
		goto fail;

	priv->ndev = dev;

	/* Report what we've done */
	dev_dbg(priv->dev, "Registered interface %s.\n", dev->name);

	return 0;

 fail:
	free_netdev(dev);
	return ret;
}
EXPORT_SYMBOL(orinoco_if_add);
/* Unregister and free the net_device created by orinoco_if_add() */
void orinoco_if_del(struct orinoco_private *priv)
{
	unregister_netdev(priv->ndev);
	free_netdev(priv->ndev);
}
EXPORT_SYMBOL(orinoco_if_del);
/*
 * Tear down everything alloc_orinocodev() and orinoco_init() set up:
 * unregister the wiphy, kill the RX tasklet and drain its queue, flush
 * and drain the scan queue, drop the PM notifier, release cached
 * firmware and free the wiphy (which owns the private data).
 *
 * Fix: dropped the redundant NULL test before kfree(sd->buf) -
 * kfree(NULL) is a no-op, and only entries queued with len > 0 carry a
 * buffer at all.
 */
void free_orinocodev(struct orinoco_private *priv)
{
	struct wiphy *wiphy = priv_to_wiphy(priv);
	struct orinoco_rx_data *rx_data, *temp;
	struct orinoco_scan_data *sd, *sdtemp;

	wiphy_unregister(wiphy);

	/* If the tasklet is scheduled when we call tasklet_kill it
	 * will run one final time. However the tasklet will only
	 * drain priv->rx_list if the hw is still available. */
	tasklet_kill(&priv->rx_tasklet);

	/* Explicitly drain priv->rx_list */
	list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
		list_del(&rx_data->list);
		dev_kfree_skb(rx_data->skb);
		kfree(rx_data->desc);
		kfree(rx_data);
	}

	cancel_work_sync(&priv->process_scan);
	/* Explicitly drain priv->scan_list */
	list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) {
		list_del(&sd->list);
		/* Abort/completion markers (len <= 0) carry no buffer */
		if (sd->len > 0)
			kfree(sd->buf);
		kfree(sd);
	}

	orinoco_unregister_pm_notifier(priv);
	orinoco_uncache_fw(priv);

	priv->wpa_ie_len = 0;
	kfree(priv->wpa_ie);
	orinoco_mic_free(priv);
	wiphy_free(wiphy);
}
EXPORT_SYMBOL(free_orinocodev);
/*
 * Bring the hardware back up after a bus-level resume: re-initialize
 * the firmware, reattach the device and, if the interface was open,
 * restart it.
 */
int orinoco_up(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	unsigned long flags;
	int err;

	priv->hw.ops->lock_irqsave(&priv->lock, &flags);

	err = orinoco_reinit_firmware(priv);
	if (err) {
		printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
		       dev->name, err);
		goto exit;
	}

	netif_device_attach(dev);
	priv->hw_unavailable--;

	if (priv->open && !priv->hw_unavailable) {
		err = __orinoco_up(priv);
		if (err)
			printk(KERN_ERR "%s: Error %d restarting card\n",
			       dev->name, err);
	}

exit:
	priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
	/* NOTE(review): errors above are logged but deliberately not
	 * propagated - callers always see success.  Confirm before
	 * changing this. */
	return 0;
}
EXPORT_SYMBOL(orinoco_up);
/*
 * Take the interface down and mark the hardware unavailable.  Called
 * by bus drivers on suspend/remove; locking goes through the
 * bus-provided lock ops.
 */
void orinoco_down(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	unsigned long flags;
	int err;

	priv->hw.ops->lock_irqsave(&priv->lock, &flags);
	err = __orinoco_down(priv);
	if (err)
		printk(KERN_WARNING "%s: Error %d downing interface\n",
		       dev->name, err);

	netif_device_detach(dev);
	priv->hw_unavailable++;
	priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
}
EXPORT_SYMBOL(orinoco_down);
/********************************************************************/
/* Module initialization */
/********************************************************************/
/* Can't be declared "const" or the whole __initdata section will
 * become const */
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
	" (David Gibson <hermes@gibson.dropbear.id.au>, "
	"Pavel Roskin <proski@gnu.org>, et al)";

/* The core module just logs its banner at load; the bus front ends
 * (PCMCIA, PCI, airport, ...) register themselves separately. */
static int __init init_orinoco(void)
{
	printk(KERN_DEBUG "%s\n", version);
	return 0;
}
/* Nothing to tear down here - all state is owned by the bus drivers */
static void __exit exit_orinoco(void)
{
}
module_init(init_orinoco);
module_exit(exit_orinoco);
| gpl-2.0 |
wanam/Adam-Kernel-GS4-LTE | sound/oss/pas2_card.c | 5086 | 9558 | /*
* sound/oss/pas2_card.c
*
* Detection routine for the Pro Audio Spectrum cards.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "pas2.h"
#include "sb.h"
/* Register encodings, presumably indexed by DMA channel / IRQ line -
 * TODO confirm against config_pas_hw() (not visible here) */
static unsigned char dma_bits[] = {
	4, 1, 2, 3, 0, 5, 6, 7
};

static unsigned char irq_bits[] = {
	0, 0, 1, 2, 3, 4, 5, 6, 0, 1, 7, 8, 9, 0, 10, 11
};

/* SoundBlaster-emulation IRQ/DMA configuration bit patterns */
static unsigned char sb_irq_bits[] = {
	0x00, 0x00, 0x08, 0x10, 0x00, 0x18, 0x00, 0x20,
	0x00, 0x08, 0x28, 0x30, 0x38, 0, 0
};

static unsigned char sb_dma_bits[] = {
	0x00, 0x40, 0x80, 0xC0, 0, 0, 0, 0
};
/*
* The Address Translation code is used to convert I/O register addresses to
* be relative to the given base -register
*/
int pas_translate_code = 0;
static int pas_intr_mask;
static int pas_irq;
static int pas_sb_base;
DEFINE_SPINLOCK(pas_lock);
#ifndef CONFIG_PAS_JOYSTICK
static bool joystick;
#else
static bool joystick = 1;
#endif
#ifdef SYMPHONY_PAS
static bool symphony = 1;
#else
static bool symphony;
#endif
#ifdef BROKEN_BUS_CLOCK
static bool broken_bus_clock = 1;
#else
static bool broken_bus_clock;
#endif
static struct address_info cfg;
static struct address_info cfg2;
char pas_model = 0;
static char *pas_model_names[] = {
"",
"Pro AudioSpectrum+",
"CDPC",
"Pro AudioSpectrum 16",
"Pro AudioSpectrum 16D"
};
/*
* pas_read() and pas_write() are equivalents of inb and outb
* These routines perform the I/O address translation required
* to support other than the default base address
*/
extern void mix_write(unsigned char data, int ioaddr);
/* Read a PAS16 register, applying the base-address translation offset
 * computed in detect_pas_hw(). */
unsigned char pas_read(int ioaddr)
{
	return inb(ioaddr + pas_translate_code);
}
/* Write a PAS16 register, applying the base-address translation offset
 * computed in detect_pas_hw(). */
void pas_write(unsigned char data, int ioaddr)
{
	outb((data), ioaddr + pas_translate_code);
}
/******************* Begin of the Interrupt Handler ********************/
/*
 * PAS16 interrupt handler: acknowledge the pending sources and dispatch
 * to the PCM and MIDI sub-handlers.
 */
static irqreturn_t pasintr(int irq, void *dev_id)
{
	int pending = pas_read(0x0B89);

	pas_write(pending, 0x0B89);	/* Clear interrupt */

	if (pending & 0x08) {
		pas_pcm_interrupt(pending, 1);
		pending &= ~0x08;
	}
	if (pending & 0x10) {
		pas_midi_interrupt();
		pending &= ~0x10;
	}
	return IRQ_HANDLED;
}
/* Enable the interrupt sources in @mask (bits are ORed into the shadow
 * mask and written to the interrupt-enable register). Always returns 0. */
int pas_set_intr(int mask)
{
	if (mask) {
		pas_intr_mask |= mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/* Disable the interrupt sources in @mask (bits are cleared from the shadow
 * mask and the result written back). Always returns 0. */
int pas_remove_intr(int mask)
{
	if (mask) {
		pas_intr_mask &= ~mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/******************* End of the Interrupt handler **********************/
/******************* Begin of the Initialization Code ******************/
/*
 * Program the PAS16 hardware from @hw_config: base registers, sound IRQ,
 * sound DMA, optional chipset workarounds and Sound Blaster emulation.
 * Returns nonzero ("ok") on success, 0 if any resource was invalid or
 * could not be allocated; failed resources are marked -1 in @hw_config
 * so unload_pas() does not try to free them.
 */
static int __init config_pas_hw(struct address_info *hw_config)
{
	char ok = 1;
	unsigned int_ptrs;	/* scsi/sound interrupt pointers */

	pas_irq = hw_config->irq;

	/* Bring the board registers into a known state. */
	pas_write(0x00, 0x0B8B);
	pas_write(0x36, 0x138B);
	pas_write(0x36, 0x1388);
	pas_write(0, 0x1388);
	pas_write(0x74, 0x138B);
	pas_write(0x74, 0x1389);
	pas_write(0, 0x1389);
	pas_write(0x80 | 0x40 | 0x20 | 1, 0x0B8A);
	pas_write(0x80 | 0x20 | 0x10 | 0x08 | 0x01, 0xF8A);
	pas_write(0x01 | 0x02 | 0x04 | 0x10 /* | 0x80 */, 0xB88);

	pas_write(0x80 | (joystick ? 0x40 : 0), 0xF388);

	/* Validate and program the sound IRQ. */
	if (pas_irq < 0 || pas_irq > 15)
	{
		printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
		hw_config->irq = -1;
		ok = 0;
	}
	else
	{
		int_ptrs = pas_read(0xF38A);
		int_ptrs = (int_ptrs & 0xf0) | irq_bits[pas_irq];
		pas_write(int_ptrs, 0xF38A);
		if (!irq_bits[pas_irq])
		{
			printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
			hw_config->irq = -1;
			ok = 0;
		}
		else
		{
			if (request_irq(pas_irq, pasintr, 0, "PAS16", hw_config) < 0) {
				printk(KERN_ERR "PAS16: Cannot allocate IRQ %d\n", pas_irq);
				hw_config->irq = -1;
				ok = 0;
			}
		}
	}

	/* Validate, program and allocate the sound DMA channel. */
	if (hw_config->dma < 0 || hw_config->dma > 7)
	{
		printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
		hw_config->dma = -1;
		ok = 0;
	}
	else
	{
		pas_write(dma_bits[hw_config->dma], 0xF389);
		if (!dma_bits[hw_config->dma])
		{
			printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
			hw_config->dma = -1;
			ok = 0;
		}
		else
		{
			if (sound_alloc_dma(hw_config->dma, "PAS16"))
			{
				printk(KERN_ERR "pas2_card.c: Can't allocate DMA channel\n");
				hw_config->dma = -1;
				ok = 0;
			}
		}
	}

	/*
	 * This fixes the timing problems of the PAS due to the Symphony chipset
	 * as per Media Vision. Only define this if your PAS doesn't work correctly.
	 */
	if (symphony)
	{
		outb((0x05), 0xa8);
		outb((0x60), 0xa9);
	}

	if (broken_bus_clock)
		pas_write(0x01 | 0x10 | 0x20 | 0x04, 0x8388);
	else
		pas_write(0x01 | 0x10 | 0x20, 0x8388);

	pas_write(0x18, 0x838A);	/* ??? */
	pas_write(0x20 | 0x01, 0x0B8A);	/* Mute off, filter = 17.897 kHz */
	pas_write(8, 0xBF8A);

	mix_write(0x80 | 5, 0x078B);
	mix_write(5, 0x078B);

	{
		struct address_info *sb_config = &cfg2;

		if (sb_config->io_base)
		{
			unsigned char irq_dma = 0;

			/*
			 * Turn on Sound Blaster compatibility
			 * bit 1 = SB emulation
			 * bit 0 = MPU401 emulation (CDPC only :-( )
			 */
			pas_write(0x02, 0xF788);

			/*
			 * "Emulation address"
			 */
			pas_write((sb_config->io_base >> 4) & 0x0f, 0xF789);
			pas_sb_base = sb_config->io_base;

			/*
			 * Range-check before indexing the lookup tables: with
			 * sb_io set but sb_dma/sb_irq left at their -1 module
			 * defaults (or sb_irq=15, past the 15-entry table) the
			 * old code read out of bounds and then programmed the
			 * garbage value anyway.  Invalid selections now
			 * contribute 0 to the emulation register.
			 */
			if (sb_config->dma < 0 ||
			    sb_config->dma >= (int) ARRAY_SIZE(sb_dma_bits) ||
			    !sb_dma_bits[sb_config->dma])
				printk(KERN_ERR "PAS16 Warning: Invalid SB DMA %d\n\n", sb_config->dma);
			else
				irq_dma |= sb_dma_bits[sb_config->dma];

			if (sb_config->irq < 0 ||
			    sb_config->irq >= (int) ARRAY_SIZE(sb_irq_bits) ||
			    !sb_irq_bits[sb_config->irq])
				printk(KERN_ERR "PAS16 Warning: Invalid SB IRQ %d\n\n", sb_config->irq);
			else
				irq_dma |= sb_irq_bits[sb_config->irq];

			pas_write(irq_dma, 0xFB8A);
		}
		else
			pas_write(0x00, 0xF788);
	}

	if (!ok)
		printk(KERN_WARNING "PAS16: Driver not enabled\n");

	return ok;
}
/*
 * Probe for a PAS16-series card at hw_config->io_base.
 * Returns the (nonzero) model code on success, 0 if no card was found.
 * Side effects: sets pas_translate_code and pas_model.
 */
static int __init detect_pas_hw(struct address_info *hw_config)
{
	unsigned char board_id, foo;

	/*
	 * WARNING: Setting an option like W:1 or so that disables warm boot reset
	 * of the card will screw up this detect code something fierce. Adding code
	 * to handle this means possibly interfering with other cards on the bus if
	 * you have something on base port 0x388. SO be forewarned.
	 */

	outb((0xBC), 0x9A01);			/* Activate first board */
	outb((hw_config->io_base >> 2), 0x9A01);	/* Set base address */
	pas_translate_code = hw_config->io_base - 0x388;
	pas_write(1, 0xBF88);		/* Select one wait states */

	board_id = pas_read(0x0B8B);
	if (board_id == 0xff)
		return 0;	/* Nothing answered at this address */

	/*
	 * We probably have a PAS-series board, now check for a PAS16-series board
	 * by trying to change the board revision bits. PAS16-series hardware won't
	 * let you do this - the bits are read-only.
	 */
	foo = board_id ^ 0xe0;

	pas_write(foo, 0x0B8B);
	foo = pas_read(0x0B8B);
	pas_write(board_id, 0x0B8B);	/* restore the original value */

	if (board_id != foo)
		return 0;	/* revision bits were writable: not a PAS16 */

	pas_model = pas_read(0xFF88);

	return pas_model;
}
/*
 * Detect the card, print its model/revision banner and, if the hardware
 * configuration succeeds, initialize the PCM, MIDI and mixer devices.
 */
static void __init attach_pas_card(struct address_info *hw_config)
{
	pas_irq = hw_config->irq;

	if (!detect_pas_hw(hw_config))
		return;

	if ((pas_model = pas_read(0xFF88)))
	{
		char temp[100];
		int model = (int) pas_model;

		/*
		 * The model code comes straight from the hardware; clamp it
		 * so an unknown value cannot index past the end of the
		 * 5-entry pas_model_names[] table (index 0 is "").
		 */
		if (model < 0 || model >= (int) ARRAY_SIZE(pas_model_names))
			model = 0;

		/* snprintf instead of sprintf: bound the write to temp[]. */
		snprintf(temp, sizeof(temp),
			 "%s rev %d", pas_model_names[model],
			 pas_read(0x2789));
		conf_printf(temp, hw_config);
	}
	if (config_pas_hw(hw_config))
	{
		pas_pcm_init(hw_config);
		pas_midi_init();
		pas_init_mixer();
	}
}
/* Probe hook: card presence is decided entirely by detect_pas_hw(). */
static inline int __init probe_pas(struct address_info *hw_config)
{
	return detect_pas_hw(hw_config);
}
/*
 * Release every resource acquired by config_pas_hw()/attach_pas_card():
 * DMA channel, IRQ line and the registered sound devices.
 */
static void __exit unload_pas(struct address_info *hw_config)
{
	extern int pas_audiodev;
	extern int pas2_mididev;

	/*
	 * DMA channel 0 is a valid PAS16 selection (dma_bits[0] != 0) and
	 * config_pas_hw() stores -1 on failure, so test ">= 0" here; the
	 * old "> 0" test leaked channel 0.  IRQ 0 is never valid
	 * (irq_bits[0] == 0), so "> 0" remains correct for the IRQ.
	 */
	if (hw_config->dma >= 0)
		sound_free_dma(hw_config->dma);
	if (hw_config->irq > 0)
		free_irq(hw_config->irq, hw_config);

	if (pas_audiodev != -1)
		sound_unload_mixerdev(audio_devs[pas_audiodev]->mixer_dev);
	if (pas2_mididev != -1)
		sound_unload_mididev(pas2_mididev);
	if (pas_audiodev != -1)
		sound_unload_audiodev(pas_audiodev);
}
static int __initdata io = -1;
static int __initdata irq = -1;
static int __initdata dma = -1;
static int __initdata dma16 = -1; /* Set this for modules that need it */
static int __initdata sb_io = 0;
static int __initdata sb_irq = -1;
static int __initdata sb_dma = -1;
static int __initdata sb_dma16 = -1;
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma16, int, 0);
module_param(sb_io, int, 0);
module_param(sb_irq, int, 0);
module_param(sb_dma, int, 0);
module_param(sb_dma16, int, 0);
module_param(joystick, bool, 0);
module_param(symphony, bool, 0);
module_param(broken_bus_clock, bool, 0);
MODULE_LICENSE("GPL");
/*
 * Module init: copy the module parameters into the two address_info
 * structures (main card + Sound Blaster emulation), then probe and
 * attach the card.  io, irq and dma are mandatory.
 */
static int __init init_pas2(void)
{
	printk(KERN_INFO "Pro Audio Spectrum driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	cfg.io_base = io;
	cfg.irq = irq;
	cfg.dma = dma;
	cfg.dma2 = dma16;

	/* SB-emulation settings; io_base 0 means "emulation disabled". */
	cfg2.io_base = sb_io;
	cfg2.irq = sb_irq;
	cfg2.dma = sb_dma;
	cfg2.dma2 = sb_dma16;

	if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
		printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
		return -EINVAL;
	}

	if (!probe_pas(&cfg))
		return -ENODEV;
	attach_pas_card(&cfg);

	return 0;
}
/* Module exit: free everything acquired for the main card config. */
static void __exit cleanup_pas2(void)
{
	unload_pas(&cfg);
}
module_init(init_pas2);
module_exit(cleanup_pas2);
#ifndef MODULE
/*
 * Kernel command-line parser for "pas2=io,irq,dma,dma2,sb_io,sb_irq,
 * sb_dma,sb_dma2".  Only the values actually supplied are consumed.
 */
static int __init setup_pas2(char *str)
{
	/* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, sb_dma2 */
	int ints[9];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/*
	 * get_options() stores the number of parsed integers in ints[0];
	 * the old code read all eight slots unconditionally, picking up
	 * uninitialized stack values when fewer options were given.
	 */
	if (ints[0] >= 1)
		io	 = ints[1];
	if (ints[0] >= 2)
		irq	 = ints[2];
	if (ints[0] >= 3)
		dma	 = ints[3];
	if (ints[0] >= 4)
		dma16	 = ints[4];
	if (ints[0] >= 5)
		sb_io	 = ints[5];
	if (ints[0] >= 6)
		sb_irq	 = ints[6];
	if (ints[0] >= 7)
		sb_dma	 = ints[7];
	if (ints[0] >= 8)
		sb_dma16 = ints[8];

	return 1;
}
__setup("pas2=", setup_pas2);
#endif
| gpl-2.0 |
GearCM/android_kernel_samsung_exynos5410 | drivers/net/wireless/ath/carl9170/led.c | 9438 | 4991 | /*
* Atheros CARL9170 driver
*
* LED handling
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2009, 2010, Christian Lamparer <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "carl9170.h"
#include "cmd.h"
/* Write @led_state to the GPIO data register; bit i drives LED i
 * (see carl9170_led_update(), which builds the mask as 1 << i). */
int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state)
{
	return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state);
}
/*
 * Configure the LED GPIO pins and switch all LEDs off.
 * Returns 0 on success or the error from the register write.
 */
int carl9170_led_init(struct ar9170 *ar)
{
	int err;

	/* disable LEDs */
	/* GPIO [0/1 mode: output, 2/3: input] */
	err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
	if (err)
		return err;

	/* GPIO 0/1 value: off */
	return carl9170_led_set_state(ar, 0);
}
#ifdef CONFIG_CARL9170_LEDS
/*
 * Delayed-work handler that blinks the LEDs: compute the on-mask from
 * each LED's last_state/toggled counters, write it to the hardware and
 * re-queue itself while any LED is active.  A higher toggle rate yields
 * a shorter blink delay (70..270 ms, capped at 1000 ms).
 */
static void carl9170_led_update(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
	int i, tmp = 300, blink_delay = 1000;
	u32 led_val = 0;
	bool rerun = false;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	/* ar->mutex protects the leds[] state while we scan it. */
	mutex_lock(&ar->mutex);
	for (i = 0; i < AR9170_NUM_LEDS; i++) {
		if (ar->leds[i].registered) {
			if (ar->leds[i].last_state ||
			    ar->leds[i].toggled) {

				/* More toggles since the last run -> blink faster. */
				if (ar->leds[i].toggled)
					tmp = 70 + 200 / (ar->leds[i].toggled);

				if (tmp < blink_delay)
					blink_delay = tmp;

				led_val |= 1 << i;	/* bit i drives LED i */
				ar->leds[i].toggled = 0;
				rerun = true;
			}
		}
	}

	carl9170_led_set_state(ar, led_val);
	mutex_unlock(&ar->mutex);

	if (!rerun)
		return;

	ieee80211_queue_delayed_work(ar->hw,
				     &ar->led_work,
				     msecs_to_jiffies(blink_delay));
}
/*
 * LED-class brightness callback.  Records the requested on/off state,
 * counts state changes in ->toggled and kicks the blink worker.
 */
static void carl9170_led_set_brightness(struct led_classdev *led,
					enum led_brightness brightness)
{
	struct carl9170_led *arl = container_of(led, struct carl9170_led, l);
	struct ar9170 *ar = arl->ar;
	unsigned int state = !!brightness;

	if (!arl->registered)
		return;

	if (arl->last_state != state) {
		arl->toggled++;
		arl->last_state = state;
	}

	if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
		ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
}
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
char *trigger)
{
int err;
snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
"carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
ar->leds[i].ar = ar;
ar->leds[i].l.name = ar->leds[i].name;
ar->leds[i].l.brightness_set = carl9170_led_set_brightness;
ar->leds[i].l.brightness = 0;
ar->leds[i].l.default_trigger = trigger;
err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
&ar->leds[i].l);
if (err) {
wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
ar->leds[i].name, err);
} else {
ar->leds[i].registered = true;
}
return err;
}
void carl9170_led_unregister(struct ar9170 *ar)
{
int i;
for (i = 0; i < AR9170_NUM_LEDS; i++)
if (ar->leds[i].registered) {
led_classdev_unregister(&ar->leds[i].l);
ar->leds[i].registered = false;
ar->leds[i].toggled = 0;
}
cancel_delayed_work_sync(&ar->led_work);
}
/*
 * Set up the blink worker and register the "tx" LED, plus the "assoc"
 * LED on devices that have more than one.  On any failure everything
 * registered so far is torn down again.
 */
int carl9170_led_register(struct ar9170 *ar)
{
	int err;

	INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update);

	err = carl9170_led_register_led(ar, 0, "tx",
					ieee80211_get_tx_led_name(ar->hw));
	if (err)
		goto fail;

	/* Single-LED devices stop after the "tx" LED. */
	if (!(ar->features & CARL9170_ONE_LED)) {
		err = carl9170_led_register_led(ar, 1, "assoc",
						ieee80211_get_assoc_led_name(ar->hw));
		if (err)
			goto fail;
	}

	return 0;

fail:
	carl9170_led_unregister(ar);
	return err;
}
#endif /* CONFIG_CARL9170_LEDS */
| gpl-2.0 |
Savaged-Zen/android_kernel_liquid_tuna | drivers/media/video/gspca/gl860/gl860-mi1320.c | 13022 | 18805 | /* Subdriver for the GL860 chip with the MI1320 sensor
* Author Olivier LORIN from own logs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* Sensor : MI1320 */
#include "gl860.h"
static struct validx tbl_common[] = {
{0xba00, 0x00f0}, {0xba00, 0x00f1}, {0xba51, 0x0066}, {0xba02, 0x00f1},
{0xba05, 0x0067}, {0xba05, 0x00f1}, {0xbaa0, 0x0065}, {0xba00, 0x00f1},
{0xffff, 0xffff},
{0xba00, 0x00f0}, {0xba02, 0x00f1}, {0xbafa, 0x0028}, {0xba02, 0x00f1},
{0xba00, 0x00f0}, {0xba01, 0x00f1}, {0xbaf0, 0x0006}, {0xba0e, 0x00f1},
{0xba70, 0x0006}, {0xba0e, 0x00f1},
{0xffff, 0xffff},
{0xba74, 0x0006}, {0xba0e, 0x00f1},
{0xffff, 0xffff},
{0x0061, 0x0000}, {0x0068, 0x000d},
};
static struct validx tbl_init_at_startup[] = {
{0x0000, 0x0000}, {0x0010, 0x0010},
{35, 0xffff},
{0x0008, 0x00c0}, {0x0001, 0x00c1}, {0x0001, 0x00c2}, {0x0020, 0x0006},
{0x006a, 0x000d},
};
static struct validx tbl_sensor_settings_common[] = {
{0x0010, 0x0010}, {0x0003, 0x00c1}, {0x0042, 0x00c2}, {0x0040, 0x0000},
{0x006a, 0x0007}, {0x006a, 0x000d}, {0x0063, 0x0006},
};
static struct validx tbl_sensor_settings_1280[] = {
{0xba00, 0x00f0}, {0xba00, 0x00f1}, {0xba5a, 0x0066}, {0xba02, 0x00f1},
{0xba05, 0x0067}, {0xba05, 0x00f1}, {0xba20, 0x0065}, {0xba00, 0x00f1},
};
static struct validx tbl_sensor_settings_800[] = {
{0xba00, 0x00f0}, {0xba00, 0x00f1}, {0xba5a, 0x0066}, {0xba02, 0x00f1},
{0xba05, 0x0067}, {0xba05, 0x00f1}, {0xba20, 0x0065}, {0xba00, 0x00f1},
};
static struct validx tbl_sensor_settings_640[] = {
{0xba00, 0x00f0}, {0xba00, 0x00f1}, {0xbaa0, 0x0065}, {0xba00, 0x00f1},
{0xba51, 0x0066}, {0xba02, 0x00f1}, {0xba05, 0x0067}, {0xba05, 0x00f1},
{0xba20, 0x0065}, {0xba00, 0x00f1},
};
static struct validx tbl_post_unset_alt[] = {
{0xba00, 0x00f0}, {0xba00, 0x00f1}, {0xbaa0, 0x0065}, {0xba00, 0x00f1},
{0x0061, 0x0000}, {0x0068, 0x000d},
};
static u8 *tbl_1280[] = {
"\x0d\x80\xf1\x08\x03\x04\xf1\x00" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
"\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
"\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
"\xa9\x04\xf1\x00\xa1\x05\xf1\x00" "\xa4\x04\xf1\x00\xae\x0a\xf1\x08"
,
"\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
"\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
"\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
,
"\xd3\x02\xd4\x28\xd5\x01\xd0\x02" "\xd1\x18\xd2\xc1"
};
static u8 *tbl_800[] = {
"\x0d\x80\xf1\x08\x03\x03\xf1\xc0" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
"\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
"\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
"\xa9\x03\xf1\xc0\xa1\x03\xf1\x20" "\xa4\x02\xf1\x5a\xae\x0a\xf1\x08"
,
"\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
"\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
"\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
,
"\xd3\x02\xd4\x18\xd5\x21\xd0\x02" "\xd1\x10\xd2\x59"
};
static u8 *tbl_640[] = {
"\x0d\x80\xf1\x08\x03\x04\xf1\x04" "\x04\x05\xf1\x02\x07\x01\xf1\x7c"
"\x08\x00\xf1\x0e\x21\x80\xf1\x00" "\x0d\x00\xf1\x08\xf0\x00\xf1\x01"
"\x34\x10\xf1\x10\x3a\x43\xf1\x00" "\xa6\x05\xf1\x02\xa9\x04\xf1\x04"
"\xa7\x02\xf1\x81\xaa\x01\xf1\xe2" "\xae\x0c\xf1\x09"
,
"\xf0\x00\xf1\x02\x39\x03\xf1\xfc" "\x3b\x04\xf1\x04\x57\x01\xf1\xb6"
"\x58\x02\xf1\x0d\x5c\x1f\xf1\x19" "\x5d\x24\xf1\x1e\x64\x5e\xf1\x1c"
"\xd2\x00\xf1\x00\xcb\x00\xf1\x01"
,
"\xd3\x02\xd4\x10\xd5\x81\xd0\x02" "\xd1\x08\xd2\xe1"
};
static s32 tbl_sat[] = {0x25, 0x1d, 0x15, 0x0d, 0x05, 0x4d, 0x55, 0x5d, 0x2d};
static s32 tbl_bright[] = {0, 8, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70};
static s32 tbl_backlight[] = {0x0e, 0x06, 0x02};
static s32 tbl_cntr1[] = {
0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8, 0xc0, 0xc8, 0xd0, 0xe0, 0xf0};
static s32 tbl_cntr2[] = {
0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40, 0x38, 0x30, 0x20, 0x10};
static u8 dat_wbalNL[] =
"\xf0\x00\xf1\x01\x05\x00\xf1\x06" "\x3b\x04\xf1\x2a\x47\x10\xf1\x10"
"\x9d\x3c\xf1\xae\xaf\x10\xf1\x00" "\xf0\x00\xf1\x02\x2f\x91\xf1\x20"
"\x9c\x91\xf1\x20\x37\x03\xf1\x00" "\x9d\xc5\xf1\x0f\xf0\x00\xf1\x00";
static u8 dat_wbalLL[] =
"\xf0\x00\xf1\x01\x05\x00\xf1\x0c" "\x3b\x04\xf1\x2a\x47\x40\xf1\x40"
"\x9d\x20\xf1\xae\xaf\x10\xf1\x00" "\xf0\x00\xf1\x02\x2f\xd1\xf1\x00"
"\x9c\xd1\xf1\x00\x37\x03\xf1\x00" "\x9d\xc5\xf1\x3f\xf0\x00\xf1\x00";
static u8 dat_wbalBL[] =
"\xf0\x00\xf1\x01\x05\x00\xf1\x06" "\x47\x10\xf1\x30\x9d\x3c\xf1\xae"
"\xaf\x10\xf1\x00\xf0\x00\xf1\x02" "\x2f\x91\xf1\x20\x9c\x91\xf1\x20"
"\x37\x03\xf1\x00\x9d\xc5\xf1\x2f" "\xf0\x00\xf1\x00";
static u8 dat_hvflip1[] = {0xf0, 0x00, 0xf1, 0x00};
static u8 dat_common00[] =
"\x00\x01\x07\x6a\x06\x63\x0d\x6a" "\xc0\x00\x10\x10\xc1\x03\xc2\x42"
"\xd8\x04\x58\x00\x04\x02";
static u8 dat_common01[] =
"\x0d\x00\xf1\x0b\x0d\x00\xf1\x08" "\x35\x00\xf1\x22\x68\x00\xf1\x5d"
"\xf0\x00\xf1\x01\x06\x70\xf1\x0e" "\xf0\x00\xf1\x02\xdd\x18\xf1\xe0";
static u8 dat_common02[] =
"\x05\x01\xf1\x84\x06\x00\xf1\x44" "\x07\x00\xf1\xbe\x08\x00\xf1\x1e"
"\x20\x01\xf1\x03\x21\x84\xf1\x00" "\x22\x0d\xf1\x0f\x24\x80\xf1\x00"
"\x34\x18\xf1\x2d\x35\x00\xf1\x22" "\x43\x83\xf1\x83\x59\x00\xf1\xff";
static u8 dat_common03[] =
"\xf0\x00\xf1\x02\x39\x06\xf1\x8c" "\x3a\x06\xf1\x8c\x3b\x03\xf1\xda"
"\x3c\x05\xf1\x30\x57\x01\xf1\x0c" "\x58\x01\xf1\x42\x59\x01\xf1\x0c"
"\x5a\x01\xf1\x42\x5c\x13\xf1\x0e" "\x5d\x17\xf1\x12\x64\x1e\xf1\x1c";
static u8 dat_common04[] =
"\xf0\x00\xf1\x02\x24\x5f\xf1\x20" "\x28\xea\xf1\x02\x5f\x41\xf1\x43";
static u8 dat_common05[] =
"\x02\x00\xf1\xee\x03\x29\xf1\x1a" "\x04\x02\xf1\xa4\x09\x00\xf1\x68"
"\x0a\x00\xf1\x2a\x0b\x00\xf1\x04" "\x0c\x00\xf1\x93\x0d\x00\xf1\x82"
"\x0e\x00\xf1\x40\x0f\x00\xf1\x5f" "\x10\x00\xf1\x4e\x11\x00\xf1\x5b";
static u8 dat_common06[] =
"\x15\x00\xf1\xc9\x16\x00\xf1\x5e" "\x17\x00\xf1\x9d\x18\x00\xf1\x06"
"\x19\x00\xf1\x89\x1a\x00\xf1\x12" "\x1b\x00\xf1\xa1\x1c\x00\xf1\xe4"
"\x1d\x00\xf1\x7a\x1e\x00\xf1\x64" "\xf6\x00\xf1\x5f";
static u8 dat_common07[] =
"\xf0\x00\xf1\x01\x53\x09\xf1\x03" "\x54\x3d\xf1\x1c\x55\x99\xf1\x72"
"\x56\xc1\xf1\xb1\x57\xd8\xf1\xce" "\x58\xe0\xf1\x00\xdc\x0a\xf1\x03"
"\xdd\x45\xf1\x20\xde\xae\xf1\x82" "\xdf\xdc\xf1\xc9\xe0\xf6\xf1\xea"
"\xe1\xff\xf1\x00";
static u8 dat_common08[] =
"\xf0\x00\xf1\x01\x80\x00\xf1\x06" "\x81\xf6\xf1\x08\x82\xfb\xf1\xf7"
"\x83\x00\xf1\xfe\xb6\x07\xf1\x03" "\xb7\x18\xf1\x0c\x84\xfb\xf1\x06"
"\x85\xfb\xf1\xf9\x86\x00\xf1\xff" "\xb8\x07\xf1\x04\xb9\x16\xf1\x0a";
static u8 dat_common09[] =
"\x87\xfa\xf1\x05\x88\xfc\xf1\xf9" "\x89\x00\xf1\xff\xba\x06\xf1\x03"
"\xbb\x17\xf1\x09\x8a\xe8\xf1\x14" "\x8b\xf7\xf1\xf0\x8c\xfd\xf1\xfa"
"\x8d\x00\xf1\x00\xbc\x05\xf1\x01" "\xbd\x0c\xf1\x08\xbe\x00\xf1\x14";
static u8 dat_common10[] =
"\x8e\xea\xf1\x13\x8f\xf7\xf1\xf2" "\x90\xfd\xf1\xfa\x91\x00\xf1\x00"
"\xbf\x05\xf1\x01\xc0\x0a\xf1\x08" "\xc1\x00\xf1\x0c\x92\xed\xf1\x0f"
"\x93\xf9\xf1\xf4\x94\xfe\xf1\xfb" "\x95\x00\xf1\x00\xc2\x04\xf1\x01"
"\xc3\x0a\xf1\x07\xc4\x00\xf1\x10";
static u8 dat_common11[] =
"\xf0\x00\xf1\x01\x05\x00\xf1\x06" "\x25\x00\xf1\x55\x34\x10\xf1\x10"
"\x35\xf0\xf1\x10\x3a\x02\xf1\x03" "\x3b\x04\xf1\x2a\x9b\x43\xf1\x00"
"\xa4\x03\xf1\xc0\xa7\x02\xf1\x81";
static int mi1320_init_at_startup(struct gspca_dev *gspca_dev);
static int mi1320_configure_alt(struct gspca_dev *gspca_dev);
static int mi1320_init_pre_alt(struct gspca_dev *gspca_dev);
static int mi1320_init_post_alt(struct gspca_dev *gspca_dev);
static void mi1320_post_unset_alt(struct gspca_dev *gspca_dev);
static int mi1320_sensor_settings(struct gspca_dev *gspca_dev);
static int mi1320_camera_settings(struct gspca_dev *gspca_dev);
/*==========================================================================*/
/* Set the initial/maximum control values and hook up the MI1320
 * sensor-specific callbacks on the subdriver state. */
void mi1320_init_settings(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* Current control values */
	sd->vcur.backlight = 0;
	sd->vcur.brightness = 0;
	sd->vcur.sharpness = 6;
	sd->vcur.contrast = 10;
	sd->vcur.gamma = 20;
	sd->vcur.hue = 0;
	sd->vcur.saturation = 6;
	sd->vcur.whitebal = 0;
	sd->vcur.mirror = 0;
	sd->vcur.flip = 0;
	sd->vcur.AC50Hz = 1;

	/* Maximum control values (0 disables the control) */
	sd->vmax.backlight = 2;
	sd->vmax.brightness = 8;
	sd->vmax.sharpness = 7;
	sd->vmax.contrast = 0; /* 10 but not working with this driver */
	sd->vmax.gamma = 40;
	sd->vmax.hue = 5 + 1;
	sd->vmax.saturation = 8;
	sd->vmax.whitebal = 2;
	sd->vmax.mirror = 1;
	sd->vmax.flip = 1;
	sd->vmax.AC50Hz = 1;

	/* Per-sensor callbacks used by the gl860 core */
	sd->dev_camera_settings = mi1320_camera_settings;
	sd->dev_init_at_startup = mi1320_init_at_startup;
	sd->dev_configure_alt = mi1320_configure_alt;
	sd->dev_init_pre_alt = mi1320_init_pre_alt;
	sd->dev_post_unset_alt = mi1320_post_unset_alt;
}
/*==========================================================================*/
/*
 * Send the common MI1320 register-initialization sequence over USB.
 * The dat_commonNN blobs and tbl_common were captured from vendor-driver
 * logs; the exact order of transfers is significant.
 */
static void common(struct gspca_dev *gspca_dev)
{
	s32 n; /* reserved for FETCH functions */

	ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 22, dat_common00);
	ctrl_out(gspca_dev, 0x40, 1, 0x0041, 0x0000, 0, NULL);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 32, dat_common01);
	/* Play tbl_common up to its first {0xffff, 0xffff} marker. */
	n = fetch_validx(gspca_dev, tbl_common, ARRAY_SIZE(tbl_common));
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 48, dat_common02);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 48, dat_common03);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 16, dat_common04);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 48, dat_common05);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 44, dat_common06);
	/* Resume tbl_common from where the previous fetch stopped. */
	keep_on_fetching_validx(gspca_dev, tbl_common,
				ARRAY_SIZE(tbl_common), n);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 52, dat_common07);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 48, dat_common08);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 48, dat_common09);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 56, dat_common10);
	keep_on_fetching_validx(gspca_dev, tbl_common,
				ARRAY_SIZE(tbl_common), n);
	ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 40, dat_common11);
	keep_on_fetching_validx(gspca_dev, tbl_common,
				ARRAY_SIZE(tbl_common), n);
}
/* One-time bridge + sensor initialization at device probe.
 * Always returns 0. */
static int mi1320_init_at_startup(struct gspca_dev *gspca_dev)
{
	fetch_validx(gspca_dev, tbl_init_at_startup,
			ARRAY_SIZE(tbl_init_at_startup));
	common(gspca_dev);

/*	ctrl_out(gspca_dev, 0x40, 11, 0x0000, 0x0000, 0, NULL); */

	return 0;
}
/*
 * Prepare for a new alternate-setting/stream: invalidate all cached
 * control values (-1) so mi1320_camera_settings() reprograms every
 * control, then re-run the common init and the per-mode setup.
 * Always returns 0.
 */
static int mi1320_init_pre_alt(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->mirrorMask = 0;

	/* -1 never matches a real value, forcing a full reprogram. */
	sd->vold.backlight = -1;
	sd->vold.brightness = -1;
	sd->vold.sharpness = -1;
	sd->vold.contrast = -1;
	sd->vold.saturation = -1;
	sd->vold.gamma = -1;
	sd->vold.hue = -1;
	sd->vold.whitebal = -1;
	sd->vold.mirror = -1;
	sd->vold.flip = -1;
	sd->vold.AC50Hz = -1;

	common(gspca_dev);

	mi1320_sensor_settings(gspca_dev);

	mi1320_init_post_alt(gspca_dev);

	return 0;
}
/* After the alternate setting is selected, (re)apply all controls.
 * Always returns 0. */
static int mi1320_init_post_alt(struct gspca_dev *gspca_dev)
{
	mi1320_camera_settings(gspca_dev);

	return 0;
}
/*
 * Program the sensor for the resolution of the current mode
 * (1280, 800 or the 640 default).  The tbl_* / register blobs were
 * captured from vendor-driver logs.  Always returns 0.
 */
static int mi1320_sensor_settings(struct gspca_dev *gspca_dev)
{
	s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv;

	ctrl_out(gspca_dev, 0x40, 5, 0x0001, 0x0000, 0, NULL);

	fetch_validx(gspca_dev, tbl_sensor_settings_common,
			ARRAY_SIZE(tbl_sensor_settings_common));

	switch (reso) {
	case IMAGE_1280:
		fetch_validx(gspca_dev, tbl_sensor_settings_1280,
				ARRAY_SIZE(tbl_sensor_settings_1280));
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 64, tbl_1280[0]);
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 40, tbl_1280[1]);
		ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, tbl_1280[2]);
		break;

	case IMAGE_800:
		fetch_validx(gspca_dev, tbl_sensor_settings_800,
				ARRAY_SIZE(tbl_sensor_settings_800));
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 64, tbl_800[0]);
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 40, tbl_800[1]);
		ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, tbl_800[2]);
		break;

	default:
		/* 640x480 */
		fetch_validx(gspca_dev, tbl_sensor_settings_640,
				ARRAY_SIZE(tbl_sensor_settings_640));
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 60, tbl_640[0]);
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 40, tbl_640[1]);
		ctrl_out(gspca_dev, 0x40, 3, 0x0000, 0x0200, 12, tbl_640[2]);
		break;
	}
	return 0;
}
/*
 * Pick the USB alternate setting for the current resolution:
 * alt 4 for 640x480, alt 2 for 800x600 and 1280x960.
 * Always returns 0.
 */
static int mi1320_configure_alt(struct gspca_dev *gspca_dev)
{
	s32 reso = gspca_dev->cam.cam_mode[(s32) gspca_dev->curr_mode].priv;

	if (reso == IMAGE_640)
		gspca_dev->alt = 4;	/* 3 + 1 */
	else if (reso == IMAGE_800 || reso == IMAGE_1280)
		gspca_dev->alt = 2;	/* 1 + 1 */

	return 0;
}
/*
 * Push every control whose value changed since the last call down to the
 * sensor.  Each block compares vcur (requested) against vold (last
 * programmed), clamps out-of-range values to 0 and issues the register
 * writes captured from vendor-driver logs.  Always returns 0.
 */
static int mi1320_camera_settings(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	s32 backlight = sd->vcur.backlight;
	s32 bright = sd->vcur.brightness;
	s32 sharp = sd->vcur.sharpness;
	s32 cntr = sd->vcur.contrast;
	s32 gam = sd->vcur.gamma;
	s32 hue = sd->vcur.hue;
	s32 sat = sd->vcur.saturation;
	s32 wbal = sd->vcur.whitebal;
	/* mirror/flip are XORed with the per-mode mask before comparison */
	s32 mirror = (((sd->vcur.mirror > 0) ^ sd->mirrorMask) > 0);
	s32 flip = (((sd->vcur.flip > 0) ^ sd->mirrorMask) > 0);
	s32 freq = (sd->vcur.AC50Hz > 0);
	s32 i;

	/* Power-line frequency (anti-flicker): 50 Hz vs 60 Hz */
	if (freq != sd->vold.AC50Hz) {
		sd->vold.AC50Hz = freq;

		freq = 2 * (freq == 0);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba02, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 , 0x005b, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01 + freq, 0x00f1, 0, NULL);
	}

	/* White balance preset: 0=normal, 1=low light, 2=back light.
	 * The sequence is sent twice (loop of 2) as in the vendor logs. */
	if (wbal != sd->vold.whitebal) {
		sd->vold.whitebal = wbal;
		if (wbal < 0 || wbal > sd->vmax.whitebal)
			wbal = 0;

		for (i = 0; i < 2; i++) {
			if (wbal == 0) { /* Normal light */
				ctrl_out(gspca_dev, 0x40, 1,
						0x0010, 0x0010, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0003, 0x00c1, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0042, 0x00c2, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 3,
						0xba00, 0x0200, 48, dat_wbalNL);
			}

			if (wbal == 1) { /* Low light */
				ctrl_out(gspca_dev, 0x40, 1,
						0x0010, 0x0010, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0004, 0x00c1, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0043, 0x00c2, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 3,
						0xba00, 0x0200, 48, dat_wbalLL);
			}

			if (wbal == 2) { /* Back light */
				ctrl_out(gspca_dev, 0x40, 1,
						0x0010, 0x0010, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0003, 0x00c1, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 1,
						0x0042, 0x00c2, 0, NULL);
				ctrl_out(gspca_dev, 0x40, 3,
						0xba00, 0x0200, 44, dat_wbalBL);
			}
		}
	}

	/* Brightness: index into tbl_bright[] */
	if (bright != sd->vold.brightness) {
		sd->vold.brightness = bright;
		if (bright < 0 || bright > sd->vmax.brightness)
			bright = 0;

		bright = tbl_bright[bright];
		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + bright, 0x0034, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + bright, 0x00f1, 0, NULL);
	}

	/* Saturation: index into tbl_sat[] */
	if (sat != sd->vold.saturation) {
		sd->vold.saturation = sat;
		if (sat < 0 || sat > sd->vmax.saturation)
			sat = 0;

		sat = tbl_sat[sat];
		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 , 0x0025, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + sat, 0x00f1, 0, NULL);
	}

	/* Sharpness: raw 0..7 value */
	if (sharp != sd->vold.sharpness) {
		sd->vold.sharpness = sharp;
		if (sharp < 0 || sharp > sd->vmax.sharpness)
			sharp = 0;

		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 , 0x0005, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + sharp, 0x00f1, 0, NULL);
	}

	/* Hue / color effect.
	 * NOTE(review): hue is handled twice on purpose-unknown grounds —
	 * here without saving vold.hue, and again below where vold.hue is
	 * updated, so a changed hue is programmed twice per call. */
	if (hue != sd->vold.hue) {
		/* 0=normal 1=NB 2="sepia" 3=negative 4=other 5=other2 */
		if (hue < 0 || hue > sd->vmax.hue)
			hue = 0;
		if (hue == sd->vmax.hue)
			sd->swapRB = 1;	/* last value: swap red/blue channels */
		else
			sd->swapRB = 0;

		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba70, 0x00e2, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + hue * (hue < 6), 0x00f1,
				0, NULL);
	}

	/* Backlight compensation: index into tbl_backlight[], sent twice */
	if (backlight != sd->vold.backlight) {
		sd->vold.backlight = backlight;
		if (backlight < 0 || backlight > sd->vmax.backlight)
			backlight = 0;

		backlight = tbl_backlight[backlight];
		for (i = 0; i < 2; i++) {
			ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
			ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
			ctrl_out(gspca_dev, 0x40, 1, 0xba74, 0x0006, 0, NULL);
			ctrl_out(gspca_dev, 0x40, 1, 0xba80 + backlight, 0x00f1,
					0, NULL);
		}
	}

	/* Second hue pass — this one records the new value in vold.hue. */
	if (hue != sd->vold.hue) {
		sd->vold.hue = hue;

		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba70, 0x00e2, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + hue * (hue < 6), 0x00f1,
				0, NULL);
	}

	/* Mirror / flip: encoded together in one register byte */
	if (mirror != sd->vold.mirror || flip != sd->vold.flip) {
		u8 dat_hvflip2[4] = {0x20, 0x01, 0xf1, 0x00};
		sd->vold.mirror = mirror;
		sd->vold.flip = flip;

		dat_hvflip2[3] = flip + 2 * mirror;
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 4, dat_hvflip1);
		ctrl_out(gspca_dev, 0x40, 3, 0xba00, 0x0200, 4, dat_hvflip2);
	}

	/* Gamma: doubled raw value */
	if (gam != sd->vold.gamma) {
		sd->vold.gamma = gam;
		if (gam < 0 || gam > sd->vmax.gamma)
			gam = 0;

		gam = 2 * gam;
		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba04 , 0x003b, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba02 + gam, 0x00f1, 0, NULL);
	}

	/* Contrast: two complementary table lookups */
	if (cntr != sd->vold.contrast) {
		sd->vold.contrast = cntr;
		if (cntr < 0 || cntr > sd->vmax.contrast)
			cntr = 0;

		ctrl_out(gspca_dev, 0x40, 1, 0xba00, 0x00f0, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba01, 0x00f1, 0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + tbl_cntr1[cntr], 0x0035,
				0, NULL);
		ctrl_out(gspca_dev, 0x40, 1, 0xba00 + tbl_cntr2[cntr], 0x00f1,
				0, NULL);
	}

	return 0;
}
/* Tear-down after the streaming alternate setting is released:
 * stop the bridge and play the shutdown register table. */
static void mi1320_post_unset_alt(struct gspca_dev *gspca_dev)
{
	ctrl_out(gspca_dev, 0x40, 5, 0x0000, 0x0000, 0, NULL);

	fetch_validx(gspca_dev, tbl_post_unset_alt,
			ARRAY_SIZE(tbl_post_unset_alt));
}
| gpl-2.0 |
flashalot/android_kernel_samsung_milletwifi | lib/ts_bm.c | 14558 | 5408 | /*
* lib/ts_bm.c Boyer-Moore text search implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Pablo Neira Ayuso <pablo@eurodev.net>
*
* ==========================================================================
*
* Implements Boyer-Moore string matching algorithm:
*
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
* http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
*
* Note: Since Boyer-Moore (BM) performs searches for matchings from right
* to left, it's still possible that a matching could be spread over
* multiple blocks, in that case this algorithm won't find any coincidence.
*
* If you're willing to ensure that such thing won't ever happen, use the
* Knuth-Pratt-Morris (KMP) implementation instead. In conclusion, choose
* the proper string search algorithm depending on your setting.
*
* Say you're using the textsearch infrastructure for filtering, NIDS or
* any similar security focused purpose, then go KMP. Otherwise, if you
* really care about performance, say you're classifying packets to apply
* Quality of Service (QoS) policies, and you don't mind about possible
* matchings spread over multiple fragments, then go BM.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
#define ASIZE 256
#if 0
#define DEBUGP printk
#else
#define DEBUGP(args, format...)
#endif
struct ts_bm
{
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
unsigned int good_shift[0];
};
/*
 * bm_find - run a Boyer-Moore search over the blocks supplied by @conf
 * @conf:  textsearch configuration (private part is struct ts_bm)
 * @state: search state carrying the absolute starting offset
 *
 * Returns the absolute offset of the first match, or UINT_MAX if the
 * pattern does not occur.  Matches spanning two blocks are not detected;
 * that is an inherent limitation of block-wise BM (see file header).
 *
 * Bug fix: @shift (position of the pattern's right end within the
 * current block) must be re-initialised for every block.  The previous
 * code set it once before the loop, so a position left over from the
 * previous block was used to index the next, possibly shorter, block.
 */
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
	struct ts_bm *bm = ts_config_priv(conf);
	unsigned int i, text_len, consumed = state->offset;
	const u8 *text;
	int bs;
	const u8 icase = conf->flags & TS_IGNORECASE;

	for (;;) {
		/* restart the window at the left edge of each block */
		int shift = bm->patlen - 1;

		text_len = conf->get_next_block(consumed, &text, conf, state);
		if (unlikely(text_len == 0))
			break;

		while (shift < text_len) {
			DEBUGP("Searching in position %d (%c)\n",
				shift, text[shift]);
			/* compare the pattern right-to-left; the pattern is
			 * stored upper-cased when TS_IGNORECASE is set */
			for (i = 0; i < bm->patlen; i++)
				if ((icase ? toupper(text[shift-i])
				     : text[shift-i])
					!= bm->pattern[bm->patlen-1-i])
					goto next;

			/* London calling... */
			DEBUGP("found!\n");
			return consumed += (shift-(bm->patlen-1));

next:			bs = bm->bad_shift[text[shift-i]];

			/* Now jumping to... */
			shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
		}
		consumed += text_len;
	}

	return UINT_MAX;
}
/*
 * subpattern - helper for the good-suffix table
 * @pattern: pattern bytes
 * @i, @j:   start positions of the two candidate occurrences
 * @g:       length of the suffix being matched
 *
 * Returns 1 when the length-@g chunk ending at @i+@g-1 reoccurs ending
 * at @j+@g-1 (either fully, preceded by a differing character so the
 * shift makes progress, or running off the left edge of the pattern).
 */
static int subpattern(u8 *pattern, int i, int j, int g)
{
	/* x and y walk the two occurrences right-to-left in lockstep */
	int x = i+g-1, y = j+g-1, ret = 0;

	while(pattern[x--] == pattern[y--]) {
		if (y < 0) {
			/* occurrence at @j fell off the front: still a match */
			ret = 1;
			break;
		}
		if (--g == 0) {
			/* whole chunk compared equal; require the preceding
			 * characters to differ */
			ret = pattern[i-1] != pattern[j-1];
			break;
		}
	}

	return ret;
}
/*
 * compute_prefix_tbl - precompute the two Boyer-Moore shift tables
 * @bm:    configuration; ->pattern and ->patlen must already be set
 * @flags: TS_IGNORECASE duplicates bad-character entries for the
 *         lower-case form of each pattern byte
 *
 * bad_shift[] implements the bad-character rule (indexed by byte value);
 * good_shift[] implements the good-suffix rule (indexed by the number of
 * pattern characters already matched).
 */
static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
	int i, j, g;

	/* bytes that never occur in the pattern allow a full-length shift */
	for (i = 0; i < ASIZE; i++)
		bm->bad_shift[i] = bm->patlen;
	for (i = 0; i < bm->patlen - 1; i++) {
		bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
		if (flags & TS_IGNORECASE)
			/* pattern bytes are stored upper-cased (see bm_init);
			 * mirror the entry for the lower-case byte */
			bm->bad_shift[tolower(bm->pattern[i])]
			    = bm->patlen - 1 - i;
	}

	/* Compute the good shift array, used to match reoccurrences
	 * of a subpattern */
	bm->good_shift[0] = 1;
	for (i = 1; i < bm->patlen; i++)
		bm->good_shift[i] = bm->patlen;
	for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
		/* find the rightmost reoccurrence of the length-g suffix */
		for (j = i-1; j >= 1-g ; j--)
			if (subpattern(bm->pattern, i, j, g)) {
				bm->good_shift[g] = bm->patlen-j-g;
				break;
			}
	}
}
/*
 * bm_init - allocate and initialise a Boyer-Moore search configuration
 * @pattern:  pattern bytes to search for
 * @len:      pattern length in bytes
 * @gfp_mask: allocation flags passed through to alloc_ts_config()
 * @flags:    TS_IGNORECASE stores the pattern upper-cased
 *
 * Private-area layout: struct ts_bm, immediately followed by the
 * good_shift table (@len unsigned ints, via the good_shift[0] trailing
 * member), followed by the @len-byte pattern copy.
 *
 * Returns the new ts_config or an ERR_PTR on allocation failure.
 */
static struct ts_config *bm_init(const void *pattern, unsigned int len,
				 gfp_t gfp_mask, int flags)
{
	struct ts_config *conf;
	struct ts_bm *bm;
	int i;
	unsigned int prefix_tbl_len = len * sizeof(unsigned int);
	size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;

	conf = alloc_ts_config(priv_size, gfp_mask);
	if (IS_ERR(conf))
		return conf;

	conf->flags = flags;
	bm = ts_config_priv(conf);
	bm->patlen = len;
	/* pattern copy lives right after the good_shift table */
	bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
	if (flags & TS_IGNORECASE)
		/* store upper-cased so bm_find can compare case-blind */
		for (i = 0; i < len; i++)
			bm->pattern[i] = toupper(((u8 *)pattern)[i]);
	else
		memcpy(bm->pattern, pattern, len);
	compute_prefix_tbl(bm, flags);

	return conf;
}
/* Return the stored (possibly upper-cased) pattern for this config. */
static void *bm_get_pattern(struct ts_config *conf)
{
	struct ts_bm *priv = ts_config_priv(conf);

	return priv->pattern;
}
/* Return the length in bytes of the stored pattern. */
static unsigned int bm_get_pattern_len(struct ts_config *conf)
{
	struct ts_bm *priv = ts_config_priv(conf);

	return priv->patlen;
}
static struct ts_ops bm_ops = {
.name = "bm",
.find = bm_find,
.init = bm_init,
.get_pattern = bm_get_pattern,
.get_pattern_len = bm_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(bm_ops.list)
};
/* Module init: register the "bm" algorithm with the textsearch core. */
static int __init init_bm(void)
{
	return textsearch_register(&bm_ops);
}
/* Module exit: unregister the "bm" algorithm. */
static void __exit exit_bm(void)
{
	textsearch_unregister(&bm_ops);
}
MODULE_LICENSE("GPL");
module_init(init_bm);
module_exit(exit_bm);
| gpl-2.0 |
crowell/gbadev.kernel | lib/ts_bm.c | 14558 | 5408 | /*
* lib/ts_bm.c Boyer-Moore text search implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Pablo Neira Ayuso <pablo@eurodev.net>
*
* ==========================================================================
*
* Implements Boyer-Moore string matching algorithm:
*
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
* http://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
*
* Note: Since Boyer-Moore (BM) performs searches for matchings from right
* to left, it's still possible that a matching could be spread over
* multiple blocks, in that case this algorithm won't find any coincidence.
*
* If you're willing to ensure that such thing won't ever happen, use the
 * Knuth-Morris-Pratt (KMP) implementation instead. In conclusion, choose
* the proper string search algorithm depending on your setting.
*
* Say you're using the textsearch infrastructure for filtering, NIDS or
* any similar security focused purpose, then go KMP. Otherwise, if you
* really care about performance, say you're classifying packets to apply
* Quality of Service (QoS) policies, and you don't mind about possible
* matchings spread over multiple fragments, then go BM.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
#define ASIZE 256
#if 0
#define DEBUGP printk
#else
#define DEBUGP(args, format...)
#endif
struct ts_bm
{
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
unsigned int good_shift[0];
};
/*
 * bm_find - run a Boyer-Moore search over the blocks supplied by @conf
 * @conf:  textsearch configuration (private part is struct ts_bm)
 * @state: search state carrying the absolute starting offset
 *
 * Returns the absolute offset of the first match, or UINT_MAX if the
 * pattern does not occur.  Matches spanning two blocks are not detected;
 * that is an inherent limitation of block-wise BM (see file header).
 *
 * Bug fix: @shift (position of the pattern's right end within the
 * current block) must be re-initialised for every block.  The previous
 * code set it once before the loop, so a position left over from the
 * previous block was used to index the next, possibly shorter, block.
 */
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
	struct ts_bm *bm = ts_config_priv(conf);
	unsigned int i, text_len, consumed = state->offset;
	const u8 *text;
	int bs;
	const u8 icase = conf->flags & TS_IGNORECASE;

	for (;;) {
		/* restart the window at the left edge of each block */
		int shift = bm->patlen - 1;

		text_len = conf->get_next_block(consumed, &text, conf, state);
		if (unlikely(text_len == 0))
			break;

		while (shift < text_len) {
			DEBUGP("Searching in position %d (%c)\n",
				shift, text[shift]);
			/* compare the pattern right-to-left; the pattern is
			 * stored upper-cased when TS_IGNORECASE is set */
			for (i = 0; i < bm->patlen; i++)
				if ((icase ? toupper(text[shift-i])
				     : text[shift-i])
					!= bm->pattern[bm->patlen-1-i])
					goto next;

			/* London calling... */
			DEBUGP("found!\n");
			return consumed += (shift-(bm->patlen-1));

next:			bs = bm->bad_shift[text[shift-i]];

			/* Now jumping to... */
			shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
		}
		consumed += text_len;
	}

	return UINT_MAX;
}
static int subpattern(u8 *pattern, int i, int j, int g)
{
int x = i+g-1, y = j+g-1, ret = 0;
while(pattern[x--] == pattern[y--]) {
if (y < 0) {
ret = 1;
break;
}
if (--g == 0) {
ret = pattern[i-1] != pattern[j-1];
break;
}
}
return ret;
}
static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
for (i = 0; i < bm->patlen - 1; i++) {
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
if (flags & TS_IGNORECASE)
bm->bad_shift[tolower(bm->pattern[i])]
= bm->patlen - 1 - i;
}
/* Compute the good shift array, used to match reocurrences
* of a subpattern */
bm->good_shift[0] = 1;
for (i = 1; i < bm->patlen; i++)
bm->good_shift[i] = bm->patlen;
for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
for (j = i-1; j >= 1-g ; j--)
if (subpattern(bm->pattern, i, j, g)) {
bm->good_shift[g] = bm->patlen-j-g;
break;
}
}
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_bm *bm;
int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
conf->flags = flags;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
if (flags & TS_IGNORECASE)
for (i = 0; i < len; i++)
bm->pattern[i] = toupper(((u8 *)pattern)[i]);
else
memcpy(bm->pattern, pattern, len);
compute_prefix_tbl(bm, flags);
return conf;
}
static void *bm_get_pattern(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->pattern;
}
static unsigned int bm_get_pattern_len(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->patlen;
}
static struct ts_ops bm_ops = {
.name = "bm",
.find = bm_find,
.init = bm_init,
.get_pattern = bm_get_pattern,
.get_pattern_len = bm_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(bm_ops.list)
};
static int __init init_bm(void)
{
return textsearch_register(&bm_ops);
}
static void __exit exit_bm(void)
{
textsearch_unregister(&bm_ops);
}
MODULE_LICENSE("GPL");
module_init(init_bm);
module_exit(exit_bm);
| gpl-2.0 |
bigzz/linux-btrfs | kernel/power/main.c | 223 | 15517 | /*
* kernel/power/main.c - PM subsystem core functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* This file is released under the GPLv2
*
*/
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/pm-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "power.h"
DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
/**
 * register_pm_notifier - add a callback to the PM-transition chain
 * @nb: notifier block to register
 *
 * The chain is invoked via pm_notifier_call_chain() around system
 * sleep transitions.  Returns 0 on success.
 */
int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);
/**
 * unregister_pm_notifier - remove a callback from the PM-transition chain
 * @nb: notifier block previously registered with register_pm_notifier()
 */
int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
/*
 * pm_notifier_call_chain - notify all registered PM callbacks of @val
 * (e.g. a PM_*_PREPARE/PM_POST_* event) and translate the notifier
 * return code into a negative errno (0 on success).
 */
int pm_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_async_enabled);
}
/*
 * sysfs store for /sys/power/pm_async: accepts "0" (disable) or "1"
 * (enable) asynchronous device suspend/resume; any other input is
 * rejected with -EINVAL.
 */
static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;

	if (val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}
power_attr(pm_async);
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
static const char * const pm_tests[__TEST_AFTER_LAST] = {
[TEST_NONE] = "none",
[TEST_CORE] = "core",
[TEST_CPUS] = "processors",
[TEST_PLATFORM] = "platform",
[TEST_DEVICES] = "devices",
[TEST_FREEZER] = "freezer",
};
static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
int level;
for (level = TEST_FIRST; level <= TEST_MAX; level++)
if (pm_tests[level]) {
if (level == pm_test_level)
s += sprintf(s, "[%s] ", pm_tests[level]);
else
s += sprintf(s, "%s ", pm_tests[level]);
}
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
const char * const *s;
int level;
char *p;
int len;
int error = -EINVAL;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
lock_system_sleep();
level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
pm_test_level = level;
error = 0;
break;
}
unlock_system_sleep();
return error ? error : n;
}
power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#ifdef CONFIG_DEBUG_FS
static char *suspend_step_name(enum suspend_stat_step step)
{
switch (step) {
case SUSPEND_FREEZE:
return "freeze";
case SUSPEND_PREPARE:
return "prepare";
case SUSPEND_SUSPEND:
return "suspend";
case SUSPEND_SUSPEND_NOIRQ:
return "suspend_noirq";
case SUSPEND_RESUME_NOIRQ:
return "resume_noirq";
case SUSPEND_RESUME:
return "resume";
default:
return "";
}
}
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
last_errno %= REC_FAILED_NUM;
last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
last_step %= REC_FAILED_NUM;
seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
"success", suspend_stats.success,
"fail", suspend_stats.fail,
"failed_freeze", suspend_stats.failed_freeze,
"failed_prepare", suspend_stats.failed_prepare,
"failed_suspend", suspend_stats.failed_suspend,
"failed_suspend_late",
suspend_stats.failed_suspend_late,
"failed_suspend_noirq",
suspend_stats.failed_suspend_noirq,
"failed_resume", suspend_stats.failed_resume,
"failed_resume_early",
suspend_stats.failed_resume_early,
"failed_resume_noirq",
suspend_stats.failed_resume_noirq);
seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
suspend_stats.failed_devs[last_dev]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_dev + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_stats.failed_devs[index]);
}
seq_printf(s, " last_failed_errno:\t%-d\n",
suspend_stats.errno[last_errno]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_errno + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-d\n",
suspend_stats.errno[index]);
}
seq_printf(s, " last_failed_step:\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[last_step]));
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_step + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[index]));
}
return 0;
}
static int suspend_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, suspend_stats_show, NULL);
}
static const struct file_operations suspend_stats_operations = {
.open = suspend_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init pm_debugfs_init(void)
{
debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
NULL, NULL, &suspend_stats_operations);
return 0;
}
late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
/*
* pm_print_times: print time taken by devices to suspend and resume.
*
* show() returns whether printing of suspend and resume times is enabled.
* store() accepts 0 or 1. 0 disables printing and 1 enables it.
*/
bool pm_print_times_enabled;
static ssize_t pm_print_times_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", pm_print_times_enabled);
}
static ssize_t pm_print_times_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
pm_print_times_enabled = !!val;
return n;
}
power_attr(pm_print_times);
static inline void pm_print_times_init(void)
{
pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
struct kobject *power_kobj;
/**
* state - control system sleep states.
*
* show() returns available sleep state labels, which may be "mem", "standby",
* "freeze" and "disk" (hibernation). See Documentation/power/states.txt for a
* description of what they mean.
*
* store() accepts one of those strings, translates it into the proper
* enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *s = buf;
#ifdef CONFIG_SUSPEND
suspend_state_t i;
for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
if (pm_states[i])
s += sprintf(s,"%s ", pm_states[i]);
#endif
if (hibernation_available())
s += sprintf(s, "disk ");
if (s != buf)
/* convert the last space to a newline */
*(s-1) = '\n';
return (s - buf);
}
/*
 * decode_state - translate a sysfs sleep-state string into an enum value
 * @buf: user-supplied string (may include a trailing newline)
 * @n:   length of @buf
 *
 * "disk" maps to PM_SUSPEND_MAX (hibernation marker); any label from
 * pm_states[] maps to its suspend_state_t.  Returns PM_SUSPEND_ON when
 * nothing matches, which callers treat as invalid input.
 */
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state;
#endif
	char *p;
	int len;

	/* compare only up to the first newline, if any */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
		const char *label = pm_states[state];

		if (label && len == strlen(label) && !strncmp(buf, label, len))
			return state;
	}
#endif

	return PM_SUSPEND_ON;
}
/*
 * sysfs store for /sys/power/state: decode the requested sleep state
 * and start the transition (pm_suspend() or hibernate()).  Refused with
 * -EBUSY while autosleep is active, since autosleep owns transitions.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		/* PM_SUSPEND_MAX is decode_state()'s marker for "disk" */
		error = hibernate();
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}
power_attr(state);
#ifdef CONFIG_PM_SLEEP
/*
* The 'wakeup_count' attribute, along with the functions defined in
* drivers/base/power/wakeup.c, provides a means by which wakeup events can be
* handled in a non-racy way.
*
* If a wakeup event occurs when the system is in a sleep state, it simply is
* woken up. In turn, if an event that would wake the system up from a sleep
* state occurs when it is undergoing a transition to that sleep state, the
* transition should be aborted. Moreover, if such an event occurs when the
* system is in the working state, an attempt to start a transition to the
* given sleep state should fail during certain period after the detection of
* the event. Using the 'state' attribute alone is not sufficient to satisfy
* these requirements, because a wakeup event may occur exactly when 'state'
* is being written to and may be delivered to user space right before it is
* frozen, so the event will remain only partially processed until the system is
* woken up by another event. In particular, it won't cause the transition to
* a sleep state to be aborted.
*
* This difficulty may be overcome if user space uses 'wakeup_count' before
* writing to 'state'. It first should read from 'wakeup_count' and store
* the read value. Then, after carrying out its own preparations for the system
* transition to a sleep state, it should write the stored value to
* 'wakeup_count'. If that fails, at least one wakeup event has occurred since
* 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
* is allowed to write to 'state', but the transition will be aborted if there
* are any wakeup events detected after 'wakeup_count' was written to.
*/
static ssize_t wakeup_count_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
unsigned int val;
return pm_get_wakeup_count(&val, true) ?
sprintf(buf, "%u\n", val) : -EINTR;
}
/*
 * sysfs store for /sys/power/wakeup_count: user space writes back the
 * count it previously read; pm_save_wakeup_count() fails (-EINVAL) if
 * wakeup events occurred in between, signalling that a sleep transition
 * should not be started.  Refused with -EBUSY while autosleep is active.
 */
static ssize_t wakeup_count_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
		else
			/* count stale: report which sources are active */
			pm_print_active_wakeup_sources();
	}

 out:
	pm_autosleep_unlock();
	return error;
}
power_attr(wakeup_count);
#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t autosleep_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
suspend_state_t state = pm_autosleep_state();
if (state == PM_SUSPEND_ON)
return sprintf(buf, "off\n");
#ifdef CONFIG_SUSPEND
if (state < PM_SUSPEND_MAX)
return sprintf(buf, "%s\n", pm_states[state] ?
pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
return sprintf(buf, "disk\n");
#else
return sprintf(buf, "error");
#endif
}
/*
 * sysfs store for /sys/power/autosleep: accepts a sleep-state label to
 * enable opportunistic sleep, or "off" to disable it.  Any other string
 * that decodes to PM_SUSPEND_ON is rejected with -EINVAL.
 */
static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int error;

	/* decode_state() returns PM_SUSPEND_ON for anything it does not
	 * recognise, so only accept that when the input is literally "off" */
	if (state == PM_SUSPEND_ON
	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
		return -EINVAL;

	error = pm_autosleep_set_state(state);
	return error ? error : n;
}
power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */
#ifdef CONFIG_PM_WAKELOCKS
static ssize_t wake_lock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, true);
}
static ssize_t wake_lock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_lock(buf);
return error ? error : n;
}
power_attr(wake_lock);
static ssize_t wake_unlock_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return pm_show_wakelocks(buf, false);
}
static ssize_t wake_unlock_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int error = pm_wake_unlock(buf);
return error ? error : n;
}
power_attr(wake_unlock);
#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
int val;
if (sscanf(buf, "%d", &val) == 1) {
pm_trace_enabled = !!val;
if (pm_trace_enabled) {
pr_warn("PM: Enabling pm_trace changes system date and time during resume.\n"
"PM: Correct system time has to be restored manually after resume.\n");
}
return n;
}
return -EINVAL;
}
power_attr(pm_trace);
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return show_trace_dev_match(buf, PAGE_SIZE);
}
static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
return -EINVAL;
}
power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
#ifdef CONFIG_FREEZER
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", freeze_timeout_msecs);
}
static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
freeze_timeout_msecs = val;
return n;
}
power_attr(pm_freeze_timeout);
#endif /* CONFIG_FREEZER*/
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
&wake_lock_attr.attr,
&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
&pm_freeze_timeout_attr.attr,
#endif
NULL,
};
static struct attribute_group attr_group = {
.attrs = g,
};
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
static int __init pm_start_workqueue(void)
{
pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
return pm_wq ? 0 : -ENOMEM;
}
static int __init pm_init(void)
{
int error = pm_start_workqueue();
if (error)
return error;
hibernate_image_size_init();
hibernate_reserved_size_init();
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
return error;
pm_print_times_init();
return pm_autosleep_init();
}
core_initcall(pm_init);
| gpl-2.0 |
kabata1975/android_kernel_c8690 | drivers/usb/serial/qcserial.c | 223 | 10993 | /*
* Qualcomm Serial USB driver
*
* Copyright (c) 2008 QUALCOMM Incorporated.
* Copyright (c) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2009 Novell Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/slab.h>
#include "usb-wwan.h"
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
static int debug;
#define DEVICE_G1K(v, p) \
USB_DEVICE(v, p), .driver_info = 1
static const struct usb_device_id id_table[] = {
/* Gobi 1000 devices */
{DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
{DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
{DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
{DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
{DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
{DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
{DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
{DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
{DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
{DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
{DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
{DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
/* Gobi 2000 devices */
{USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */
{USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
{USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */
{USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */
{USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
{USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */
{USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */
{USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */
{USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
{USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */
{USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
{USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */
{USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
{USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */
{USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
/* Gobi 3000 devices */
{USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
{USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver qcdriver = {
.name = "qcserial",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
.supports_autosuspend = true,
};
static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
{
struct usb_wwan_intf_private *data;
struct usb_host_interface *intf = serial->interface->cur_altsetting;
int retval = -ENODEV;
__u8 nintf;
__u8 ifnum;
bool is_gobi1k = id->driver_info ? true : false;
dbg("%s", __func__);
dbg("Is Gobi 1000 = %d", is_gobi1k);
nintf = serial->dev->actconfig->desc.bNumInterfaces;
dbg("Num Interfaces = %d", nintf);
ifnum = intf->desc.bInterfaceNumber;
dbg("This Interface = %d", ifnum);
data = kzalloc(sizeof(struct usb_wwan_intf_private),
GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
usb_enable_autosuspend(serial->dev);
switch (nintf) {
case 1:
/* QDL mode */
/* Gobi 2000 has a single altsetting, older ones have two */
if (serial->interface->num_altsetting == 2)
intf = &serial->interface->altsetting[1];
else if (serial->interface->num_altsetting > 2)
break;
if (intf->desc.bNumEndpoints == 2 &&
usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) &&
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dbg("QDL port found");
if (serial->interface->num_altsetting == 1) {
retval = 0; /* Success */
break;
}
retval = usb_set_interface(serial->dev, ifnum, 1);
if (retval < 0) {
dev_err(&serial->dev->dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
}
break;
case 3:
case 4:
/* Composite mode; don't bind to the QMI/net interface as that
* gets handled by other drivers.
*/
/* Gobi 1K USB layout:
* 0: serial port (doesn't respond)
* 1: serial port (doesn't respond)
* 2: AT-capable modem port
* 3: QMI/net
*
* Gobi 2K+ USB layout:
* 0: QMI/net
* 1: DM/DIAG (use libqcdm from ModemManager for communication)
* 2: AT-capable modem port
* 3: NMEA
*/
if (ifnum == 1 && !is_gobi1k) {
dbg("Gobi 2K+ DM/DIAG interface found");
retval = usb_set_interface(serial->dev, ifnum, 0);
if (retval < 0) {
dev_err(&serial->dev->dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
} else if (ifnum == 2) {
dbg("Modem port found");
retval = usb_set_interface(serial->dev, ifnum, 0);
if (retval < 0) {
dev_err(&serial->dev->dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
} else if (ifnum==3 && !is_gobi1k) {
/*
* NMEA (serial line 9600 8N1)
* # echo "\$GPS_START" > /dev/ttyUSBx
* # echo "\$GPS_STOP" > /dev/ttyUSBx
*/
dbg("Gobi 2K+ NMEA GPS interface found");
retval = usb_set_interface(serial->dev, ifnum, 0);
if (retval < 0) {
dev_err(&serial->dev->dev,
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
}
break;
default:
dev_err(&serial->dev->dev,
"unknown number of interfaces: %d\n", nintf);
kfree(data);
retval = -ENODEV;
}
/* Set serial->private if not returning -ENODEV */
if (retval != -ENODEV)
usb_set_serial_data(serial, data);
return retval;
}
/*
 * qc_release - usb_serial release hook; undoes qcprobe().
 *
 * Lets the generic usb_wwan core release its resources first, then drops
 * and frees the interface-private data that qcprobe() allocated.
 */
static void qc_release(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);

	dbg("%s", __func__);

	/* generic wwan teardown before we free our private data */
	usb_wwan_release(serial);

	usb_set_serial_data(serial, NULL);
	kfree(intfdata);
}
/*
 * usb_serial driver description for Qualcomm serial ports.  All TTY
 * operations are delegated to the shared usb_wwan helpers; only probe
 * (interface selection) and release are driver-specific.
 */
static struct usb_serial_driver qcdevice = {
	.driver = {
		.owner     = THIS_MODULE,
		.name      = "qcserial",
	},
	.description         = "Qualcomm USB modem",
	.id_table            = id_table,
	.usb_driver          = &qcdriver,
	.num_ports           = 1,
	.probe               = qcprobe,		/* picks/validates the interface */
	.open                = usb_wwan_open,
	.close               = usb_wwan_close,
	.write               = usb_wwan_write,
	.write_room          = usb_wwan_write_room,
	.chars_in_buffer     = usb_wwan_chars_in_buffer,
	.attach              = usb_wwan_startup,
	.disconnect          = usb_wwan_disconnect,
	.release             = qc_release,	/* frees data allocated in qcprobe */
#ifdef CONFIG_PM
	.suspend             = usb_wwan_suspend,
	.resume              = usb_wwan_resume,
#endif
};
/*
 * qcinit - module entry point.
 *
 * Registers the serial driver first, then the USB driver; if the second
 * registration fails the first is rolled back so the module loads
 * all-or-nothing.  Returns 0 on success or a negative errno.
 */
static int __init qcinit(void)
{
	int err;

	err = usb_serial_register(&qcdevice);
	if (err)
		goto out;

	err = usb_register(&qcdriver);
	if (err)
		goto err_deregister_serial;

	return 0;

err_deregister_serial:
	usb_serial_deregister(&qcdevice);
out:
	return err;
}
/*
 * qcexit - module exit point; unregisters in reverse order of qcinit().
 */
static void __exit qcexit(void)
{
	usb_deregister(&qcdriver);
	usb_serial_deregister(&qcdevice);
}
module_init(qcinit);
module_exit(qcexit);

/* DRIVER_AUTHOR / DRIVER_DESC are presumably #defined earlier in this
 * file (above the visible region) — TODO confirm. */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");

/* Runtime-writable debug switch; the "debug" variable itself is declared
 * outside the visible region (usb-serial convention) — TODO confirm. */
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
FrancescoCG/Crazy-Kernel1-TW-Kernel | drivers/gpu/drm/i915/intel_tv.c | 2015 | 48946 | /*
* Copyright © 2006-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/** @file
* Integrated TV-out support for the 915GM and 945GM.
*/
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
/* Indices into intel_tv.margin[]: overscan margins in pixels/lines. */
enum tv_margin {
	TV_MARGIN_LEFT, TV_MARGIN_TOP,
	TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM
};
/** Private structure for the integrated TV support */
struct intel_tv {
	struct intel_encoder base;	/* embedded generic encoder; see enc_to_intel_tv() */

	int type;			/* DRM_MODE_CONNECTOR_* currently driven */
	const char *tv_format;		/* name matching a tv_modes[] entry */
	int margin[4];			/* overscan margins, indexed by enum tv_margin */

	/*
	 * Register shadow copies, presumably captured on suspend and
	 * written back on resume (the save/restore code is outside this
	 * view — TODO confirm).
	 */
	u32 save_TV_H_CTL_1;
	u32 save_TV_H_CTL_2;
	u32 save_TV_H_CTL_3;
	u32 save_TV_V_CTL_1;
	u32 save_TV_V_CTL_2;
	u32 save_TV_V_CTL_3;
	u32 save_TV_V_CTL_4;
	u32 save_TV_V_CTL_5;
	u32 save_TV_V_CTL_6;
	u32 save_TV_V_CTL_7;
	u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;

	u32 save_TV_CSC_Y;
	u32 save_TV_CSC_Y2;
	u32 save_TV_CSC_U;
	u32 save_TV_CSC_U2;
	u32 save_TV_CSC_V;
	u32 save_TV_CSC_V2;
	u32 save_TV_CLR_KNOBS;
	u32 save_TV_CLR_LEVEL;
	u32 save_TV_WIN_POS;
	u32 save_TV_WIN_SIZE;
	u32 save_TV_FILTER_CTL_1;
	u32 save_TV_FILTER_CTL_2;
	u32 save_TV_FILTER_CTL_3;

	/* Filter coefficient shadows; sizes match the write loops in
	 * intel_tv_mode_set() (60 H luma/chroma, 43 V luma/chroma). */
	u32 save_TV_H_LUMA[60];
	u32 save_TV_H_CHROMA[60];
	u32 save_TV_V_LUMA[43];
	u32 save_TV_V_CHROMA[43];

	u32 save_TV_DAC;
	u32 save_TV_CTL;
};
/* Blank/black/burst signal levels for one TV standard + connection type. */
struct video_levels {
	int blank, black, burst;
};
/*
 * RGB -> YUV color-space conversion coefficients.  Fixed-point encodings
 * per field width are documented in the large comment below this struct.
 */
struct color_conversion {
	u16 ry, gy, by, ay;	/* Y row + offset */
	u16 ru, gu, bu, au;	/* U row + offset */
	u16 rv, gv, bv, av;	/* V row + offset */
};
/*
 * Scaler filter coefficients, written verbatim into the TV_H_LUMA_*,
 * TV_H_CHROMA_*, TV_V_LUMA_* and TV_V_CHROMA_* registers by
 * intel_tv_mode_set() (60 + 60 + 43 + 43 entries, consumed in order).
 * Hardware-specific magic values; do not edit by hand.
 */
static const u32 filter_table[] = {
	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
	0xB1403000, 0x2E203500, 0x35002E20, 0x3000B140,
	0x35A0B160, 0x2DC02E80, 0xB1403480, 0xB1603000,
	0x2EA03640, 0x34002D80, 0x3000B120, 0x36E0B160,
	0x2D202EF0, 0xB1203380, 0xB1603000, 0x2F303780,
	0x33002CC0, 0x3000B100, 0x3820B160, 0x2C802F50,
	0xB10032A0, 0xB1603000, 0x2F9038C0, 0x32202C20,
	0x3000B0E0, 0x3980B160, 0x2BC02FC0, 0xB0E031C0,
	0xB1603000, 0x2FF03A20, 0x31602B60, 0xB020B0C0,
	0x3AE0B160, 0x2B001810, 0xB0C03120, 0xB140B020,
	0x18283BA0, 0x30C02A80, 0xB020B0A0, 0x3C60B140,
	0x2A201838, 0xB0A03080, 0xB120B020, 0x18383D20,
	0x304029C0, 0xB040B080, 0x3DE0B100, 0x29601848,
	0xB0803000, 0xB100B040, 0x18483EC0, 0xB0402900,
	0xB040B060, 0x3F80B0C0, 0x28801858, 0xB060B080,
	0xB0A0B060, 0x18602820, 0xB0A02820, 0x0000B060,
	0x36403000, 0x2D002CC0, 0x30003640, 0x2D0036C0,
	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
	0x28003100, 0x28002F00, 0x00003100, 0x36403000,
	0x2D002CC0, 0x30003640, 0x2D0036C0,
	0x35C02CC0, 0x37403000, 0x2C802D40, 0x30003540,
	0x2D8037C0, 0x34C02C40, 0x38403000, 0x2BC02E00,
	0x30003440, 0x2E2038C0, 0x34002B80, 0x39803000,
	0x2B402E40, 0x30003380, 0x2E603A00, 0x33402B00,
	0x3A803040, 0x2A802EA0, 0x30403300, 0x2EC03B40,
	0x32802A40, 0x3C003040, 0x2A002EC0, 0x30803240,
	0x2EC03C80, 0x320029C0, 0x3D403080, 0x29402F00,
	0x308031C0, 0x2F203DC0, 0x31802900, 0x3E8030C0,
	0x28802F40, 0x30C03140, 0x2F203F40, 0x31402840,
	0x28003100, 0x28002F00, 0x00003100,
};
/*
* Color conversion values have 3 separate fixed point formats:
*
* 10 bit fields (ay, au)
* 1.9 fixed point (b.bbbbbbbbb)
* 11 bit fields (ry, by, ru, gu, gv)
* exp.mantissa (ee.mmmmmmmmm)
* ee = 00 = 10^-1 (0.mmmmmmmmm)
* ee = 01 = 10^-2 (0.0mmmmmmmmm)
* ee = 10 = 10^-3 (0.00mmmmmmmmm)
* ee = 11 = 10^-4 (0.000mmmmmmmmm)
* 12 bit fields (gy, rv, bu)
* exp.mantissa (eee.mmmmmmmmm)
* eee = 000 = 10^-1 (0.mmmmmmmmm)
* eee = 001 = 10^-2 (0.0mmmmmmmmm)
* eee = 010 = 10^-3 (0.00mmmmmmmmm)
* eee = 011 = 10^-4 (0.000mmmmmmmmm)
* eee = 100 = reserved
* eee = 101 = reserved
* eee = 110 = reserved
* eee = 111 = 10^0 (m.mmmmmmmm) (only usable for 1.0 representation)
*
* Saturation and contrast are 8 bits, with their own representation:
* 8 bit field (saturation, contrast)
* exp.mantissa (ee.mmmmmm)
* ee = 00 = 10^-1 (0.mmmmmm)
* ee = 01 = 10^0 (m.mmmmm)
* ee = 10 = 10^1 (mm.mmmm)
* ee = 11 = 10^2 (mmm.mmm)
*
* Simple conversion function:
*
* static u32
* float_to_csc_11(float f)
* {
* u32 exp;
* u32 mant;
* u32 ret;
*
* if (f < 0)
* f = -f;
*
* if (f >= 1) {
* exp = 0x7;
* mant = 1 << 8;
* } else {
* for (exp = 0; exp < 3 && f < 0.5; exp++)
* f *= 2.0;
* mant = (f * (1 << 9) + 0.5);
* if (mant >= (1 << 9))
* mant = (1 << 9) - 1;
* }
* ret = (exp << 9) | mant;
* return ret;
* }
*/
/*
* Behold, magic numbers! If we plant them they might grow a big
* s-video cable to the sky... or something.
*
* Pre-converted to appropriate hex value.
*/
/*
* PAL & NTSC values for composite & s-video connections
*/
/*
 * Per-standard color-space matrices and blank/black/burst levels,
 * referenced from the tv_modes[] table below.  Values are in the
 * fixed-point formats described above; hand-tuned, do not edit.
 */
static const struct color_conversion ntsc_m_csc_composite = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};

static const struct video_levels ntsc_m_levels_composite = {
	.blank = 225, .black = 267, .burst = 113,
};

static const struct color_conversion ntsc_m_csc_svideo = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};

static const struct video_levels ntsc_m_levels_svideo = {
	.blank = 266, .black = 316, .burst = 133,
};

static const struct color_conversion ntsc_j_csc_composite = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
	.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
	.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
};

static const struct video_levels ntsc_j_levels_composite = {
	.blank = 225, .black = 225, .burst = 113,
};

static const struct color_conversion ntsc_j_csc_svideo = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
	.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
	.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
};

static const struct video_levels ntsc_j_levels_svideo = {
	.blank = 266, .black = 266, .burst = 133,
};

static const struct color_conversion pal_csc_composite = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
	.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
	.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
};

static const struct video_levels pal_levels_composite = {
	.blank = 237, .black = 237, .burst = 118,
};

static const struct color_conversion pal_csc_svideo = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
	.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
	.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
};

static const struct video_levels pal_levels_svideo = {
	.blank = 280, .black = 280, .burst = 139,
};

/* PAL-M shares NTSC-M's matrices/levels (same 525-line timing base). */
static const struct color_conversion pal_m_csc_composite = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};

static const struct video_levels pal_m_levels_composite = {
	.blank = 225, .black = 267, .burst = 113,
};

static const struct color_conversion pal_m_csc_svideo = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};

static const struct video_levels pal_m_levels_svideo = {
	.blank = 266, .black = 316, .burst = 133,
};

static const struct color_conversion pal_n_csc_composite = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
	.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
	.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};

static const struct video_levels pal_n_levels_composite = {
	.blank = 225, .black = 267, .burst = 118,
};

static const struct color_conversion pal_n_csc_svideo = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
	.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
	.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};

static const struct video_levels pal_n_levels_svideo = {
	.blank = 266, .black = 316, .burst = 139,
};

/*
 * Component connections
 */
static const struct color_conversion sdtv_csc_yprpb = {
	.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
	.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
	.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};

static const struct color_conversion sdtv_csc_rgb = {
	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
};

static const struct color_conversion hdtv_csc_yprpb = {
	.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
	.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
	.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};

static const struct color_conversion hdtv_csc_rgb = {
	.ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
	.ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
	.rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
};

static const struct video_levels component_levels = {
	.blank = 279, .black = 279, .burst = 0,
};
/* Full description of one TV output standard/resolution. */
struct tv_mode {
	const char *name;		/* user-visible format name ("NTSC-M", ...) */
	int clock;			/* pixel clock in kHz */
	int refresh;			/* in millihertz (for precision) */
	u32 oversample;			/* TV_OVERSAMPLE_* register value */
	int hsync_end, hblank_start, hblank_end, htotal;
	bool progressive, trilevel_sync, component_only;
	int vsync_start_f1, vsync_start_f2, vsync_len;
	bool veq_ena;			/* vertical equalization pulses enabled */
	int veq_start_f1, veq_start_f2, veq_len;
	int vi_end_f1, vi_end_f2, nbr_end;
	bool burst_ena;			/* color burst enabled (SDTV only) */
	int hburst_start, hburst_len;
	int vburst_start_f1, vburst_end_f1;
	int vburst_start_f2, vburst_end_f2;
	int vburst_start_f3, vburst_end_f3;
	int vburst_start_f4, vburst_end_f4;
	/*
	 * subcarrier programming (see the DDA comment below this struct)
	 */
	int dda2_size, dda3_size, dda1_inc, dda2_inc, dda3_inc;
	u32 sc_reset;
	bool pal_burst;
	/*
	 * blank/black levels
	 */
	const struct video_levels *composite_levels, *svideo_levels;
	const struct color_conversion *composite_color, *svideo_color;
	const u32 *filter_table;	/* scaler coefficients (60+60+43+43 u32s) */
	int max_srcw;			/* max source width, 0 = no limit */
};
/*
* Sub carrier DDA
*
* I think this works as follows:
*
* subcarrier freq = pixel_clock * (dda1_inc + dda2_inc / dda2_size) / 4096
*
* Presumably, when dda3 is added in, it gets to adjust the dda2_inc value
*
* So,
* dda1_ideal = subcarrier/pixel * 4096
* dda1_inc = floor (dda1_ideal)
* dda2 = dda1_ideal - dda1_inc
*
* then pick a ratio for dda2 that gives the closest approximation. If
* you can't get close enough, you can play with dda3 as well. This
* seems likely to happen when dda2 is small as the jumps would be larger
*
* To invert this,
*
* pixel_clock = subcarrier * 4096 / (dda1_inc + dda2_inc / dda2_size)
*
* The constants below were all computed using a 107.520MHz clock
*/
/**
* Register programming values for TV modes.
*
* These values account for -1s required.
*/
/* Modeline table consumed by intel_tv_mode_lookup(); entries matched by
 * exact name.  All register values pre-adjusted (see comment above). */
static const struct tv_mode tv_modes[] = {
	{
		.name		= "NTSC-M",
		.clock		= 108000,
		.refresh	= 59940,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,
		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */

		.hsync_end	= 64,		    .hblank_end		= 124,
		.hblank_start	= 836,		    .htotal		= 857,

		.progressive	= false,	    .trilevel_sync = false,

		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
		.vsync_len	= 6,

		.veq_ena	= true,		    .veq_start_f1	= 0,
		.veq_start_f2	= 1,		    .veq_len		= 18,

		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
		.nbr_end	= 240,

		.burst_ena	= true,
		.hburst_start	= 72,		    .hburst_len		= 34,
		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,

		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
		.dda1_inc	=    135,
		.dda2_inc	=  20800,	    .dda2_size		=  27456,
		.dda3_inc	=      0,	    .dda3_size		=      0,
		.sc_reset	= TV_SC_RESET_EVERY_4,
		.pal_burst	= false,

		.composite_levels = &ntsc_m_levels_composite,
		.composite_color = &ntsc_m_csc_composite,
		.svideo_levels  = &ntsc_m_levels_svideo,
		.svideo_color = &ntsc_m_csc_svideo,

		.filter_table = filter_table,
	},
	{
		.name		= "NTSC-443",
		.clock		= 108000,
		.refresh	= 59940,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,
		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
		.hsync_end	= 64,		    .hblank_end		= 124,
		.hblank_start	= 836,		    .htotal		= 857,

		.progressive	= false,	    .trilevel_sync = false,

		.vsync_start_f1 = 6,		    .vsync_start_f2	= 7,
		.vsync_len	= 6,

		.veq_ena	= true,		    .veq_start_f1	= 0,
		.veq_start_f2	= 1,		    .veq_len		= 18,

		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
		.nbr_end	= 240,

		.burst_ena	= true,
		.hburst_start	= 72,		    .hburst_len		= 34,
		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,

		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
		.dda1_inc       =    168,
		.dda2_inc       =   4093,	    .dda2_size		=  27456,
		.dda3_inc       =    310,	    .dda3_size		=    525,
		.sc_reset   = TV_SC_RESET_NEVER,
		.pal_burst  = false,

		.composite_levels = &ntsc_m_levels_composite,
		.composite_color = &ntsc_m_csc_composite,
		.svideo_levels  = &ntsc_m_levels_svideo,
		.svideo_color = &ntsc_m_csc_svideo,

		.filter_table = filter_table,
	},
	{
		.name		= "NTSC-J",
		.clock		= 108000,
		.refresh	= 59940,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,

		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
		.hsync_end	= 64,		    .hblank_end		= 124,
		.hblank_start = 836,	    .htotal		= 857,

		.progressive	= false,    .trilevel_sync = false,

		.vsync_start_f1	= 6,	    .vsync_start_f2	= 7,
		.vsync_len	= 6,

		.veq_ena      = true,	    .veq_start_f1	= 0,
		.veq_start_f2 = 1,	    .veq_len		= 18,

		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
		.nbr_end	= 240,

		.burst_ena	= true,
		.hburst_start	= 72,		    .hburst_len		= 34,
		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,

		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
		.dda1_inc	=    135,
		.dda2_inc	=  20800,	    .dda2_size		=  27456,
		.dda3_inc	=      0,	    .dda3_size		=      0,
		.sc_reset	= TV_SC_RESET_EVERY_4,
		.pal_burst	= false,

		.composite_levels = &ntsc_j_levels_composite,
		.composite_color = &ntsc_j_csc_composite,
		.svideo_levels  = &ntsc_j_levels_svideo,
		.svideo_color = &ntsc_j_csc_svideo,

		.filter_table = filter_table,
	},
	{
		.name		= "PAL-M",
		.clock		= 108000,
		.refresh	= 59940,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,

		/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
		.hsync_end	= 64,		  .hblank_end		= 124,
		.hblank_start = 836,	  .htotal		= 857,

		.progressive	= false,	    .trilevel_sync = false,

		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
		.vsync_len	= 6,

		.veq_ena	= true,		    .veq_start_f1	= 0,
		.veq_start_f2	= 1,		    .veq_len		= 18,

		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
		.nbr_end	= 240,

		.burst_ena	= true,
		.hburst_start	= 72,		    .hburst_len		= 34,
		.vburst_start_f1 = 9,		    .vburst_end_f1	= 240,
		.vburst_start_f2 = 10,		    .vburst_end_f2	= 240,
		.vburst_start_f3 = 9,		    .vburst_end_f3	= 240,
		.vburst_start_f4 = 10,		    .vburst_end_f4	= 240,

		/* desired 3.5800000 actual 3.5800000 clock 107.52 */
		.dda1_inc	=    135,
		.dda2_inc	=  16704,	    .dda2_size		=  27456,
		.dda3_inc	=      0,	    .dda3_size		=      0,
		.sc_reset	= TV_SC_RESET_EVERY_8,
		.pal_burst  = true,

		.composite_levels = &pal_m_levels_composite,
		.composite_color = &pal_m_csc_composite,
		.svideo_levels  = &pal_m_levels_svideo,
		.svideo_color = &pal_m_csc_svideo,

		.filter_table = filter_table,
	},
	{
		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
		.name	    = "PAL-N",
		.clock		= 108000,
		.refresh	= 50000,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,

		.hsync_end	= 64,		    .hblank_end		= 128,
		.hblank_start = 844,	    .htotal		= 863,

		.progressive  = false,    .trilevel_sync = false,


		.vsync_start_f1	= 6,	   .vsync_start_f2	= 7,
		.vsync_len	= 6,

		.veq_ena	= true,		    .veq_start_f1	= 0,
		.veq_start_f2	= 1,		    .veq_len		= 18,

		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
		.nbr_end	= 286,

		.burst_ena	= true,
		.hburst_start = 73,	    .hburst_len		= 34,
		.vburst_start_f1 = 8,	    .vburst_end_f1	= 285,
		.vburst_start_f2 = 8,	    .vburst_end_f2	= 286,
		.vburst_start_f3 = 9,	    .vburst_end_f3	= 286,
		.vburst_start_f4 = 9,	    .vburst_end_f4	= 285,


		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
		.dda1_inc       =    135,
		.dda2_inc       =  23578,       .dda2_size	=  27648,
		.dda3_inc       =    134,       .dda3_size	=    625,
		.sc_reset   = TV_SC_RESET_EVERY_8,
		.pal_burst  = true,

		.composite_levels = &pal_n_levels_composite,
		.composite_color = &pal_n_csc_composite,
		.svideo_levels  = &pal_n_levels_svideo,
		.svideo_color = &pal_n_csc_svideo,

		.filter_table = filter_table,
	},
	{
		/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
		.name	    = "PAL",
		.clock		= 108000,
		.refresh	= 50000,
		.oversample	= TV_OVERSAMPLE_8X,
		.component_only = 0,

		.hsync_end	= 64,		    .hblank_end		= 142,
		.hblank_start	= 844,	    .htotal		= 863,

		.progressive	= false,    .trilevel_sync = false,

		.vsync_start_f1	= 5,	    .vsync_start_f2	= 6,
		.vsync_len	= 5,

		.veq_ena	= true,	    .veq_start_f1	= 0,
		.veq_start_f2	= 1,	    .veq_len		= 15,

		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
		.nbr_end	= 286,

		.burst_ena	= true,
		.hburst_start	= 73,		    .hburst_len		= 32,
		.vburst_start_f1 = 8,		    .vburst_end_f1	= 285,
		.vburst_start_f2 = 8,		    .vburst_end_f2	= 286,
		.vburst_start_f3 = 9,		    .vburst_end_f3	= 286,
		.vburst_start_f4 = 9,		    .vburst_end_f4	= 285,

		/* desired 4.4336180 actual 4.4336180 clock 107.52 */
		.dda1_inc	=    168,
		.dda2_inc	=   4122,	    .dda2_size		=  27648,
		.dda3_inc	=     67,	    .dda3_size		=    625,
		.sc_reset	= TV_SC_RESET_EVERY_8,
		.pal_burst	= true,

		.composite_levels = &pal_levels_composite,
		.composite_color = &pal_csc_composite,
		.svideo_levels  = &pal_levels_svideo,
		.svideo_color = &pal_csc_svideo,

		.filter_table = filter_table,
	},
	{
		.name       = "480p",
		.clock		= 107520,
		.refresh	= 59940,
		.oversample     = TV_OVERSAMPLE_4X,
		.component_only = 1,

		.hsync_end      = 64,               .hblank_end         = 122,
		.hblank_start   = 842,              .htotal             = 857,

		.progressive    = true,		    .trilevel_sync = false,

		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
		.vsync_len      = 12,

		.veq_ena        = false,

		.vi_end_f1      = 44,               .vi_end_f2          = 44,
		.nbr_end        = 479,

		.burst_ena      = false,

		.filter_table = filter_table,
	},
	{
		.name       = "576p",
		.clock		= 107520,
		.refresh	= 50000,
		.oversample     = TV_OVERSAMPLE_4X,
		.component_only = 1,

		.hsync_end      = 64,               .hblank_end         = 139,
		.hblank_start   = 859,              .htotal             = 863,

		.progressive    = true,		    .trilevel_sync = false,

		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
		.vsync_len      = 10,

		.veq_ena        = false,

		.vi_end_f1      = 48,               .vi_end_f2          = 48,
		.nbr_end        = 575,

		.burst_ena      = false,

		.filter_table = filter_table,
	},
	{
		.name       = "720p@60Hz",
		.clock		= 148800,
		.refresh	= 60000,
		.oversample     = TV_OVERSAMPLE_2X,
		.component_only = 1,

		.hsync_end      = 80,               .hblank_end         = 300,
		.hblank_start   = 1580,             .htotal             = 1649,

		.progressive	= true,		    .trilevel_sync = true,

		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
		.vsync_len      = 10,

		.veq_ena        = false,

		.vi_end_f1      = 29,               .vi_end_f2          = 29,
		.nbr_end        = 719,

		.burst_ena      = false,

		.filter_table = filter_table,
	},
	{
		.name       = "720p@50Hz",
		.clock		= 148800,
		.refresh	= 50000,
		.oversample     = TV_OVERSAMPLE_2X,
		.component_only = 1,

		.hsync_end      = 80,               .hblank_end         = 300,
		.hblank_start   = 1580,             .htotal             = 1979,

		.progressive	= true,		    .trilevel_sync = true,

		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
		.vsync_len      = 10,

		.veq_ena        = false,

		.vi_end_f1      = 29,               .vi_end_f2          = 29,
		.nbr_end        = 719,

		.burst_ena      = false,

		.filter_table = filter_table,
		.max_srcw = 800
	},
	{
		.name       = "1080i@50Hz",
		.clock		= 148800,
		.refresh	= 50000,
		.oversample     = TV_OVERSAMPLE_2X,
		.component_only = 1,

		.hsync_end      = 88,               .hblank_end         = 235,
		.hblank_start   = 2155,             .htotal             = 2639,

		.progressive	= false,	  .trilevel_sync = true,

		.vsync_start_f1 = 4,              .vsync_start_f2     = 5,
		.vsync_len      = 10,

		.veq_ena	= true,	    .veq_start_f1	= 4,
		.veq_start_f2   = 4,	    .veq_len		= 10,


		.vi_end_f1      = 21,           .vi_end_f2          = 22,
		.nbr_end        = 539,

		.burst_ena      = false,

		.filter_table = filter_table,
	},
	{
		.name       = "1080i@60Hz",
		.clock		= 148800,
		.refresh	= 60000,
		.oversample     = TV_OVERSAMPLE_2X,
		.component_only = 1,

		.hsync_end      = 88,               .hblank_end         = 235,
		.hblank_start   = 2155,             .htotal             = 2199,

		.progressive	= false,	    .trilevel_sync = true,

		.vsync_start_f1 = 4,               .vsync_start_f2     = 5,
		.vsync_len      = 10,

		.veq_ena	= true,		    .veq_start_f1	= 4,
		.veq_start_f2	= 4,		    .veq_len		= 10,


		.vi_end_f1      = 21,               .vi_end_f2          = 22,
		.nbr_end        = 539,

		.burst_ena      = false,

		.filter_table = filter_table,
	},
};
/* Recover the intel_tv wrapper from its embedded drm_encoder. */
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_tv, base.base);
}
/* Recover the intel_tv wrapper from the connector's attached encoder. */
static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_tv,
			    base);
}
/*
 * DPMS hook: the TV encoder only distinguishes "on" from everything
 * else, so standby/suspend/off all just clear the encoder-enable bit.
 * Unrecognized mode values are deliberately ignored.
 */
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (mode == DRM_MODE_DPMS_ON) {
		I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
	} else if (mode == DRM_MODE_DPMS_STANDBY ||
		   mode == DRM_MODE_DPMS_SUSPEND ||
		   mode == DRM_MODE_DPMS_OFF) {
		I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
	}
}
static const struct tv_mode *
intel_tv_mode_lookup(const char *tv_format)
{
int i;
for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
return tv_mode;
}
return NULL;
}
/* Look up the tv_mode matching this encoder's configured format name. */
static const struct tv_mode *
intel_tv_mode_find(struct intel_tv *intel_tv)
{
	return intel_tv_mode_lookup(intel_tv->tv_format);
}
/*
 * Mode validation: accept only modes whose vertical refresh is within
 * 1Hz (1000 millihertz) of the configured TV format's refresh rate.
 */
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_tv *intel_tv = intel_attached_tv(connector);
	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);

	if (!tv_mode)
		return MODE_CLOCK_RANGE;

	/* Ensure TV refresh is close to desired refresh */
	if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
		return MODE_CLOCK_RANGE;

	return MODE_OK;
}
static bool
intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_mode_config *drm_config = &dev->mode_config;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
struct drm_encoder *other_encoder;
if (!tv_mode)
return false;
/* FIXME: lock encoder list */
list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
if (other_encoder != encoder &&
other_encoder->crtc == encoder->crtc)
return false;
}
adjusted_mode->clock = tv_mode->clock;
return true;
}
/*
 * Program the TV encoder for the configured tv_mode and connector type.
 * The sequence is order-sensitive: the display plane and pipe are turned
 * off while the TV window registers are written (the hardware requires
 * the pipe off, and TV_FILTER_CTL_1 must be set before TV_WIN_SIZE),
 * then restored.  TV_CTL itself is written last to commit everything.
 */
static void
intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
	u32 tv_ctl;
	u32 hctl1, hctl2, hctl3;
	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
	u32 scctl1, scctl2, scctl3;
	int i, j;
	const struct video_levels *video_levels;
	const struct color_conversion *color_conversion;
	bool burst_ena;
	int pipe = intel_crtc->pipe;

	if (!tv_mode)
		return;	/* can't happen (mode_prepare prevents this) */

	/* Keep only the bits that survive a mode set. */
	tv_ctl = I915_READ(TV_CTL);
	tv_ctl &= TV_CTL_SAVE;

	/* Select levels/CSC matrix based on the physical connection. */
	switch (intel_tv->type) {
	default:
	case DRM_MODE_CONNECTOR_Unknown:
	case DRM_MODE_CONNECTOR_Composite:
		tv_ctl |= TV_ENC_OUTPUT_COMPOSITE;
		video_levels = tv_mode->composite_levels;
		color_conversion = tv_mode->composite_color;
		burst_ena = tv_mode->burst_ena;
		break;
	case DRM_MODE_CONNECTOR_Component:
		tv_ctl |= TV_ENC_OUTPUT_COMPONENT;
		video_levels = &component_levels;
		/* burst_ena doubles as an SDTV-vs-HDTV discriminator here */
		if (tv_mode->burst_ena)
			color_conversion = &sdtv_csc_yprpb;
		else
			color_conversion = &hdtv_csc_yprpb;
		burst_ena = false;
		break;
	case DRM_MODE_CONNECTOR_SVIDEO:
		tv_ctl |= TV_ENC_OUTPUT_SVIDEO;
		video_levels = tv_mode->svideo_levels;
		color_conversion = tv_mode->svideo_color;
		burst_ena = tv_mode->burst_ena;
		break;
	}

	/* Pack horizontal/vertical timing fields into their registers. */
	hctl1 = (tv_mode->hsync_end << TV_HSYNC_END_SHIFT) |
		(tv_mode->htotal << TV_HTOTAL_SHIFT);
	hctl2 = (tv_mode->hburst_start << 16) |
		(tv_mode->hburst_len << TV_HBURST_LEN_SHIFT);
	if (burst_ena)
		hctl2 |= TV_BURST_ENA;
	hctl3 = (tv_mode->hblank_start << TV_HBLANK_START_SHIFT) |
		(tv_mode->hblank_end << TV_HBLANK_END_SHIFT);
	vctl1 = (tv_mode->nbr_end << TV_NBR_END_SHIFT) |
		(tv_mode->vi_end_f1 << TV_VI_END_F1_SHIFT) |
		(tv_mode->vi_end_f2 << TV_VI_END_F2_SHIFT);
	vctl2 = (tv_mode->vsync_len << TV_VSYNC_LEN_SHIFT) |
		(tv_mode->vsync_start_f1 << TV_VSYNC_START_F1_SHIFT) |
		(tv_mode->vsync_start_f2 << TV_VSYNC_START_F2_SHIFT);
	vctl3 = (tv_mode->veq_len << TV_VEQ_LEN_SHIFT) |
		(tv_mode->veq_start_f1 << TV_VEQ_START_F1_SHIFT) |
		(tv_mode->veq_start_f2 << TV_VEQ_START_F2_SHIFT);
	if (tv_mode->veq_ena)
		vctl3 |= TV_EQUAL_ENA;
	vctl4 = (tv_mode->vburst_start_f1 << TV_VBURST_START_F1_SHIFT) |
		(tv_mode->vburst_end_f1 << TV_VBURST_END_F1_SHIFT);
	vctl5 = (tv_mode->vburst_start_f2 << TV_VBURST_START_F2_SHIFT) |
		(tv_mode->vburst_end_f2 << TV_VBURST_END_F2_SHIFT);
	vctl6 = (tv_mode->vburst_start_f3 << TV_VBURST_START_F3_SHIFT) |
		(tv_mode->vburst_end_f3 << TV_VBURST_END_F3_SHIFT);
	vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
		(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);

	if (intel_crtc->pipe == 1)
		tv_ctl |= TV_ENC_PIPEB_SELECT;
	tv_ctl |= tv_mode->oversample;

	if (tv_mode->progressive)
		tv_ctl |= TV_PROGRESSIVE;
	if (tv_mode->trilevel_sync)
		tv_ctl |= TV_TRILEVEL_SYNC;
	if (tv_mode->pal_burst)
		tv_ctl |= TV_PAL_BURST;

	/* Subcarrier DDA programming (see the DDA comment above tv_modes). */
	scctl1 = 0;
	if (tv_mode->dda1_inc)
		scctl1 |= TV_SC_DDA1_EN;
	if (tv_mode->dda2_inc)
		scctl1 |= TV_SC_DDA2_EN;
	if (tv_mode->dda3_inc)
		scctl1 |= TV_SC_DDA3_EN;
	scctl1 |= tv_mode->sc_reset;
	if (video_levels)
		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
		tv_mode->dda2_inc << TV_SCDDA2_INC_SHIFT;
	scctl3 = tv_mode->dda3_size << TV_SCDDA3_SIZE_SHIFT |
		tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;

	/* Enable two fixes for the chips that need them. */
	if (dev->pci_device < 0x2772)
		tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;

	I915_WRITE(TV_H_CTL_1, hctl1);
	I915_WRITE(TV_H_CTL_2, hctl2);
	I915_WRITE(TV_H_CTL_3, hctl3);
	I915_WRITE(TV_V_CTL_1, vctl1);
	I915_WRITE(TV_V_CTL_2, vctl2);
	I915_WRITE(TV_V_CTL_3, vctl3);
	I915_WRITE(TV_V_CTL_4, vctl4);
	I915_WRITE(TV_V_CTL_5, vctl5);
	I915_WRITE(TV_V_CTL_6, vctl6);
	I915_WRITE(TV_V_CTL_7, vctl7);
	I915_WRITE(TV_SC_CTL_1, scctl1);
	I915_WRITE(TV_SC_CTL_2, scctl2);
	I915_WRITE(TV_SC_CTL_3, scctl3);

	if (color_conversion) {
		I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
			   color_conversion->gy);
		I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
			   color_conversion->ay);
		I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
			   color_conversion->gu);
		I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
			   color_conversion->au);
		I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
			   color_conversion->gv);
		I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
			   color_conversion->av);
	}

	/* Gen-specific magic knob values — hardware-tuned, origin unclear. */
	if (INTEL_INFO(dev)->gen >= 4)
		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
	else
		I915_WRITE(TV_CLR_KNOBS, 0x00606000);

	if (video_levels)
		I915_WRITE(TV_CLR_LEVEL,
			   ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
			    (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
	{
		int pipeconf_reg = PIPECONF(pipe);
		int dspcntr_reg = DSPCNTR(intel_crtc->plane);
		int pipeconf = I915_READ(pipeconf_reg);
		int dspcntr = I915_READ(dspcntr_reg);
		int dspbase_reg = DSPADDR(intel_crtc->plane);
		int xpos = 0x0, ypos = 0x0;
		unsigned int xsize, ysize;

		/* Pipe must be off here */
		I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
		/* Flush the plane changes */
		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));

		/* Wait for vblank for the disable to take effect */
		if (IS_GEN2(dev))
			intel_wait_for_vblank(dev, intel_crtc->pipe);

		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
		/* Wait for vblank for the disable to take effect. */
		intel_wait_for_pipe_off(dev, intel_crtc->pipe);

		/* Filter ctl must be set before TV_WIN_SIZE */
		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
		xsize = tv_mode->hblank_start - tv_mode->hblank_end;
		if (tv_mode->progressive)
			ysize = tv_mode->nbr_end + 1;
		else
			ysize = 2*tv_mode->nbr_end + 1;

		/* Shrink/offset the active window by the overscan margins. */
		xpos += intel_tv->margin[TV_MARGIN_LEFT];
		ypos += intel_tv->margin[TV_MARGIN_TOP];
		xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
			  intel_tv->margin[TV_MARGIN_RIGHT]);
		ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
			  intel_tv->margin[TV_MARGIN_BOTTOM]);
		I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
		I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);

		/* Restore pipe and plane state. */
		I915_WRITE(pipeconf_reg, pipeconf);
		I915_WRITE(dspcntr_reg, dspcntr);
		/* Flush the plane changes */
		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
	}

	/* Upload scaler filter coefficients, consumed in table order. */
	j = 0;
	for (i = 0; i < 60; i++)
		I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
	for (i = 0; i < 60; i++)
		I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
	for (i = 0; i < 43; i++)
		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
	for (i = 0; i < 43; i++)
		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
	/* Commit: TV_CTL write enables the new configuration. */
	I915_WRITE(TV_CTL, tv_ctl);
}
/*
 * Fixed mode reported while performing load detection.  Note the timings
 * describe the pipe configuration used for the probe, not an actual
 * 480i broadcast timing, despite the "NTSC 480i" name.
 */
static const struct drm_display_mode reported_modes[] = {
	{
		.name = "NTSC 480i",
		.clock = 107520,
		.hdisplay = 1280,
		.hsync_start = 1368,
		.hsync_end = 1496,
		.htotal = 1712,

		.vdisplay = 1024,
		.vsync_start = 1027,
		.vsync_end = 1034,
		.vtotal = 1104,
		.type = DRM_MODE_TYPE_DRIVER,
	},
};
/**
* Detects TV presence by checking for load.
*
* Requires that the current pipe's DPLL is active.
* \return true if TV is connected.
* \return false if TV is disconnected.
*/
static int
intel_tv_detect_type(struct intel_tv *intel_tv,
		     struct drm_connector *connector)
{
	struct drm_encoder *encoder = &intel_tv->base.base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	u32 tv_ctl, save_tv_ctl;
	u32 tv_dac, save_tv_dac;
	int type;

	/* Disable TV interrupts around load detect or we'll recurse */
	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		i915_disable_pipestat(dev_priv, 0,
				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	/* Save both registers so they can be restored verbatim afterwards. */
	save_tv_dac = tv_dac = I915_READ(TV_DAC);
	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);

	/* Poll for TV detection: disable the encoder, enter monitor-detect
	 * test mode and drive all three DACs with sensing enabled. */
	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
	if (intel_crtc->pipe == 1)
		tv_ctl |= TV_ENC_PIPEB_SELECT;
	else
		tv_ctl &= ~TV_ENC_PIPEB_SELECT;

	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
	tv_dac |= (TVDAC_STATE_CHG_EN |
		   TVDAC_A_SENSE_CTL |
		   TVDAC_B_SENSE_CTL |
		   TVDAC_C_SENSE_CTL |
		   DAC_CTL_OVERRIDE |
		   DAC_A_0_7_V |
		   DAC_B_0_7_V |
		   DAC_C_0_7_V);

	I915_WRITE(TV_CTL, tv_ctl);
	I915_WRITE(TV_DAC, tv_dac);
	POSTING_READ(TV_DAC);

	/* Give the sense logic a frame to settle before sampling. */
	intel_wait_for_vblank(intel_tv->base.base.dev,
			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);

	type = -1;
	tv_dac = I915_READ(TV_DAC);
	DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
	/*
	 * Decode the sense bits into a connector type:
	 *  A B C
	 *  0 1 1 Composite
	 *  1 0 X svideo
	 *  0 0 0 Component
	 */
	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
		DRM_DEBUG_KMS("Detected Composite TV connection\n");
		type = DRM_MODE_CONNECTOR_Composite;
	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
		type = DRM_MODE_CONNECTOR_SVIDEO;
	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
		DRM_DEBUG_KMS("Detected Component TV connection\n");
		type = DRM_MODE_CONNECTOR_Component;
	} else {
		DRM_DEBUG_KMS("Unrecognised TV connection\n");
		type = -1;
	}

	/* Restore the saved register state (state-change reporting off first). */
	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
	I915_WRITE(TV_CTL, save_tv_ctl);
	POSTING_READ(TV_CTL);

	/* For unknown reasons the hw barfs if we don't do this vblank wait. */
	intel_wait_for_vblank(intel_tv->base.base.dev,
			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);

	/* Restore interrupt config */
	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		i915_enable_pipestat(dev_priv, 0,
				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return type;
}
/*
* Here we set accurate tv format according to connector type
* i.e Component TV should not be assigned by NTSC or PAL
*/
/*
 * Pick a TV format matching the detected connector type: a Component
 * connector must use a component-only format and vice versa.  If the
 * current format already matches, nothing is changed; otherwise the
 * first matching entry in tv_modes[] is selected and the tv_mode
 * property is updated to its index.
 */
static void intel_tv_find_better_format(struct drm_connector *connector)
{
	struct intel_tv *intel_tv = intel_attached_tv(connector);
	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
	int i;

	if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
		tv_mode->component_only)
		return;

	/* Use ARRAY_SIZE like the rest of this file instead of the
	 * open-coded sizeof(tv_modes) / sizeof(*tv_modes). */
	for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
		tv_mode = tv_modes + i;

		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
			tv_mode->component_only)
			break;
	}

	intel_tv->tv_format = tv_mode->name;
	drm_connector_property_set_value(connector,
		connector->dev->mode_config.tv_mode_property, i);
}
/**
* Detect the TV connection.
*
* Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure
* we have a pipe programmed in order to probe the TV.
*/
static enum drm_connector_status
intel_tv_detect(struct drm_connector *connector, bool force)
{
	struct intel_tv *intel_tv = intel_attached_tv(connector);
	struct drm_display_mode mode = reported_modes[0];
	int type;

	drm_mode_set_crtcinfo(&mode, 0);

	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
		/* A live pipe is already driving us: probe directly. */
		type = intel_tv_detect_type(intel_tv, connector);
	} else if (force) {
		/* Borrow a pipe for the duration of the load detection. */
		struct intel_load_detect_pipe tmp;

		if (!intel_get_load_detect_pipe(&intel_tv->base, connector,
						&mode, &tmp))
			return connector_status_unknown;

		type = intel_tv_detect_type(intel_tv, connector);
		intel_release_load_detect_pipe(&intel_tv->base, connector,
					       &tmp);
	} else {
		/* Not forced and no pipe available: keep the old status. */
		return connector->status;
	}

	if (type < 0)
		return connector_status_disconnected;

	intel_tv->type = type;
	intel_tv_find_better_format(connector);

	return connector_status_connected;
}
/* Candidate source (input) resolutions offered as display modes. */
static const struct input_res {
	const char *name;
	int w, h;	/* active width/height in pixels */
} input_res_table[] = {
	{"640x480", 640, 480},
	{"800x600", 800, 600},
	{"1024x768", 1024, 768},
	{"1280x1024", 1280, 1024},
	{"848x480", 848, 480},
	{"1280x720", 1280, 720},
	{"1920x1080", 1920, 1080},
};
/*
* Chose preferred mode according to line number of TV format
*/
static void
intel_tv_chose_preferred_modes(struct drm_connector *connector,
			       struct drm_display_mode *mode_ptr)
{
	struct intel_tv *intel_tv = intel_attached_tv(connector);
	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
	int preferred_vdisplay = 0;

	/*
	 * Derive the preferred vertical resolution from the TV format's
	 * line count, then flag the matching probed mode (if any):
	 *   < 480 lines            -> prefer 480
	 *   > 480, progressive and
	 *     < 720 lines          -> prefer 720
	 *   > 480 otherwise        -> prefer 1080
	 * Exactly 480 lines leaves no mode preferred.
	 */
	if (tv_mode->nbr_end < 480)
		preferred_vdisplay = 480;
	else if (tv_mode->nbr_end > 480) {
		if (tv_mode->progressive && tv_mode->nbr_end < 720)
			preferred_vdisplay = 720;
		else
			preferred_vdisplay = 1080;
	}

	if (preferred_vdisplay && mode_ptr->vdisplay == preferred_vdisplay)
		mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
}
/**
* Stub get_modes function.
*
* This should probably return a set of fixed modes, unless we can figure out
* how to probe modes off of TV connections.
*/
static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
struct intel_tv *intel_tv = intel_attached_tv(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
int j, count = 0;
u64 tmp;
for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
unsigned int vactive_s = input->h;
if (tv_mode->max_srcw && input->w > tv_mode->max_srcw)
continue;
if (input->w > 1024 && (!tv_mode->progressive
&& !tv_mode->component_only))
continue;
mode_ptr = drm_mode_create(connector->dev);
if (!mode_ptr)
continue;
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
mode_ptr->hsync_start = hactive_s + 1;
mode_ptr->hsync_end = hactive_s + 64;
if (mode_ptr->hsync_end <= mode_ptr->hsync_start)
mode_ptr->hsync_end = mode_ptr->hsync_start + 1;
mode_ptr->htotal = hactive_s + 96;
mode_ptr->vdisplay = vactive_s;
mode_ptr->vsync_start = vactive_s + 1;
mode_ptr->vsync_end = vactive_s + 32;
if (mode_ptr->vsync_end <= mode_ptr->vsync_start)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
tmp *= mode_ptr->htotal;
tmp = div_u64(tmp, 1000000);
mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
intel_tv_chose_preferred_modes(connector, mode_ptr);
drm_mode_probed_add(connector, mode_ptr);
count++;
}
return count;
}
/* Connector teardown: remove sysfs entry, clean up DRM state, free memory. */
static void
intel_tv_destroy(struct drm_connector *connector)
{
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/*
 * Handle writes to the TV margin and tv_mode connector properties.
 * If a value actually changed and the connector has an active CRTC,
 * a full modeset is triggered to apply the new settings.
 *
 * NOTE(review): the property value is stored via
 * drm_connector_property_set_value() *before* validation, so an
 * out-of-range tv_mode value is recorded even though -EINVAL is
 * returned — confirm whether this ordering is intentional.
 */
static int
intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
		      uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct intel_tv *intel_tv = intel_attached_tv(connector);
	struct drm_crtc *crtc = intel_tv->base.base.crtc;
	int ret = 0;
	bool changed = false;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret < 0)
		goto out;

	if (property == dev->mode_config.tv_left_margin_property &&
		intel_tv->margin[TV_MARGIN_LEFT] != val) {
		intel_tv->margin[TV_MARGIN_LEFT] = val;
		changed = true;
	} else if (property == dev->mode_config.tv_right_margin_property &&
		intel_tv->margin[TV_MARGIN_RIGHT] != val) {
		intel_tv->margin[TV_MARGIN_RIGHT] = val;
		changed = true;
	} else if (property == dev->mode_config.tv_top_margin_property &&
		intel_tv->margin[TV_MARGIN_TOP] != val) {
		intel_tv->margin[TV_MARGIN_TOP] = val;
		changed = true;
	} else if (property == dev->mode_config.tv_bottom_margin_property &&
		intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
		changed = true;
	} else if (property == dev->mode_config.tv_mode_property) {
		if (val >= ARRAY_SIZE(tv_modes)) {
			ret = -EINVAL;
			goto out;
		}
		if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
			goto out;

		intel_tv->tv_format = tv_modes[val].name;
		changed = true;
	} else {
		ret = -EINVAL;
		goto out;
	}

	/* Re-run the modeset so the new margins/format take effect. */
	if (changed && crtc)
		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
				crtc->y, crtc->fb);
out:
	return ret;
}
/* Encoder helper callbacks used during modesets. */
static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
	.dpms = intel_tv_dpms,
	.mode_fixup = intel_tv_mode_fixup,
	.prepare = intel_encoder_prepare,
	.mode_set = intel_tv_mode_set,
	.commit = intel_encoder_commit,
};

/* Connector operations (detection, properties, teardown). */
static const struct drm_connector_funcs intel_tv_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_tv_detect,
	.destroy = intel_tv_destroy,
	.set_property = intel_tv_set_property,
	.fill_modes = drm_helper_probe_single_connector_modes,
};

/* Connector helper callbacks (mode probing/validation). */
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
	.mode_valid = intel_tv_mode_valid,
	.get_modes = intel_tv_get_modes,
	.best_encoder = intel_best_encoder,
};

/* Encoder lifetime operations. */
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
	.destroy = intel_encoder_destroy,
};
/*
* Enumerate the child dev array parsed from VBT to check whether
* the integrated TV is present.
* If it is present, return 1.
* If it is not present, return false.
* If no child dev is parsed from VBT, it assumes that the TV is present.
*/
/*
 * Scan the child device array parsed from the VBT for an integrated TV.
 * Returns 1 when a TV child device with a non-zero addin_offset is found,
 * or when no child devices were parsed at all (assume present); 0 otherwise.
 */
static int tv_is_present_in_vbt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	/* No parsed child devices: assume the TV exists. */
	if (!dev_priv->child_dev_num)
		return 1;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		/* Skip anything that is not an (integrated) TV device. */
		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
		    p_child->device_type != DEVICE_TYPE_TV)
			continue;

		/* Only a non-zero addin_offset marks the TV as present. */
		if (p_child->addin_offset)
			return 1;
	}

	return 0;
}
/*
 * Probe for the integrated TV encoder and, if present, register the
 * TV-out encoder/connector pair along with its DRM TV properties.
 */
void
intel_tv_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_tv *intel_tv;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	u32 tv_dac_on, tv_dac_off, save_tv_dac;
	char *tv_format_names[ARRAY_SIZE(tv_modes)];
	int i, initial_mode = 0;

	/* Fused-off TV encoder: nothing to register. */
	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
		return;

	if (!tv_is_present_in_vbt(dev)) {
		DRM_DEBUG_KMS("Integrated TV is not present.\n");
		return;
	}
	/* Even if we have an encoder we may not have a connector */
	if (!dev_priv->int_tv_support)
		return;

	/*
	 * Sanity check the TV output by checking to see if the
	 * DAC register holds a value
	 */
	save_tv_dac = I915_READ(TV_DAC);

	I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
	tv_dac_on = I915_READ(TV_DAC);

	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
	tv_dac_off = I915_READ(TV_DAC);

	I915_WRITE(TV_DAC, save_tv_dac);

	/*
	 * If the register does not hold the state change enable
	 * bit, (either as a 0 or a 1), assume it doesn't really
	 * exist
	 */
	if ((tv_dac_on & TVDAC_STATE_CHG_EN) == 0 ||
	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
		return;

	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
	if (!intel_tv) {
		return;
	}

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_tv);	/* undo the first allocation */
		return;
	}

	intel_encoder = &intel_tv->base;
	connector = &intel_connector->base;

	/* The documentation, for the older chipsets at least, recommend
	 * using a polling method rather than hotplug detection for TVs.
	 * This is because in order to perform the hotplug detection, the PLLs
	 * for the TV must be kept alive increasing power drain and starving
	 * bandwidth from other encoders. Notably for instance, it causes
	 * pipe underruns on Crestline when this encoder is supposedly idle.
	 *
	 * More recent chipsets favour HDMI rather than integrated S-Video.
	 */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;

	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
			   DRM_MODE_CONNECTOR_SVIDEO);

	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
			 DRM_MODE_ENCODER_TVDAC);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	intel_encoder->type = INTEL_OUTPUT_TVOUT;
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;

	/* BIOS margin values */
	intel_tv->margin[TV_MARGIN_LEFT] = 54;
	intel_tv->margin[TV_MARGIN_TOP] = 36;
	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;

	intel_tv->tv_format = tv_modes[initial_mode].name;

	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	/* Create TV properties then attach current values */
	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
		tv_format_names[i] = (char *)tv_modes[i].name;
	drm_mode_create_tv_properties(dev,
				      ARRAY_SIZE(tv_modes),
				      tv_format_names);

	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
				   initial_mode);
	drm_connector_attach_property(connector,
				   dev->mode_config.tv_left_margin_property,
				   intel_tv->margin[TV_MARGIN_LEFT]);
	drm_connector_attach_property(connector,
				   dev->mode_config.tv_top_margin_property,
				   intel_tv->margin[TV_MARGIN_TOP]);
	drm_connector_attach_property(connector,
				   dev->mode_config.tv_right_margin_property,
				   intel_tv->margin[TV_MARGIN_RIGHT]);
	drm_connector_attach_property(connector,
				   dev->mode_config.tv_bottom_margin_property,
				   intel_tv->margin[TV_MARGIN_BOTTOM]);
	drm_sysfs_connector_add(connector);
}
| gpl-2.0 |
wwenigma/cocktail-kernel-msm7x30 | arch/arm/mach-tegra/pinmux.c | 2527 | 19888 | /*
* linux/arch/arm/mach-tegra/pinmux.c
*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <mach/pinmux.h>
/* Extractors for the drive pingroup register fields (bit layout of the
 * APB_MISC pad control registers): high-speed mode, schmitt trigger,
 * low-power mode, drive-down/up strength, rising/falling slew. */
#define HSM_EN(reg)	(((reg) >> 2) & 0x1)
#define SCHMT_EN(reg)	(((reg) >> 3) & 0x1)
#define LPMD(reg)	(((reg) >> 4) & 0x3)
#define DRVDN(reg)	(((reg) >> 12) & 0x1f)
#define DRVUP(reg)	(((reg) >> 20) & 0x1f)
#define SLWR(reg)	(((reg) >> 28) & 0x3)
#define SLWF(reg)	(((reg) >> 30) & 0x3)
/* SoC-specific pingroup tables, provided by the per-chip pinmux data. */
static const struct tegra_pingroup_desc *const pingroups = tegra_soc_pingroups;
static const struct tegra_drive_pingroup_desc *const drive_pingroups = tegra_soc_drive_pingroups;

/* Human-readable names for each mux function, indexed by enum value. */
static char *tegra_mux_names[TEGRA_MAX_MUX] = {
	[TEGRA_MUX_AHB_CLK] = "AHB_CLK",
	[TEGRA_MUX_APB_CLK] = "APB_CLK",
	[TEGRA_MUX_AUDIO_SYNC] = "AUDIO_SYNC",
	[TEGRA_MUX_CRT] = "CRT",
	[TEGRA_MUX_DAP1] = "DAP1",
	[TEGRA_MUX_DAP2] = "DAP2",
	[TEGRA_MUX_DAP3] = "DAP3",
	[TEGRA_MUX_DAP4] = "DAP4",
	[TEGRA_MUX_DAP5] = "DAP5",
	[TEGRA_MUX_DISPLAYA] = "DISPLAYA",
	[TEGRA_MUX_DISPLAYB] = "DISPLAYB",
	[TEGRA_MUX_EMC_TEST0_DLL] = "EMC_TEST0_DLL",
	[TEGRA_MUX_EMC_TEST1_DLL] = "EMC_TEST1_DLL",
	[TEGRA_MUX_GMI] = "GMI",
	[TEGRA_MUX_GMI_INT] = "GMI_INT",
	[TEGRA_MUX_HDMI] = "HDMI",
	[TEGRA_MUX_I2C] = "I2C",
	[TEGRA_MUX_I2C2] = "I2C2",
	[TEGRA_MUX_I2C3] = "I2C3",
	[TEGRA_MUX_IDE] = "IDE",
	[TEGRA_MUX_IRDA] = "IRDA",
	[TEGRA_MUX_KBC] = "KBC",
	[TEGRA_MUX_MIO] = "MIO",
	[TEGRA_MUX_MIPI_HS] = "MIPI_HS",
	[TEGRA_MUX_NAND] = "NAND",
	[TEGRA_MUX_OSC] = "OSC",
	[TEGRA_MUX_OWR] = "OWR",
	[TEGRA_MUX_PCIE] = "PCIE",
	[TEGRA_MUX_PLLA_OUT] = "PLLA_OUT",
	[TEGRA_MUX_PLLC_OUT1] = "PLLC_OUT1",
	[TEGRA_MUX_PLLM_OUT1] = "PLLM_OUT1",
	[TEGRA_MUX_PLLP_OUT2] = "PLLP_OUT2",
	[TEGRA_MUX_PLLP_OUT3] = "PLLP_OUT3",
	[TEGRA_MUX_PLLP_OUT4] = "PLLP_OUT4",
	[TEGRA_MUX_PWM] = "PWM",
	[TEGRA_MUX_PWR_INTR] = "PWR_INTR",
	[TEGRA_MUX_PWR_ON] = "PWR_ON",
	[TEGRA_MUX_RTCK] = "RTCK",
	[TEGRA_MUX_SDIO1] = "SDIO1",
	[TEGRA_MUX_SDIO2] = "SDIO2",
	[TEGRA_MUX_SDIO3] = "SDIO3",
	[TEGRA_MUX_SDIO4] = "SDIO4",
	[TEGRA_MUX_SFLASH] = "SFLASH",
	[TEGRA_MUX_SPDIF] = "SPDIF",
	[TEGRA_MUX_SPI1] = "SPI1",
	[TEGRA_MUX_SPI2] = "SPI2",
	[TEGRA_MUX_SPI2_ALT] = "SPI2_ALT",
	[TEGRA_MUX_SPI3] = "SPI3",
	[TEGRA_MUX_SPI4] = "SPI4",
	[TEGRA_MUX_TRACE] = "TRACE",
	[TEGRA_MUX_TWC] = "TWC",
	[TEGRA_MUX_UARTA] = "UARTA",
	[TEGRA_MUX_UARTB] = "UARTB",
	[TEGRA_MUX_UARTC] = "UARTC",
	[TEGRA_MUX_UARTD] = "UARTD",
	[TEGRA_MUX_UARTE] = "UARTE",
	[TEGRA_MUX_ULPI] = "ULPI",
	[TEGRA_MUX_VI] = "VI",
	[TEGRA_MUX_VI_SENSOR_CLK] = "VI_SENSOR_CLK",
	[TEGRA_MUX_XIO] = "XIO",
	[TEGRA_MUX_SAFE] = "<safe>",
};

/* Names for the drive-strength divider settings. */
static const char *tegra_drive_names[TEGRA_MAX_DRIVE] = {
	[TEGRA_DRIVE_DIV_8] = "DIV_8",
	[TEGRA_DRIVE_DIV_4] = "DIV_4",
	[TEGRA_DRIVE_DIV_2] = "DIV_2",
	[TEGRA_DRIVE_DIV_1] = "DIV_1",
};

/* Names for the slew-rate settings. */
static const char *tegra_slew_names[TEGRA_MAX_SLEW] = {
	[TEGRA_SLEW_FASTEST] = "FASTEST",
	[TEGRA_SLEW_FAST] = "FAST",
	[TEGRA_SLEW_SLOW] = "SLOW",
	[TEGRA_SLEW_SLOWEST] = "SLOWEST",
};

/* Serializes all read-modify-write accesses to the pinmux registers. */
static DEFINE_SPINLOCK(mux_lock);
/* Name of a pingroup, or "<UNKNOWN>" for out-of-range values. */
static const char *pingroup_name(enum tegra_pingroup pg)
{
	if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
		return "<UNKNOWN>";

	return pingroups[pg].name;
}

/* Name of a mux function; reserved/none values get fixed strings. */
static const char *func_name(enum tegra_mux_func func)
{
	if (func == TEGRA_MUX_RSVD1)
		return "RSVD1";

	if (func == TEGRA_MUX_RSVD2)
		return "RSVD2";

	if (func == TEGRA_MUX_RSVD3)
		return "RSVD3";

	if (func == TEGRA_MUX_RSVD4)
		return "RSVD4";

	if (func == TEGRA_MUX_NONE)
		return "NONE";

	if (func < 0 || func >= TEGRA_MAX_MUX)
		return "<UNKNOWN>";

	return tegra_mux_names[func];
}

/* Name of a tristate bit value. */
static const char *tri_name(unsigned long val)
{
	return val ? "TRISTATE" : "NORMAL";
}
/* Decode a 2-bit pull-up/pull-down field into a human-readable name.
 * Values outside the defined range are reported as "RSVD". */
static const char *pupd_name(unsigned long val)
{
	static const char *const names[] = {
		"NORMAL", "PULL_DOWN", "PULL_UP",
	};

	if (val < sizeof(names) / sizeof(names[0]))
		return names[val];

	return "RSVD";
}
/* MMIO read of a pinmux register, offset relative to APB_MISC. */
static inline unsigned long pg_readl(unsigned long offset)
{
	return readl(IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
}

/* MMIO write of a pinmux register, offset relative to APB_MISC. */
static inline void pg_writel(unsigned long value, unsigned long offset)
{
	writel(value, IO_TO_VIRT(TEGRA_APB_MISC_BASE + offset));
}
/*
 * Program the 2-bit mux field of a pingroup to select the requested
 * function.  TEGRA_MUX_SAFE is translated to the pingroup's safe
 * function; TEGRA_MUX_RSVDn values encode the raw mux index directly.
 * Returns 0, -ERANGE for out-of-range arguments, or -EINVAL when the
 * pingroup has no mux register or does not support the function.
 */
static int tegra_pinmux_set_func(const struct tegra_pingroup_config *config)
{
	int mux = -1;
	int i;
	unsigned long reg;
	unsigned long flags;
	enum tegra_pingroup pg = config->pingroup;
	enum tegra_mux_func func = config->func;

	if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
		return -ERANGE;

	if (pingroups[pg].mux_reg < 0)
		return -EINVAL;

	if (func < 0)
		return -ERANGE;

	if (func == TEGRA_MUX_SAFE)
		func = pingroups[pg].func_safe;

	if (func & TEGRA_MUX_RSVD) {
		/* Reserved functions carry the mux index in the low bits. */
		mux = func & 0x3;
	} else {
		/* Otherwise search the pingroup's four selectable functions. */
		for (i = 0; i < 4; i++) {
			if (pingroups[pg].funcs[i] == func) {
				mux = i;
				break;
			}
		}
	}

	if (mux < 0)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);

	reg = pg_readl(pingroups[pg].mux_reg);
	reg &= ~(0x3 << pingroups[pg].mux_bit);
	reg |= mux << pingroups[pg].mux_bit;
	pg_writel(reg, pingroups[pg].mux_reg);

	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/*
 * Set or clear the tristate bit of a pingroup.
 * Returns 0, -ERANGE for an out-of-range pingroup, or -EINVAL when the
 * pingroup has no tristate register.
 */
int tegra_pinmux_set_tristate(enum tegra_pingroup pg,
	enum tegra_tristate tristate)
{
	unsigned long reg;
	unsigned long flags;

	if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
		return -ERANGE;

	if (pingroups[pg].tri_reg < 0)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);

	reg = pg_readl(pingroups[pg].tri_reg);
	reg &= ~(0x1 << pingroups[pg].tri_bit);
	if (tristate)
		reg |= 1 << pingroups[pg].tri_bit;
	pg_writel(reg, pingroups[pg].tri_reg);

	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}

/*
 * Program the 2-bit pull-up/pull-down field of a pingroup.
 * Returns 0, -ERANGE for an out-of-range pingroup, or -EINVAL when the
 * pingroup has no pull register or the value is not one of the three
 * supported settings.
 */
int tegra_pinmux_set_pullupdown(enum tegra_pingroup pg,
	enum tegra_pullupdown pupd)
{
	unsigned long reg;
	unsigned long flags;

	if (pg < 0 || pg >= TEGRA_MAX_PINGROUP)
		return -ERANGE;

	if (pingroups[pg].pupd_reg < 0)
		return -EINVAL;

	if (pupd != TEGRA_PUPD_NORMAL &&
	    pupd != TEGRA_PUPD_PULL_DOWN &&
	    pupd != TEGRA_PUPD_PULL_UP)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);

	reg = pg_readl(pingroups[pg].pupd_reg);
	reg &= ~(0x3 << pingroups[pg].pupd_bit);
	reg |= pupd << pingroups[pg].pupd_bit;
	pg_writel(reg, pingroups[pg].pupd_reg);

	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/*
 * Apply a full pingroup configuration: mux function, pull-up/down and
 * tristate.  Each sub-setting is applied only if the pingroup has the
 * corresponding register; failures are logged but do not abort the
 * remaining settings.
 */
static void tegra_pinmux_config_pingroup(const struct tegra_pingroup_config *config)
{
	enum tegra_pingroup pingroup = config->pingroup;
	enum tegra_mux_func func     = config->func;
	enum tegra_pullupdown pupd   = config->pupd;
	enum tegra_tristate tristate = config->tristate;
	int err;

	if (pingroups[pingroup].mux_reg >= 0) {
		err = tegra_pinmux_set_func(config);
		if (err < 0)
			pr_err("pinmux: can't set pingroup %s func to %s: %d\n",
			       pingroup_name(pingroup), func_name(func), err);
	}

	if (pingroups[pingroup].pupd_reg >= 0) {
		err = tegra_pinmux_set_pullupdown(pingroup, pupd);
		if (err < 0)
			pr_err("pinmux: can't set pingroup %s pullupdown to %s: %d\n",
			       pingroup_name(pingroup), pupd_name(pupd), err);
	}

	if (pingroups[pingroup].tri_reg >= 0) {
		err = tegra_pinmux_set_tristate(pingroup, tristate);
		if (err < 0)
			/* Fix: the original passed 'func' here, printing the
			 * wrong value's name in the tristate error message. */
			pr_err("pinmux: can't set pingroup %s tristate to %s: %d\n",
			       pingroup_name(pingroup), tri_name(tristate), err);
	}
}
/* Apply an array of 'len' pingroup configurations in order. */
void tegra_pinmux_config_table(const struct tegra_pingroup_config *config, int len)
{
	int i;

	for (i = 0; i < len; i++)
		tegra_pinmux_config_pingroup(&config[i]);
}
/* Name of a drive pingroup, or "<UNKNOWN>" for out-of-range values. */
static const char *drive_pinmux_name(enum tegra_drive_pingroup pg)
{
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return "<UNKNOWN>";

	return drive_pingroups[pg].name;
}

/* "ENABLE"/"DISABLE" for a boolean register bit. */
static const char *enable_name(unsigned long val)
{
	return val ? "ENABLE" : "DISABLE";
}

/* Name of a drive-strength divider setting. */
static const char *drive_name(unsigned long val)
{
	if (val >= TEGRA_MAX_DRIVE)
		return "<UNKNOWN>";

	return tegra_drive_names[val];
}

/* Name of a slew-rate setting. */
static const char *slew_name(unsigned long val)
{
	if (val >= TEGRA_MAX_SLEW)
		return "<UNKNOWN>";

	return tegra_slew_names[val];
}
/* Enable/disable high-speed mode (bit 2) of a drive pingroup register. */
static int tegra_drive_pinmux_set_hsm(enum tegra_drive_pingroup pg,
	enum tegra_hsm hsm)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (hsm != TEGRA_HSM_ENABLE && hsm != TEGRA_HSM_DISABLE)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	if (hsm == TEGRA_HSM_ENABLE)
		reg |= (1 << 2);
	else
		reg &= ~(1 << 2);
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}

/* Enable/disable the schmitt trigger (bit 3) of a drive pingroup register. */
static int tegra_drive_pinmux_set_schmitt(enum tegra_drive_pingroup pg,
	enum tegra_schmitt schmitt)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (schmitt != TEGRA_SCHMITT_ENABLE && schmitt != TEGRA_SCHMITT_DISABLE)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	if (schmitt == TEGRA_SCHMITT_ENABLE)
		reg |= (1 << 3);
	else
		reg &= ~(1 << 3);
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}

/* Program the 2-bit low-power/drive divider field (bits 4-5, see LPMD()). */
static int tegra_drive_pinmux_set_drive(enum tegra_drive_pingroup pg,
	enum tegra_drive drive)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (drive < 0 || drive >= TEGRA_MAX_DRIVE)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	reg &= ~(0x3 << 4);
	reg |= drive << 4;
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/* Program the 5-bit pull-down drive strength (bits 12-16, see DRVDN()). */
static int tegra_drive_pinmux_set_pull_down(enum tegra_drive_pingroup pg,
	enum tegra_pull_strength pull_down)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (pull_down < 0 || pull_down >= TEGRA_MAX_PULL)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	reg &= ~(0x1f << 12);
	reg |= pull_down << 12;
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/*
 * Program the 5-bit pull-up drive strength of a drive pingroup.
 *
 * Fix: the original wrote the value at bit 12, which is the pull-DOWN
 * field and clobbered any pull-down setting.  The pull-up field lives
 * at bits 20-24, as this file's own DRVUP() accessor shows
 * ((reg >> 20) & 0x1f).
 */
static int tegra_drive_pinmux_set_pull_up(enum tegra_drive_pingroup pg,
	enum tegra_pull_strength pull_up)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (pull_up < 0 || pull_up >= TEGRA_MAX_PULL)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	reg &= ~(0x1f << 20);
	reg |= pull_up << 20;
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/* Program the 2-bit rising-edge slew rate (bits 28-29, see SLWR()). */
static int tegra_drive_pinmux_set_slew_rising(enum tegra_drive_pingroup pg,
	enum tegra_slew slew_rising)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (slew_rising < 0 || slew_rising >= TEGRA_MAX_SLEW)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	reg &= ~(0x3 << 28);
	reg |= slew_rising << 28;
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}

/* Program the 2-bit falling-edge slew rate (bits 30-31, see SLWF()). */
static int tegra_drive_pinmux_set_slew_falling(enum tegra_drive_pingroup pg,
	enum tegra_slew slew_falling)
{
	unsigned long flags;
	u32 reg;
	if (pg < 0 || pg >= TEGRA_MAX_DRIVE_PINGROUP)
		return -ERANGE;

	if (slew_falling < 0 || slew_falling >= TEGRA_MAX_SLEW)
		return -EINVAL;

	spin_lock_irqsave(&mux_lock, flags);
	reg = pg_readl(drive_pingroups[pg].reg);
	reg &= ~(0x3 << 30);
	reg |= slew_falling << 30;
	pg_writel(reg, drive_pingroups[pg].reg);
	spin_unlock_irqrestore(&mux_lock, flags);

	return 0;
}
/*
 * Apply a full drive-pingroup configuration.  Each setting is applied
 * independently; failures are logged but do not abort the rest.
 */
static void tegra_drive_pinmux_config_pingroup(enum tegra_drive_pingroup pingroup,
					  enum tegra_hsm hsm,
					  enum tegra_schmitt schmitt,
					  enum tegra_drive drive,
					  enum tegra_pull_strength pull_down,
					  enum tegra_pull_strength pull_up,
					  enum tegra_slew slew_rising,
					  enum tegra_slew slew_falling)
{
	int err;

	err = tegra_drive_pinmux_set_hsm(pingroup, hsm);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s hsm to %s: %d\n",
			drive_pinmux_name(pingroup),
			enable_name(hsm), err);

	err = tegra_drive_pinmux_set_schmitt(pingroup, schmitt);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s schmitt to %s: %d\n",
			drive_pinmux_name(pingroup),
			enable_name(schmitt), err);

	err = tegra_drive_pinmux_set_drive(pingroup, drive);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s drive to %s: %d\n",
			drive_pinmux_name(pingroup),
			drive_name(drive), err);

	err = tegra_drive_pinmux_set_pull_down(pingroup, pull_down);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s pull down to %d: %d\n",
			drive_pinmux_name(pingroup),
			pull_down, err);

	err = tegra_drive_pinmux_set_pull_up(pingroup, pull_up);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s pull up to %d: %d\n",
			drive_pinmux_name(pingroup),
			pull_up, err);

	err = tegra_drive_pinmux_set_slew_rising(pingroup, slew_rising);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s rising slew to %s: %d\n",
			drive_pinmux_name(pingroup),
			slew_name(slew_rising), err);

	err = tegra_drive_pinmux_set_slew_falling(pingroup, slew_falling);
	if (err < 0)
		pr_err("pinmux: can't set pingroup %s falling slew to %s: %d\n",
			drive_pinmux_name(pingroup),
			slew_name(slew_falling), err);
}

/* Apply an array of 'len' drive-pingroup configurations in order. */
void tegra_drive_pinmux_config_table(struct tegra_drive_pingroup_config *config,
	int len)
{
	int i;

	for (i = 0; i < len; i++)
		tegra_drive_pinmux_config_pingroup(config[i].pingroup,
						   config[i].hsm,
						   config[i].schmitt,
						   config[i].drive,
						   config[i].pull_down,
						   config[i].pull_up,
						   config[i].slew_rising,
						   config[i].slew_falling);
}
/*
 * Force every pingroup in the table to its safe function (used before
 * reconfiguration to avoid glitches).  The caller's table is not
 * modified; each entry is copied and its func replaced.
 */
void tegra_pinmux_set_safe_pinmux_table(const struct tegra_pingroup_config *config,
	int len)
{
	int i;
	struct tegra_pingroup_config c;

	for (i = 0; i < len; i++) {
		int err;
		c = config[i];
		if (c.pingroup < 0 || c.pingroup >= TEGRA_MAX_PINGROUP) {
			WARN_ON(1);
			continue;
		}
		c.func = pingroups[c.pingroup].func_safe;
		err = tegra_pinmux_set_func(&c);
		if (err < 0)
			pr_err("%s: tegra_pinmux_set_func returned %d setting "
			       "%s to %s\n", __func__, err,
			       pingroup_name(c.pingroup), func_name(c.func));
	}
}

/* Apply only the mux-function part of each table entry. */
void tegra_pinmux_config_pinmux_table(const struct tegra_pingroup_config *config,
	int len)
{
	int i;

	for (i = 0; i < len; i++) {
		int err;
		if (config[i].pingroup < 0 ||
		    config[i].pingroup >= TEGRA_MAX_PINGROUP) {
			WARN_ON(1);
			continue;
		}
		err = tegra_pinmux_set_func(&config[i]);
		if (err < 0)
			pr_err("%s: tegra_pinmux_set_func returned %d setting "
			       "%s to %s\n", __func__, err,
			       pingroup_name(config[i].pingroup),
			       func_name(config[i].func));
	}
}

/* Apply the given tristate setting to every pingroup in the table that
 * has a tristate register; per-entry values in the table are ignored. */
void tegra_pinmux_config_tristate_table(const struct tegra_pingroup_config *config,
	int len, enum tegra_tristate tristate)
{
	int i;
	int err;
	enum tegra_pingroup pingroup;

	for (i = 0; i < len; i++) {
		pingroup = config[i].pingroup;
		if (pingroups[pingroup].tri_reg >= 0) {
			err = tegra_pinmux_set_tristate(pingroup, tristate);
			if (err < 0)
				pr_err("pinmux: can't set pingroup %s tristate"
					" to %s: %d\n",	pingroup_name(pingroup),
					tri_name(tristate), err);
		}
	}
}

/* Apply the given pull-up/down setting to every pingroup in the table
 * that has a pull register; per-entry values in the table are ignored. */
void tegra_pinmux_config_pullupdown_table(const struct tegra_pingroup_config *config,
	int len, enum tegra_pullupdown pupd)
{
	int i;
	int err;
	enum tegra_pingroup pingroup;

	for (i = 0; i < len; i++) {
		pingroup = config[i].pingroup;
		if (pingroups[pingroup].pupd_reg >= 0) {
			err = tegra_pinmux_set_pullupdown(pingroup, pupd);
			if (err < 0)
				pr_err("pinmux: can't set pingroup %s pullupdown"
					" to %s: %d\n",	pingroup_name(pingroup),
					pupd_name(pupd), err);
		}
	}
}
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
/* Emit a comma followed by len + 1 spaces (no spaces for len < 0),
 * used to column-align the debugfs table output. */
static void dbg_pad_field(struct seq_file *s, int len)
{
	int pad = len + 1;

	seq_putc(s, ',');

	while (pad-- > 0)
		seq_putc(s, ' ');
}
/*
 * Dump the current mux/pull/tristate state of every pingroup in a form
 * that can be pasted back as a board pinmux table.
 */
static int dbg_pinmux_show(struct seq_file *s, void *unused)
{
	int i;
	int len;

	for (i = 0; i < TEGRA_MAX_PINGROUP; i++) {
		unsigned long tri;
		unsigned long mux;
		unsigned long pupd;

		seq_printf(s, "\t{TEGRA_PINGROUP_%s", pingroups[i].name);
		len = strlen(pingroups[i].name);
		dbg_pad_field(s, 5 - len);

		if (pingroups[i].mux_reg < 0) {
			seq_printf(s, "TEGRA_MUX_NONE");
			len = strlen("NONE");
		} else {
			mux = (pg_readl(pingroups[i].mux_reg) >>
			       pingroups[i].mux_bit) & 0x3;
			if (pingroups[i].funcs[mux] == TEGRA_MUX_RSVD) {
				seq_printf(s, "TEGRA_MUX_RSVD%1lu", mux+1);
				len = 5;
			} else {
				seq_printf(s, "TEGRA_MUX_%s",
					   tegra_mux_names[pingroups[i].funcs[mux]]);
				len = strlen(tegra_mux_names[pingroups[i].funcs[mux]]);
			}
		}
		dbg_pad_field(s, 13-len);

		if (pingroups[i].pupd_reg < 0) {
			seq_printf(s, "TEGRA_PUPD_NORMAL");
			len = strlen("NORMAL");
		} else {
			pupd = (pg_readl(pingroups[i].pupd_reg) >>
				pingroups[i].pupd_bit) & 0x3;
			seq_printf(s, "TEGRA_PUPD_%s", pupd_name(pupd));
			len = strlen(pupd_name(pupd));
		}
		dbg_pad_field(s, 9 - len);

		if (pingroups[i].tri_reg < 0) {
			seq_printf(s, "TEGRA_TRI_NORMAL");
		} else {
			tri = (pg_readl(pingroups[i].tri_reg) >>
			       pingroups[i].tri_bit) & 0x1;

			seq_printf(s, "TEGRA_TRI_%s", tri_name(tri));
		}
		seq_printf(s, "},\n");
	}
	return 0;
}

/* debugfs open: single-shot seq_file over dbg_pinmux_show(). */
static int dbg_pinmux_open(struct inode *inode, struct file *file)
{
	return single_open(file, dbg_pinmux_show, &inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= dbg_pinmux_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * seq_file show handler: dump each drive pingroup's strength/slew
 * configuration in board-file initializer form, decoding the single
 * config register via the HSM_EN/SCHMT_EN/LPMD/DRVDN/DRVUP/SLWR/SLWF
 * field accessors.
 */
static int dbg_drive_pinmux_show(struct seq_file *s, void *unused)
{
	int i;
	int len;
	for (i = 0; i < TEGRA_MAX_DRIVE_PINGROUP; i++) {
		u32 reg;
		seq_printf(s, "\t{TEGRA_DRIVE_PINGROUP_%s",
			   drive_pingroups[i].name);
		len = strlen(drive_pingroups[i].name);
		dbg_pad_field(s, 7 - len);
		/* one register holds the whole drive configuration */
		reg = pg_readl(drive_pingroups[i].reg);
		if (HSM_EN(reg)) {
			seq_printf(s, "TEGRA_HSM_ENABLE");
			len = 16;
		} else {
			seq_printf(s, "TEGRA_HSM_DISABLE");
			len = 17;
		}
		dbg_pad_field(s, 17 - len);
		if (SCHMT_EN(reg)) {
			seq_printf(s, "TEGRA_SCHMITT_ENABLE");
			len = 21;
		} else {
			seq_printf(s, "TEGRA_SCHMITT_DISABLE");
			len = 22;
		}
		dbg_pad_field(s, 22 - len);
		seq_printf(s, "TEGRA_DRIVE_%s", drive_name(LPMD(reg)));
		len = strlen(drive_name(LPMD(reg)));
		dbg_pad_field(s, 5 - len);
		/* pull-down then pull-up drive strengths (decimal) */
		seq_printf(s, "TEGRA_PULL_%d", DRVDN(reg));
		len = DRVDN(reg) < 10 ? 1 : 2;
		dbg_pad_field(s, 2 - len);
		seq_printf(s, "TEGRA_PULL_%d", DRVUP(reg));
		len = DRVUP(reg) < 10 ? 1 : 2;
		dbg_pad_field(s, 2 - len);
		/* rising then falling slew rates */
		seq_printf(s, "TEGRA_SLEW_%s", slew_name(SLWR(reg)));
		len = strlen(slew_name(SLWR(reg)));
		dbg_pad_field(s, 7 - len);
		seq_printf(s, "TEGRA_SLEW_%s", slew_name(SLWF(reg)));
		seq_printf(s, "},\n");
	}
	return 0;
}
/* debugfs open: wire the drive-group show routine into seq_file. */
static int dbg_drive_pinmux_open(struct inode *inode, struct file *file)
{
	void *priv = &inode->i_private;

	return single_open(file, dbg_drive_pinmux_show, priv);
}
/* File operations for the "tegra_pinmux_drive" debugfs entry (read-only). */
static const struct file_operations debug_drive_fops = {
	.open = dbg_drive_pinmux_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Create the two pinmux debugfs dump files.  Creation failure is not
 * fatal -- the driver works fine without the debug entries -- so the
 * results are deliberately ignored.
 */
static int __init tegra_pinmux_debuginit(void)
{
	debugfs_create_file("tegra_pinmux", S_IRUGO, NULL, NULL,
			    &debug_fops);
	debugfs_create_file("tegra_pinmux_drive", S_IRUGO, NULL, NULL,
			    &debug_drive_fops);
	return 0;
}
late_initcall(tegra_pinmux_debuginit);
#endif
| gpl-2.0 |
maxwen/primou-kernel-HTC | net/dns_resolver/dns_key.c | 3039 | 8111 | /* Key type used to cache DNS lookups made by the kernel
*
* See Documentation/networking/dns_resolver.txt
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
* Steve French (sfrench@us.ibm.com)
* Wang Lei (wang840925@gmail.com)
* David Howells (dhowells@redhat.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");
unsigned dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
const struct cred *dns_resolver_cache;
#define DNS_ERRORNO_OPTION "dnserror"
/*
* Instantiate a user defined key for dns_resolver.
*
* The data must be a NUL-terminated string, with the NUL char accounted in
* datalen.
*
* If the data contains a '#' characters, then we take the clause after each
* one to be an option of the form 'key=value'. The actual data of interest is
* the string leading up to the first '#'. For instance:
*
* "ip1,ip2,...#foo=bar"
*/
/*
 * Instantiate a dns_resolver key from the userspace-supplied payload.
 *
 * @key:     key being instantiated
 * @_data:   NUL-terminated string "ip1,ip2,...[#opt=val[#opt=val]...]"
 * @datalen: length of @_data including the trailing NUL
 *
 * The text before the first '#' becomes the key payload; each following
 * '#'-separated clause is parsed as an option.  The only recognised option
 * is "dnserror=<1..511>", which records a lookup failure in
 * key->type_data.x[0] instead of caching a payload.  Any other option is
 * rejected with -EINVAL.
 *
 * Returns 0 on success, -EINVAL on malformed input, -ENOMEM on allocation
 * failure.
 */
static int
dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
{
	struct user_key_payload *upayload;
	unsigned long derrno;
	int ret;
	size_t result_len = 0;
	const char *data = _data, *end, *opt;
	kenter("%%%d,%s,'%*.*s',%zu",
	       key->serial, key->description,
	       (int)datalen, (int)datalen, data, datalen);
	/* reject empty data and data lacking the accounted NUL terminator */
	if (datalen <= 1 || !data || data[datalen - 1] != '\0')
		return -EINVAL;
	datalen--;
	/* deal with any options embedded in the data */
	end = data + datalen;
	opt = memchr(data, '#', datalen);
	if (!opt) {
		/* no options: the entire data is the result */
		kdebug("no options");
		result_len = datalen;
	} else {
		const char *next_opt;
		result_len = opt - data;
		opt++;
		kdebug("options: '%s'", opt);
		do {
			const char *eq;
			int opt_len, opt_nlen, opt_vlen, tmp;
			/* each option extends to the next '#' or to the end */
			next_opt = memchr(opt, '#', end - opt) ?: end;
			opt_len = next_opt - opt;
			if (!opt_len) {
				printk(KERN_WARNING
				       "Empty option to dns_resolver key %d\n",
				       key->serial);
				return -EINVAL;
			}
			eq = memchr(opt, '=', opt_len) ?: end;
			opt_nlen = eq - opt;
			eq++;
			opt_vlen = next_opt - eq; /* will be -1 if no value */
			tmp = opt_vlen >= 0 ? opt_vlen : 0;
			kdebug("option '%*.*s' val '%*.*s'",
			       opt_nlen, opt_nlen, opt, tmp, tmp, eq);
			/* see if it's an error number representing a DNS error
			 * that's to be recorded as the result in this key */
			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
				kdebug("dns error number option");
				if (opt_vlen <= 0)
					goto bad_option_value;
				/* NOTE(review): strict_strtoul() reads up to
				 * the NUL at the end of the whole payload, not
				 * just to next_opt, so a further '#' clause
				 * after the value would make this fail --
				 * confirm against the intended grammar. */
				ret = strict_strtoul(eq, 10, &derrno);
				if (ret < 0)
					goto bad_option_value;
				if (derrno < 1 || derrno > 511)
					goto bad_option_value;
				kdebug("dns error no. = %lu", derrno);
				key->type_data.x[0] = -derrno;
				continue;
			}
		bad_option_value:
			printk(KERN_WARNING
			       "Option '%*.*s' to dns_resolver key %d:"
			       " bad/missing value\n",
			       opt_nlen, opt_nlen, opt, key->serial);
			return -EINVAL;
		} while (opt = next_opt + 1, opt < end);
	}
	/* don't cache the result if we're caching an error saying there's no
	 * result */
	if (key->type_data.x[0]) {
		kleave(" = 0 [h_error %ld]", key->type_data.x[0]);
		return 0;
	}
	kdebug("store result");
	ret = key_payload_reserve(key, result_len);
	if (ret < 0)
		return -EINVAL;
	upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
	if (!upayload) {
		kleave(" = -ENOMEM");
		return -ENOMEM;
	}
	/* copy the address list and NUL-terminate it for convenience */
	upayload->datalen = result_len;
	memcpy(upayload->data, data, result_len);
	upayload->data[result_len] = '\0';
	rcu_assign_pointer(key->payload.data, upayload);
	kleave(" = 0");
	return 0;
}
/*
* The description is of the form "[<type>:]<domain_name>"
*
* The domain name may be a simple name or an absolute domain name (which
* should end with a period). The domain name is case-independent.
*/
static int
dns_resolver_match(const struct key *key, const void *description)
{
int slen, dlen, ret = 0;
const char *src = key->description, *dsp = description;
kenter("%s,%s", src, dsp);
if (!src || !dsp)
goto no_match;
if (strcasecmp(src, dsp) == 0)
goto matched;
slen = strlen(src);
dlen = strlen(dsp);
if (slen <= 0 || dlen <= 0)
goto no_match;
if (src[slen - 1] == '.')
slen--;
if (dsp[dlen - 1] == '.')
dlen--;
if (slen != dlen || strncasecmp(src, dsp, slen) != 0)
goto no_match;
matched:
ret = 1;
no_match:
kleave(" = %d", ret);
return ret;
}
/*
* Describe a DNS key
*/
/*
 * Describe a DNS key for /proc/keys: the description, then either the
 * cached DNS error number or the payload length once instantiated.
 */
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
	int dns_error = key->type_data.x[0];

	seq_puts(m, key->description);
	if (!key_is_instantiated(key))
		return;

	if (dns_error)
		seq_printf(m, ": %d", dns_error);
	else
		seq_printf(m, ": %u", key->datalen);
}
/*
* read the DNS data
* - the key's semaphore is read-locked
*/
/*
 * Read the DNS data out of the key.
 * - the key's semaphore is read-locked
 * A key that cached a lookup failure returns that (negative) error
 * instead of any payload.
 */
static long dns_resolver_read(const struct key *key,
			      char __user *buffer, size_t buflen)
{
	long cached_error = key->type_data.x[0];

	if (cached_error)
		return cached_error;

	return user_read(key, buffer, buflen);
}
/*
 * The dns_resolver key type: a user-defined key variant whose payload is
 * the cached DNS result (revoke/destroy reuse the user key helpers).
 */
struct key_type key_type_dns_resolver = {
	.name = "dns_resolver",
	.instantiate = dns_resolver_instantiate,
	.match = dns_resolver_match,
	.revoke = user_revoke,
	.destroy = user_destroy,
	.describe = dns_resolver_describe,
	.read = dns_resolver_read,
};
/*
 * Module init: register the dns_resolver key type and build the override
 * credential set whose thread keyring caches kernel DNS lookups.
 * Returns 0 on success or a negative errno, unwinding partial setup.
 */
static int __init init_dns_resolver(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;
	printk(KERN_NOTICE "Registering the %s key type\n",
	       key_type_dns_resolver.name);
	/* create an override credential set with a special thread keyring in
	 * which DNS requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;
	/* keyring is view/read-only to userspace so entries can't be forged */
	keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}
	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;
	ret = register_key_type(&key_type_dns_resolver);
	if (ret < 0)
		goto failed_put_key;
	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	dns_resolver_cache = cred;
	kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
	return 0;
	/* error unwind, reverse order of acquisition */
failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
/*
 * Module exit: revoke the cache keyring before tearing down the key
 * type and dropping the override credentials.
 */
static void __exit exit_dns_resolver(void)
{
	key_revoke(dns_resolver_cache->thread_keyring);
	unregister_key_type(&key_type_dns_resolver);
	put_cred(dns_resolver_cache);
	printk(KERN_NOTICE "Unregistered %s key type\n",
	       key_type_dns_resolver.name);
}
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
| gpl-2.0 |
AresHou/android_kernel_lge_geehrc | drivers/staging/vme/boards/vme_vmivme7805.c | 3551 | 2955 | /*
* Support for the VMIVME-7805 board access to the Universe II bridge.
*
* Author: Arthur Benilov <arthur.benilov@iba-group.com>
* Copyright 2010 Ion Beam Application, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/io.h>
#include "vme_vmivme7805.h"
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);
/** Base address to access FPGA register */
static void *vmic_base;
static char driver_name[] = "vmivme_7805";
static struct pci_device_id vmic_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
static struct pci_driver vmic_driver = {
.name = driver_name,
.id_table = vmic_ids,
.probe = vmic_probe,
.remove = vmic_remove,
};
/* Module init: hand the driver to the PCI core; probing is deferred
 * to vmic_probe() when a matching device appears. */
static int __init vmic_init(void)
{
	int rc;

	rc = pci_register_driver(&vmic_driver);
	return rc;
}
/*
 * PCI probe: enable the board, map the FPGA control registers from
 * BAR 0, then reset and enable the VME interface (bus-error reporting,
 * byte swapping).  Returns 0 on success or a negative errno, unwinding
 * any partial setup on failure.
 */
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval;
	u32 data;
	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err;
	}
	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}
	/* Map registers in BAR 0 */
	vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
	if (!vmic_base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}
	/* Clear the FPGA VME IF contents */
	iowrite32(0, vmic_base + VME_CONTROL);
	/* Clear any initial BERR */
	/* only the low 12 control bits are carried over on each rewrite */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data |= BM_VME_CONTROL_BERRST;
	iowrite32(data, vmic_base + VME_CONTROL);
	/* Enable the vme interface and byte swapping */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data = data | BM_VME_CONTROL_MASTER_ENDIAN |
		BM_VME_CONTROL_SLAVE_ENDIAN |
		BM_VME_CONTROL_ABLE |
		BM_VME_CONTROL_BERRI |
		BM_VME_CONTROL_BPENA |
		BM_VME_CONTROL_VBENA;
	iowrite32(data, vmic_base + VME_CONTROL);
	return 0;
	/* error unwind, reverse order of acquisition */
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err:
	return retval;
}
/*
 * PCI remove: undo vmic_probe() in reverse order -- unmap the register
 * window, release the BARs, then disable the device.
 */
static void vmic_remove(struct pci_dev *pdev)
{
	iounmap(vmic_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* Module exit: unregister from the PCI core (triggers vmic_remove()). */
static void __exit vmic_exit(void)
{
	pci_unregister_driver(&vmic_driver);
}
MODULE_DESCRIPTION("VMIVME-7805 board support driver");
MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
MODULE_LICENSE("GPL");
module_init(vmic_init);
module_exit(vmic_exit);
| gpl-2.0 |
shao2610/3.4-kernel | drivers/staging/octeon/ethernet-sgmii.c | 7903 | 3719 | /**********************************************************************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2007 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/ratelimit.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
int cvm_oct_sgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
cvmx_helper_link_info_t link_info;
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
if (!octeon_is_simulation()) {
link_info = cvmx_helper_link_get(priv->port);
if (!link_info.s.link_up)
netif_carrier_off(dev);
}
return 0;
}
int cvm_oct_sgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
return 0;
}
/*
 * Periodic link poll, used when no PHY driver manages the port.  If the
 * link state changed, re-run link autoconfiguration and keep the Linux
 * carrier state (and a rate-limited log message) in sync.
 */
static void cvm_oct_sgmii_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;
	link_info = cvmx_helper_link_get(priv->port);
	/* nothing to do if the link state hasn't changed */
	if (link_info.u64 == priv->link_info)
		return;
	/* reprogram the port for the new speed/duplex and remember it */
	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;
	/* Tell Linux */
	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
		if (priv->queue != -1)
			printk_ratelimited
			    ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
			     dev->name, link_info.s.speed,
			     (link_info.s.full_duplex) ? "Full" : "Half",
			     priv->port, priv->queue);
		else
			printk_ratelimited
			    ("%s: %u Mbps %s duplex, port %2d, POW\n",
			     dev->name, link_info.s.speed,
			     (link_info.s.full_duplex) ? "Full" : "Half",
			     priv->port);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
		printk_ratelimited("%s: Link down\n", dev->name);
	}
}
/*
 * Per-device init for SGMII ports: run the common setup, leave the port
 * stopped, and install our software link poller when no PHY driver is
 * attached on real hardware.  Always returns 0.
 */
int cvm_oct_sgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv;

	cvm_oct_common_init(dev);
	dev->netdev_ops->ndo_stop(dev);

	priv = netdev_priv(dev);
	if (!octeon_is_simulation() && priv->phydev == NULL)
		priv->poll = cvm_oct_sgmii_poll;

	/* FIXME: Need autoneg logic */
	return 0;
}
/* Per-device teardown for SGMII ports; only the common path is needed. */
void cvm_oct_sgmii_uninit(struct net_device *dev)
{
	cvm_oct_common_uninit(dev);
}
| gpl-2.0 |
batlin1977/LG_L90_kernel | drivers/infiniband/hw/ehca/hcp_if.c | 8415 | 28897 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Firmware Infiniband Interface code for POWER
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
static DEFINE_SPINLOCK(hcall_lock);
/*
 * Map a hypervisor H_LONG_BUSY_* return code to the suggested sleep
 * interval in milliseconds before retrying; unknown codes get 1ms.
 */
static u32 get_longbusy_msecs(int longbusy_rc)
{
	if (longbusy_rc == H_LONG_BUSY_ORDER_1_MSEC)
		return 1;
	if (longbusy_rc == H_LONG_BUSY_ORDER_10_MSEC)
		return 10;
	if (longbusy_rc == H_LONG_BUSY_ORDER_100_MSEC)
		return 100;
	if (longbusy_rc == H_LONG_BUSY_ORDER_1_SEC)
		return 1000;
	if (longbusy_rc == H_LONG_BUSY_ORDER_10_SEC)
		return 10000;
	if (longbusy_rc == H_LONG_BUSY_ORDER_100_SEC)
		return 100000;
	return 1;
}
/*
 * Issue a no-output hcall, retrying up to 5 times while firmware
 * reports a long-busy status (sleeping the hinted interval between
 * attempts).  Returns the hcall status, or H_BUSY once the retries are
 * exhausted.
 */
static long ehca_plpar_hcall_norets(unsigned long opcode,
				    unsigned long arg1,
				    unsigned long arg2,
				    unsigned long arg3,
				    unsigned long arg4,
				    unsigned long arg5,
				    unsigned long arg6,
				    unsigned long arg7)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;
	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);
		ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
					 arg5, arg6, arg7);
		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);
		/* long-busy: sleep as hinted, then retry */
		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}
		if (ret < H_SUCCESS)
			ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
				     opcode, ret, arg1, arg2, arg3,
				     arg4, arg5, arg6, arg7);
		else
			if (unlikely(ehca_debug_level >= 2))
				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
		return ret;
	}
	return H_BUSY;
}
/*
 * Issue a 9-output hcall with the same long-busy retry policy as
 * ehca_plpar_hcall_norets().  @outs receives the 9 return registers.
 * Returns the hcall status, or H_BUSY once the 5 retries are exhausted.
 */
static long ehca_plpar_hcall9(unsigned long opcode,
			      unsigned long *outs, /* array of 9 outputs */
			      unsigned long arg1,
			      unsigned long arg2,
			      unsigned long arg3,
			      unsigned long arg4,
			      unsigned long arg5,
			      unsigned long arg6,
			      unsigned long arg7,
			      unsigned long arg8,
			      unsigned long arg9)
{
	long ret;
	int i, sleep_msecs;
	unsigned long flags = 0;
	if (unlikely(ehca_debug_level >= 2))
		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
			     arg1, arg2, arg3, arg4, arg5,
			     arg6, arg7, arg8, arg9);
	for (i = 0; i < 5; i++) {
		/* serialize hCalls to work around firmware issue */
		if (ehca_lock_hcalls)
			spin_lock_irqsave(&hcall_lock, flags);
		ret = plpar_hcall9(opcode, outs,
				   arg1, arg2, arg3, arg4, arg5,
				   arg6, arg7, arg8, arg9);
		if (ehca_lock_hcalls)
			spin_unlock_irqrestore(&hcall_lock, flags);
		/* long-busy: sleep as hinted, then retry */
		if (H_IS_LONG_BUSY(ret)) {
			sleep_msecs = get_longbusy_msecs(ret);
			msleep_interruptible(sleep_msecs);
			continue;
		}
		if (ret < H_SUCCESS) {
			ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
				     opcode, arg1, arg2, arg3, arg4, arg5,
				     arg6, arg7, arg8, arg9);
			ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		} else if (unlikely(ehca_debug_level >= 2))
			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
				     ret, outs[0], outs[1], outs[2], outs[3],
				     outs[4], outs[5], outs[6], outs[7],
				     outs[8]);
		return ret;
	}
	return H_BUSY;
}
/*
 * H_ALLOC_RESOURCE (EQ): allocate an event queue, or the notification
 * event queue when @neq_control == 1.  On return the EQ handle and the
 * actual entry/page counts plus the interrupt source token are filled
 * in from the hcall output registers.
 */
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_pfeq *pfeq,
			     const u32 neq_control,
			     const u32 number_of_entries,
			     struct ipz_eq_handle *eq_handle,
			     u32 *act_nr_of_entries,
			     u32 *act_pages,
			     u32 *eq_ist)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 allocate_controls;
	/* resource type */
	allocate_controls = 3ULL;
	/* ISN is associated */
	if (neq_control != 1)
		allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
	else /* notification event queue */
		allocate_controls = (1ULL << 63) | allocate_controls;
	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,  /* r4 */
				allocate_controls,      /* r5 */
				number_of_entries,      /* r6 */
				0, 0, 0, 0, 0, 0);
	eq_handle->handle = outs[0];
	*act_nr_of_entries = (u32)outs[3];
	*act_pages = (u32)outs[4];
	*eq_ist = (u32)outs[5];
	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resource - ret=%lli ", ret);
	return ret;
}
/*
 * H_RESET_EVENTS: re-arm the given event mask on an event queue.
 * Thin wrapper; returns the hcall status unchanged.
 */
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
		       struct ipz_eq_handle eq_handle,
		       const u64 event_mask)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_RESET_EVENTS,
				     adapter_handle.handle, /* r4 */
				     eq_handle.handle,      /* r5 */
				     event_mask,            /* r6 */
				     0, 0, 0, 0);
	return rc;
}
/*
 * H_ALLOC_RESOURCE (CQ): allocate a completion queue on the given EQ.
 * On success the hardware-access galpas are constructed from the
 * returned addresses; if that fails the CQ is freed again and H_NO_MEM
 * is returned.
 */
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_cq *cq,
			     struct ehca_alloc_cq_parms *param)
{
	int rc;
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,   /* r4 */
				2,                       /* r5: resource type CQ */
				param->eq_handle.handle, /* r6 */
				cq->token,               /* r7 */
				param->nr_cqe,           /* r8 */
				0, 0, 0, 0);
	cq->ipz_cq_handle.handle = outs[0];
	param->act_nr_of_entries = (u32)outs[3];
	param->act_pages = (u32)outs[4];
	if (ret == H_SUCCESS) {
		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[5]);
			/* undo the allocation; callers only see H_NO_MEM */
			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,     /* r4 */
						cq->ipz_cq_handle.handle,  /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}
	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);
	return ret;
}
/*
 * H_ALLOC_RESOURCE (QP): allocate a queue pair with the attributes in
 * @parms.  The request is packed into the control/register bitmasks
 * defined above; on return the actual WQE/SGE counts and queue sizes
 * are unpacked back into @parms.  As with the CQ variant, a galpas
 * construction failure frees the QP and reports H_NO_MEM.
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
			     struct ehca_alloc_qp_parms *parms, int is_user)
{
	int rc;
	u64 ret;
	u64 allocate_controls, max_r10_reg, r11, r12;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	/* pack QP type, service, signalling and page-size selections */
	allocate_controls =
		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
		| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
		| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
				 parms->squeue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
				 parms->rqueue.page_size)
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
				 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
		| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
				 parms->ud_av_l_key_ctl)
		| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
	/* requested maxima for WRs and SGEs (+1 reserves a slot) */
	max_r10_reg =
		EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
			       parms->squeue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
				 parms->rqueue.max_wr + 1)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
				 parms->squeue.max_sge)
		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
				 parms->rqueue.max_sge);
	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
	/* r12 carries the SRQ limit for SRQs, else the associated SRQ QPN */
	if (parms->ext_type == EQPT_SRQ)
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
	else
		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
				adapter_handle.handle,  /* r4 */
				allocate_controls,      /* r5 */
				parms->send_cq_handle.handle,
				parms->recv_cq_handle.handle,
				parms->eq_handle.handle,
				((u64)parms->token << 32) | parms->pd.value,
				max_r10_reg, r11, r12);
	/* unpack the granted resources */
	parms->qp_handle.handle = outs[0];
	parms->real_qp_num = (u32)outs[1];
	parms->squeue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
	parms->rqueue.act_nr_wqes =
		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
	parms->squeue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
	parms->rqueue.act_nr_sges =
		(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
	parms->squeue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
	parms->rqueue.queue_size =
		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
	if (ret == H_SUCCESS) {
		/* NOTE(review): both galpa addresses come from outs[6] here,
		 * whereas the CQ path uses outs[5]/outs[6] -- presumably
		 * intentional for QPs; confirm against firmware docs. */
		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
		if (rc) {
			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
				     rc, outs[6]);
			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
						adapter_handle.handle,     /* r4 */
						parms->qp_handle.handle,   /* r5 */
						0, 0, 0, 0, 0);
			ret = H_NO_MEM;
		}
	}
	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", ret);
	return ret;
}
/*
 * H_QUERY_PORT: fetch the attributes of HCA port @port_id into
 * @query_port_response_block, which must be page-aligned (the firmware
 * writes a whole page); otherwise H_PARAMETER is returned immediately.
 */
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
		      const u8 port_id,
		      struct hipz_query_port *query_port_response_block)
{
	u64 ret;
	u64 r_cb = virt_to_abs(query_port_response_block);
	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response block not page aligned");
		return H_PARAMETER;
	}
	ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
				      adapter_handle.handle, /* r4 */
				      port_id,               /* r5 */
				      r_cb,                  /* r6 */
				      0, 0, 0, 0);
	if (ehca_debug_level >= 2)
		ehca_dmp(query_port_response_block, 64, "response_block");
	return ret;
}
/*
 * H_MODIFY_PORT: change port attributes.  The IB modify mask bits are
 * translated into the eHCA attribute word (shutdown, init type, QKey
 * violation counter reset) before issuing the hcall.
 */
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
		       const u8 port_id, const u32 port_cap,
		       const u8 init_type, const int modify_mask)
{
	u64 attrs = port_cap;

	if (modify_mask & IB_PORT_SHUTDOWN)
		attrs |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
	if (modify_mask & IB_PORT_INIT_TYPE)
		attrs |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
	if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
		attrs |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);

	return ehca_plpar_hcall_norets(H_MODIFY_PORT,
				       adapter_handle.handle, /* r4 */
				       port_id,               /* r5 */
				       attrs,                 /* r6 */
				       0, 0, 0, 0);
}
/*
 * H_QUERY_HCA: fetch adapter-wide attributes into @query_hca_rblock,
 * which must be page-aligned; otherwise H_PARAMETER is returned.
 */
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
		     struct hipz_query_hca *query_hca_rblock)
{
	u64 r_cb = virt_to_abs(query_hca_rblock);
	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("response_block=%p not page aligned",
			     query_hca_rblock);
		return H_PARAMETER;
	}
	return ehca_plpar_hcall_norets(H_QUERY_HCA,
				       adapter_handle.handle, /* r4 */
				       r_cb,                  /* r5 */
				       0, 0, 0, 0, 0);
}
/*
 * H_REGISTER_RPAGES: hand @count pages of queue memory to the firmware
 * for the resource identified by @resource_handle.  The queue type and
 * page size are packed together into r5.
 */
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
			  const u8 pagesize,
			  const u8 queue_type,
			  const u64 resource_handle,
			  const u64 logical_address_of_page,
			  u64 count)
{
	return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
				       adapter_handle.handle,            /* r4 */
				       (u64)queue_type | ((u64)pagesize) << 8,
				       /* r5 */
				       resource_handle,                  /* r6 */
				       logical_address_of_page,          /* r7 */
				       count,                            /* r8 */
				       0, 0);
}
/*
 * Register one page of event-queue memory with the firmware.  Only a
 * single page per call is accepted for EQs, so any other count is
 * rejected with H_PARAMETER before reaching the hypervisor.
 */
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		/* fixed log typo ("Ppage"); wording now matches the
		 * CQ/QP variants of this helper */
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}
/*
 * H_QUERY_INT_STATE: query the state of interrupt source @ist.
 * H_BUSY is an expected transient result; anything else that is not
 * H_SUCCESS is logged as an error.  Returns the raw hcall status.
 */
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
			   u32 ist)
{
	u64 rc = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
					 adapter_handle.handle, /* r4 */
					 ist,                   /* r5 */
					 0, 0, 0, 0, 0);

	if (rc != H_SUCCESS && rc != H_BUSY)
		ehca_gen_err("Could not query interrupt state.");

	return rc;
}
/*
 * Register one page of completion-queue memory with the firmware.
 * Exactly one page per call is accepted; other counts are rejected
 * with H_PARAMETER.
 */
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_cq_handle cq_handle,
			     struct ehca_pfcq *pfcq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa gal)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     cq_handle.handle, logical_address_of_page,
				     count);
}
/*
 * Register queue-pair memory pages with the firmware, at most one page
 * per call.
 * NOTE(review): the guard here is 'count > 1' (count == 0 passes),
 * unlike the '!= 1' check in the CQ/EQ variants -- presumably
 * intentional for QPs; confirm before changing.
 */
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_qp_handle qp_handle,
			     struct ehca_pfqp *pfqp,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count,
			     const struct h_galpa galpa)
{
	if (count > 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
				     qp_handle.handle, logical_address_of_page,
				     count);
}
/*
 * H_DISABLE_AND_GETC: disable a QP and retrieve the addresses of the
 * next send/receive WQEs to be processed.  Either output pointer may be
 * NULL if the caller is not interested in that queue.
 */
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
			       const struct ipz_qp_handle qp_handle,
			       struct ehca_pfqp *pfqp,
			       void **log_addr_next_sq_wqe2processed,
			       void **log_addr_next_rq_wqe2processed,
			       int dis_and_get_function_code)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
				adapter_handle.handle,      /* r4 */
				dis_and_get_function_code,  /* r5 */
				qp_handle.handle,           /* r6 */
				0, 0, 0, 0, 0, 0);
	if (log_addr_next_sq_wqe2processed)
		*log_addr_next_sq_wqe2processed = (void *)outs[0];
	if (log_addr_next_rq_wqe2processed)
		*log_addr_next_rq_wqe2processed = (void *)outs[1];
	return ret;
}
/*
 * H_MODIFY_QP: apply the attributes in @mqpcb (selected by
 * @update_mask) to a queue pair.  The control block is passed to
 * firmware by its absolute address.
 */
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
		     const struct ipz_qp_handle qp_handle,
		     struct ehca_pfqp *pfqp,
		     const u64 update_mask,
		     struct hcp_modify_qp_control_block *mqpcb,
		     struct h_galpa gal)
{
	u64 ret;
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
				adapter_handle.handle, /* r4 */
				qp_handle.handle,      /* r5 */
				update_mask,           /* r6 */
				virt_to_abs(mqpcb),    /* r7 */
				0, 0, 0, 0, 0);
	if (ret == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Insufficient resources ret=%lli", ret);
	return ret;
}
/*
 * Query a queue pair; the hypervisor fills the control block @qqpcb.
 */
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
		    const struct ipz_qp_handle qp_handle,
		    struct ehca_pfqp *pfqp,
		    struct hcp_modify_qp_control_block *qqpcb,
		    struct h_galpa gal)
{
	u64 r_cb = virt_to_abs(qqpcb);	/* absolute address of the block */

	return ehca_plpar_hcall_norets(H_QUERY_QP,
				       adapter_handle.handle,	/* r4 */
				       qp_handle.handle,	/* r5 */
				       r_cb,			/* r6 */
				       0, 0, 0, 0);
}
/*
 * Destroy a queue pair.
 *
 * Unmaps the QP's galpas, disables the QP in hardware
 * (H_DISABLE_AND_GETC with function code 1) and frees the hypervisor
 * resource.  Note: the disable step's return code is only logged and
 * then overwritten; the value returned is that of H_FREE_RESOURCE.
 */
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
/* release the galpas mapping first */
ret = hcp_galpas_dtor(&qp->galpas);
if (ret) {
ehca_gen_err("Could not destruct qp->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
/* function code */
1, /* r5 */
qp->ipz_qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (ret == H_HARDWARE)
ehca_gen_err("HCA not operational. ret=%lli", ret);
/* free the resource regardless of the disable outcome */
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
qp->ipz_qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource still in use. ret=%lli", ret);
return ret;
}
/*
 * Define the special QP0 alias for @port via H_DEFINE_AQP0.
 */
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_DEFINE_AQP0,
				     adapter_handle.handle,	/* r4 */
				     qp_handle.handle,		/* r5 */
				     port,			/* r6 */
				     0, 0, 0, 0);
	return rc;
}
/*
 * Define the special QP1 alias for @port via H_DEFINE_AQP1; the PMA
 * and BMA QP numbers are returned through @pma_qp_nr / @bma_qp_nr.
 */
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u32 port, u32 * pma_qp_nr,
		       u32 * bma_qp_nr)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
			       adapter_handle.handle,	/* r4 */
			       qp_handle.handle,	/* r5 */
			       port,			/* r6 */
			       0, 0, 0, 0, 0, 0);
	*pma_qp_nr = (u32)outs[0];
	*bma_qp_nr = (u32)outs[1];
	if (rc == H_ALIAS_EXIST)
		ehca_gen_err("AQP1 already exists. ret=%lli", rc);

	return rc;
}
/*
 * Attach a QP to the multicast group identified by @mcg_dlid and the
 * @subnet_prefix/@interface_id GID pair.
 */
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
				     adapter_handle.handle,	/* r4 */
				     qp_handle.handle,		/* r5 */
				     mcg_dlid,			/* r6 */
				     interface_id,		/* r7 */
				     subnet_prefix,		/* r8 */
				     0, 0);
	if (rc == H_NOT_ENOUGH_RESOURCES)
		ehca_gen_err("Not enough resources. ret=%lli", rc);

	return rc;
}
/*
 * Detach a QP from the multicast group; counterpart of
 * hipz_h_attach_mcqp() (same register layout).
 */
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
		       const struct ipz_qp_handle qp_handle,
		       struct h_galpa gal,
		       u16 mcg_dlid,
		       u64 subnet_prefix, u64 interface_id)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_DETACH_MCQP,
				     adapter_handle.handle,	/* r4 */
				     qp_handle.handle,		/* r5 */
				     mcg_dlid,			/* r6 */
				     interface_id,		/* r7 */
				     subnet_prefix,		/* r8 */
				     0, 0);
	return rc;
}
/*
 * Destroy a completion queue.
 *
 * Unmaps the CQ's galpas and frees the hypervisor resource;
 * @force_flag is normalized to 0/1 and passed to H_FREE_RESOURCE.
 *
 * Fix: the galpas-destructor error message referred to "cp->galpas";
 * the field actually destructed is cq->galpas.
 */
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle,	     /* r4 */
				      cq->ipz_cq_handle.handle,	     /* r5 */
				      force_flag != 0 ? 1L : 0L,     /* r6 */
				      0, 0, 0, 0);
	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
	return ret;
}
/*
 * Destroy an event queue: unmap its galpas, then free the resource.
 */
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_eq *eq)
{
	u64 rc = hcp_galpas_dtor(&eq->galpas);

	if (rc) {
		ehca_gen_err("Could not destruct eq->galpas");
		return H_RESOURCE;
	}
	rc = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				     adapter_handle.handle,	/* r4 */
				     eq->ipz_eq_handle.handle,	/* r5 */
				     0, 0, 0, 0, 0);
	if (rc == H_RESOURCE)
		ehca_gen_err("Resource in use. ret=%lli ", rc);

	return rc;
}
/*
 * Allocate a memory-region resource covering @vaddr/@length with
 * @access_ctrl rights in protection domain @pd.  The handle and the
 * local/remote keys come back through @outparms.
 */
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u64 vaddr,
			     const u64 length,
			     const u32 access_ctrl,
			     const struct ipz_pd pd,
			     struct ehca_mr_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
			       adapter_handle.handle,		/* r4 */
			       5,				/* r5 */
			       vaddr,				/* r6 */
			       length,				/* r7 */
			       (((u64)access_ctrl) << 32ULL),	/* r8 */
			       pd.value,			/* r9 */
			       0, 0, 0);
	/* note: outs[1] is not consumed; keys live in slots 2 and 3 */
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return rc;
}
/*
 * Register physical pages with a memory region.
 *
 * With ehca_debug_level >= 3 the kpage addresses being registered are
 * dumped first.  For multi-page registrations the page-list address
 * must be EHCA_PAGESIZE aligned, otherwise H_PARAMETER is returned.
 *
 * Fix: the debug-dump loop used an `int` index compared against the
 * u64 @count, a signed/unsigned mismatch that overflows for large
 * counts; the index (and its format specifier) now match @count.
 */
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mr *mr,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	u64 ret;

	if (unlikely(ehca_debug_level >= 3)) {
		if (count > 1) {
			u64 *kpage;
			u64 i;	/* same width as count: no overflow */
			kpage = (u64 *)abs_to_virt(logical_address_of_page);
			for (i = 0; i < count; i++)
				ehca_gen_dbg("kpage[%llu]=%p",
					     i, (void *)kpage[i]);
		} else
			ehca_gen_dbg("kpage=%p",
				     (void *)logical_address_of_page);
	}
	if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
		ehca_gen_err("logical_address_of_page not on a 4k boundary "
			     "adapter_handle=%llx mr=%p mr_handle=%llx "
			     "pagesize=%x queue_type=%x "
			     "logical_address_of_page=%llx count=%llx",
			     adapter_handle.handle, mr,
			     mr->ipz_mr_handle.handle, pagesize, queue_type,
			     logical_address_of_page, count);
		ret = H_PARAMETER;
	} else
		ret = hipz_h_register_rpage(adapter_handle, pagesize,
					    queue_type,
					    mr->ipz_mr_handle.handle,
					    logical_address_of_page, count);
	return ret;
}
/*
 * Query a memory region; length, virtual address, access control and
 * the lkey/rkey pair are unpacked from the hcall output registers
 * into @outparms.
 */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mr *mr,
		    struct ehca_mr_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_QUERY_MR, outs,
			       adapter_handle.handle,		/* r4 */
			       mr->ipz_mr_handle.handle,	/* r5 */
			       0, 0, 0, 0, 0, 0, 0);
	outparms->len = outs[0];
	outparms->vaddr = outs[1];
	outparms->acl = outs[4] >> 32;			/* upper half */
	outparms->lkey = (u32)(outs[5] >> 32);		/* upper half */
	outparms->rkey = (u32)(outs[5] & (0xffffffff));	/* lower half */

	return rc;
}
/*
 * Free a memory-region resource in the hypervisor.
 */
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mr *mr)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				     adapter_handle.handle,	 /* r4 */
				     mr->ipz_mr_handle.handle,	 /* r5 */
				     0, 0, 0, 0, 0);
	return rc;
}
/*
 * Re-register a physical memory region with new address/length/rights
 * and protection domain; updated vaddr and keys come back in @outparms.
 */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
			  const struct ehca_mr *mr,
			  const u64 vaddr_in,
			  const u64 length,
			  const u32 access_ctrl,
			  const struct ipz_pd pd,
			  const u64 mr_addr_cb,
			  struct ehca_mr_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
			       adapter_handle.handle,		/* r4 */
			       mr->ipz_mr_handle.handle,	/* r5 */
			       vaddr_in,			/* r6 */
			       length,				/* r7 */
			       /* r8: access bits in the upper word, pd below */
			       ((((u64)access_ctrl) << 32ULL) | pd.value),
			       mr_addr_cb,			/* r9 */
			       0, 0, 0);
	outparms->vaddr = outs[1];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return rc;
}
/*
 * Register a shared memory region derived from @orig_mr at @vaddr_in;
 * the new handle and keys are returned through @outparms.
 */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
			const struct ehca_mr *mr,
			const struct ehca_mr *orig_mr,
			const u64 vaddr_in,
			const u32 access_ctrl,
			const struct ipz_pd pd,
			struct ehca_mr_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
			       adapter_handle.handle,		/* r4 */
			       orig_mr->ipz_mr_handle.handle,	/* r5 */
			       vaddr_in,			/* r6 */
			       (((u64)access_ctrl) << 32ULL),	/* r7 */
			       pd.value,			/* r8 */
			       0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->lkey = (u32)outs[2];
	outparms->rkey = (u32)outs[3];

	return rc;
}
/*
 * Allocate a memory-window resource in protection domain @pd; the
 * handle and rkey come back through @outparms.
 */
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
			     const struct ehca_mw *mw,
			     const struct ipz_pd pd,
			     struct ehca_mw_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
			       adapter_handle.handle,	/* r4 */
			       6,			/* r5 */
			       pd.value,		/* r6 */
			       0, 0, 0, 0, 0, 0);
	outparms->handle.handle = outs[0];
	outparms->rkey = (u32)outs[3];

	return rc;
}
/*
 * Query a memory window; only the rkey is reported back via @outparms.
 */
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
		    const struct ehca_mw *mw,
		    struct ehca_mw_hipzout_parms *outparms)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	u64 rc;

	rc = ehca_plpar_hcall9(H_QUERY_MW, outs,
			       adapter_handle.handle,		/* r4 */
			       mw->ipz_mw_handle.handle,	/* r5 */
			       0, 0, 0, 0, 0, 0, 0);
	outparms->rkey = (u32)outs[3];

	return rc;
}
/*
 * Free a memory-window resource in the hypervisor.
 */
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
			    const struct ehca_mw *mw)
{
	u64 rc;

	rc = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				     adapter_handle.handle,	 /* r4 */
				     mw->ipz_mw_handle.handle,	 /* r5 */
				     0, 0, 0, 0, 0);
	return rc;
}
/*
 * Fetch error data for @ressource_handle into @rblock, which must be
 * page aligned (checked here, H_PARAMETER otherwise).
 */
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 ressource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}

	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       ressource_handle,
				       r_cb,
				       0, 0, 0, 0);
}
/*
 * Issue an end-of-interrupt for @irq via the H_EOI hcall; the XIRR
 * value carries 0xff in bits 31..24 and the irq number below.
 */
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr = (0xffULL << 24) | irq;

	iosync();	/* order prior MMIO before signalling EOI */

	return plpar_hcall_norets(H_EOI, xirr);
}
| gpl-2.0 |
mattyen/ntb | drivers/uwb/i1480/dfu/usb.c | 9439 | 13404 | /*
* Intel Wireless UWB Link 1480
* USB SKU firmware upload implementation
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* This driver will prepare the i1480 device to behave as a real
* Wireless USB HWA adaptor by uploading the firmware.
*
* When the device is connected or driver is loaded, i1480_usb_probe()
* is called--this will allocate and initialize the device structure,
* fill in the pointers to the common functions (read, write,
* wait_init_done and cmd for HWA command execution) and once that is
* done, call the common firmware uploading routine. Then clean up and
* return -ENODEV, as we don't attach to the device.
*
* The rest are the basic ops we implement that the fw upload code
* uses to do its job. All the ops in the common code are i1480->NAME,
* the functions are i1480_usb_NAME().
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/uwb.h>
#include <linux/usb/wusb.h>
#include <linux/usb/wusb-wa.h>
#include "i1480-dfu.h"
/* Per-device state: generic i1480 core plus its USB transport. */
struct i1480_usb {
struct i1480 i1480; /* embedded core state; recovered via container_of() */
struct usb_device *usb_dev; /* referenced device, released in destroy() */
struct usb_interface *usb_iface; /* interface #0, which we bind to */
struct urb *neep_urb; /* URB for reading from EP1 */
};
/* Initialize the embedded, bus-independent i1480 core state. */
static
void i1480_usb_init(struct i1480_usb *i1480_usb)
{
i1480_init(&i1480_usb->i1480);
}
/*
 * Take references on the USB device/interface, bind the driver data
 * and allocate the notification/event URB.
 *
 * Returns 0 on success, -ENOMEM if the URB cannot be allocated (all
 * references taken above are dropped again in that case).
 */
static
int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
{
	struct usb_device *usb_dev = interface_to_usbdev(iface);

	i1480_usb->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
	i1480_usb->usb_iface = usb_get_intf(iface);
	usb_set_intfdata(iface, i1480_usb);		/* Bind the driver to iface0 */
	i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (i1480_usb->neep_urb != NULL)
		return 0;

	/* allocation failed: undo the bindings taken above */
	usb_set_intfdata(iface, NULL);
	usb_put_intf(iface);
	usb_put_dev(usb_dev);
	return -ENOMEM;
}
/*
 * Undo i1480_usb_create(): stop and free the EP1 URB, unbind the
 * driver data and drop the interface/device references.
 */
static
void i1480_usb_destroy(struct i1480_usb *i1480_usb)
{
	struct usb_interface *iface = i1480_usb->usb_iface;

	usb_kill_urb(i1480_usb->neep_urb);
	usb_free_urb(i1480_usb->neep_urb);
	usb_set_intfdata(iface, NULL);
	usb_put_intf(iface);
	usb_put_dev(i1480_usb->usb_dev);
}
/**
 * Write a buffer to a memory address in the i1480 device
 *
 * @i1480: i1480 instance
 * @memory_address:
 *          Address where to write the data buffer to.
 * @buffer: Buffer to the data
 * @size:   Size of the buffer [has to be < 512].
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
 * so each chunk is bounced through the local i1480 command buffer
 * before being sent; transfers are capped at i1480->buf_size.
 */
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
		    const void *buffer, size_t size)
{
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	size_t done = 0;
	int result = 0;

	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
	while (size > 0) {
		size_t chunk =
			size < i1480->buf_size ? size : i1480->buf_size;
		/* bounce through the DMA-able command buffer */
		memcpy(i1480->cmd_buf, buffer + done, chunk);
		result = usb_control_msg(
			i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
			0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			memory_address, (memory_address >> 16),
			i1480->cmd_buf, chunk, 100 /* FIXME: arbitrary */);
		if (result < 0)
			break;
		done += result;
		memory_address += result;
		size -= result;
	}
	return result;
}
/**
 * Read a block [max size 512] of the device's memory to @i1480's buffer.
 *
 * @i1480: i1480 instance
 * @addr:  Address where to read from.
 * @size:  Size to read. Smaller than or equal to 512.
 * @returns: >= 0 number of bytes read if ok, < 0 errno code on error.
 *
 * NOTE: if the memory address or block is incorrect, you might get a
 *       stall or a different memory read. Caller has to verify the
 *       memory address and size passed back in the @neh structure.
 *
 * Fix: on a short read the error message used to print @result with
 * %zu *after* it had already been overwritten with -EIO, logging a
 * bogus huge number instead of the actual short byte count.
 */
static
int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
{
	ssize_t result = 0, bytes = 0;
	size_t itr, read_size = i1480->buf_size;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);

	BUG_ON(size > i1480->buf_size);
	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
	BUG_ON(read_size > 512);

	if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
		read_size = 4;

	for (itr = 0; itr < size; itr += read_size) {
		size_t itr_addr = addr + itr;
		size_t itr_size = min(read_size, size - itr);
		result = usb_control_msg(
			i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
			0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			itr_addr, (itr_addr >> 16),
			i1480->cmd_buf + itr, itr_size,
			100 /* FIXME: arbitrary */);
		if (result < 0) {
			dev_err(i1480->dev, "%s: USB read error: %zd\n",
				__func__, result);
			goto out;
		}
		if (result != itr_size) {
			/* report the short count before clobbering it */
			dev_err(i1480->dev,
				"%s: partial read got only %zd bytes vs %zu expected\n",
				__func__, result, itr_size);
			result = -EIO;
			goto out;
		}
		bytes += result;
	}
	result = bytes;
out:
	return result;
}
/**
 * Callback for reads on the notification/event endpoint
 *
 * Records the received length in i1480->evt_result and signals
 * whoever is waiting on i1480->evt_complete.
 */
static
void i1480_usb_neep_cb(struct urb *urb)
{
	struct i1480 *i1480 = urb->context;
	struct device *dev = i1480->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "NEEP: reset/noent %d\n", status);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "NEEP: down %d\n", status);
		break;
	default:
		dev_err(dev, "NEEP: unknown status %d\n", status);
		break;
	}
	i1480->evt_result = urb->actual_length;
	complete(&i1480->evt_complete);
}
/**
 * Wait for the MAC FW to initialize
 *
 * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
 * initializing. Get that notification into i1480->evt_buf; upper layer
 * will verify it.
 *
 * Set i1480->evt_result with the result of getting the event or its
 * size (if successful).
 *
 * Delivers the data directly to i1480->evt_buf
 */
static
int i1480_usb_wait_init_done(struct i1480 *i1480)
{
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	struct device *dev = i1480->dev;
	struct usb_endpoint_descriptor *epd;
	int rc;

	init_completion(&i1480->evt_complete);
	i1480->evt_result = -EINPROGRESS;
	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
			 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
			 i1480->evt_buf, i1480->buf_size,
			 i1480_usb_neep_cb, i1480, epd->bInterval);
	rc = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
	if (rc < 0) {
		dev_err(dev, "init done: cannot submit NEEP read: %d\n",
			rc);
		i1480->evt_result = rc;
		return rc;
	}
	/* Wait for the USB callback to get the data */
	rc = wait_for_completion_interruptible_timeout(
		&i1480->evt_complete, HZ);
	if (rc <= 0) {
		if (rc == 0)
			rc = -ETIMEDOUT;
		usb_kill_urb(i1480_usb->neep_urb);
		i1480->evt_result = rc;
		return rc;
	}
	usb_kill_urb(i1480_usb->neep_urb);
	return 0;
}
/**
 * Generic function for issuing commands to the i1480
 *
 * @i1480:    i1480 instance
 * @cmd_name: Name of the command (for error messages)
 * @cmd_size: Size of the command already placed in i1480->cmd_buf
 * @returns:  >= 0 if ok, < 0 errno code on error.
 *
 * Arms a read on the notification/event endpoint and then posts the
 * command (taken from i1480->cmd_buf) as a class control request on
 * EP0; the reply is delivered to i1480->evt_buf by
 * i1480_usb_neep_cb().
 */
static
int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
{
int result;
struct device *dev = i1480->dev;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
struct usb_endpoint_descriptor *epd;
struct uwb_rccb *cmd = i1480->cmd_buf;
u8 iface_no;
/* Post a read on the notification & event endpoint */
iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(
i1480_usb->neep_urb, i1480_usb->usb_dev,
usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
i1480->evt_buf, i1480->buf_size,
i1480_usb_neep_cb, i1480, epd->bInterval);
result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "%s: cannot submit NEEP read: %d\n",
cmd_name, result);
goto error_submit_ep1;
}
/* Now post the command on EP0 */
result = usb_control_msg(
i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
WA_EXEC_RC_CMD,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0, iface_no,
cmd, cmd_size,
100 /* FIXME: this is totally arbitrary */);
if (result < 0) {
dev_err(dev, "%s: control request failed: %d\n",
cmd_name, result);
goto error_submit_ep0;
}
return result;
error_submit_ep0:
usb_kill_urb(i1480_usb->neep_urb);
error_submit_ep1:
return result;
}
/*
 * Probe a i1480 device for uploading firmware.
 *
 * We attach only to interface #0, which is the radio control interface.
 * The firmware is uploaded and the device reset; we always return an
 * error (-ENODEV on success) so USB core never binds us to the iface.
 */
static
int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
struct i1480_usb *i1480_usb;
struct i1480 *i1480;
struct device *dev = &iface->dev;
int result;
result = -ENODEV;
if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
dev_dbg(dev, "not attaching to iface %d\n",
iface->cur_altsetting->desc.bInterfaceNumber);
goto error;
}
if (iface->num_altsetting > 1
&& interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
if (result < 0)
dev_warn(dev,
"can't set altsetting 1 on iface 0: %d\n",
result);
}
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
dev_err(dev, "Unable to allocate instance\n");
goto error;
}
i1480_usb_init(i1480_usb);
i1480 = &i1480_usb->i1480;
i1480->buf_size = 512;
/* single allocation: cmd_buf first half, evt_buf second half */
i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
if (i1480->cmd_buf == NULL) {
dev_err(dev, "Cannot allocate transfer buffers\n");
result = -ENOMEM;
goto error_buf_alloc;
}
i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
result = i1480_usb_create(i1480_usb, iface);
if (result < 0) {
dev_err(dev, "Cannot create instance: %d\n", result);
goto error_create;
}
/* setup the fops and upload the firmware */
i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
i1480->mac_fw_name = "i1480-usb-0.0.bin";
i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
i1480->phy_fw_name = "i1480-phy-0.0.bin";
i1480->dev = &iface->dev;
i1480->write = i1480_usb_write;
i1480->read = i1480_usb_read;
i1480->rc_setup = NULL;
i1480->wait_init_done = i1480_usb_wait_init_done;
i1480->cmd = i1480_usb_cmd;
result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
if (result >= 0) {
/* upload succeeded: reset the device, then decline the bind */
usb_reset_device(i1480_usb->usb_dev);
result = -ENODEV; /* we don't want to bind to the iface */
}
i1480_usb_destroy(i1480_usb);
error_create:
kfree(i1480->cmd_buf);
error_buf_alloc:
kfree(i1480_usb);
error:
return result;
}
/* Firmware images requested by i1480_fw_upload() via this driver */
MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
MODULE_FIRMWARE("i1480-usb-0.0.bin");
MODULE_FIRMWARE("i1480-phy-0.0.bin");
/* Match on exact VID/PID plus all-0xff device and interface class info */
#define i1480_USB_DEV(v, p) \
{ \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE \
| USB_DEVICE_ID_MATCH_DEV_INFO \
| USB_DEVICE_ID_MATCH_INT_INFO, \
.idVendor = (v), \
.idProduct = (p), \
.bDeviceClass = 0xff, \
.bDeviceSubClass = 0xff, \
.bDeviceProtocol = 0xff, \
.bInterfaceClass = 0xff, \
.bInterfaceSubClass = 0xff, \
.bInterfaceProtocol = 0xff, \
}
/** USB device ID's that we handle */
static const struct usb_device_id i1480_usb_id_table[] = {
i1480_USB_DEV(0x8086, 0xdf3b),
i1480_USB_DEV(0x15a9, 0x0005),
i1480_USB_DEV(0x07d1, 0x3802),
i1480_USB_DEV(0x050d, 0x305a),
i1480_USB_DEV(0x3495, 0x3007),
{},
};
MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
/* No disconnect handler: probe() never binds, so none is ever needed */
static struct usb_driver i1480_dfu_driver = {
.name = "i1480-dfu-usb",
.id_table = i1480_usb_id_table,
.probe = i1480_usb_probe,
.disconnect = NULL,
};
module_usb_driver(i1480_dfu_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bestgames1/android_kernel_samsung_kylepro | drivers/uwb/i1480/dfu/usb.c | 9439 | 13404 | /*
* Intel Wireless UWB Link 1480
* USB SKU firmware upload implementation
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* This driver will prepare the i1480 device to behave as a real
* Wireless USB HWA adaptor by uploading the firmware.
*
* When the device is connected or driver is loaded, i1480_usb_probe()
* is called--this will allocate and initialize the device structure,
* fill in the pointers to the common functions (read, write,
* wait_init_done and cmd for HWA command execution) and once that is
* done, call the common firmware uploading routine. Then clean up and
* return -ENODEV, as we don't attach to the device.
*
* The rest are the basic ops we implement that the fw upload code
* uses to do its job. All the ops in the common code are i1480->NAME,
* the functions are i1480_usb_NAME().
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/uwb.h>
#include <linux/usb/wusb.h>
#include <linux/usb/wusb-wa.h>
#include "i1480-dfu.h"
/* Per-device state: generic i1480 core plus its USB transport. */
struct i1480_usb {
struct i1480 i1480; /* embedded core state; recovered via container_of() */
struct usb_device *usb_dev; /* referenced device, released in destroy() */
struct usb_interface *usb_iface; /* interface #0, which we bind to */
struct urb *neep_urb; /* URB for reading from EP1 */
};
/* Initialize the embedded, bus-independent i1480 core state. */
static
void i1480_usb_init(struct i1480_usb *i1480_usb)
{
i1480_init(&i1480_usb->i1480);
}
/*
 * Take references on the USB device/interface, bind the driver data
 * and allocate the notification/event URB.
 *
 * Returns 0 on success, -ENOMEM if the URB cannot be allocated (all
 * references taken above are dropped again in that case).
 */
static
int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
{
	struct usb_device *usb_dev = interface_to_usbdev(iface);

	i1480_usb->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
	i1480_usb->usb_iface = usb_get_intf(iface);
	usb_set_intfdata(iface, i1480_usb);		/* Bind the driver to iface0 */
	i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (i1480_usb->neep_urb != NULL)
		return 0;

	/* allocation failed: undo the bindings taken above */
	usb_set_intfdata(iface, NULL);
	usb_put_intf(iface);
	usb_put_dev(usb_dev);
	return -ENOMEM;
}
/*
 * Undo i1480_usb_create(): stop and free the EP1 URB, unbind the
 * driver data and drop the interface/device references.
 */
static
void i1480_usb_destroy(struct i1480_usb *i1480_usb)
{
	struct usb_interface *iface = i1480_usb->usb_iface;

	usb_kill_urb(i1480_usb->neep_urb);
	usb_free_urb(i1480_usb->neep_urb);
	usb_set_intfdata(iface, NULL);
	usb_put_intf(iface);
	usb_put_dev(i1480_usb->usb_dev);
}
/**
 * Write a buffer to a memory address in the i1480 device
 *
 * @i1480: i1480 instance
 * @memory_address:
 *          Address where to write the data buffer to.
 * @buffer: Buffer to the data
 * @size:   Size of the buffer [has to be < 512].
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
 * so each chunk is bounced through the local i1480 command buffer
 * before being sent; transfers are capped at i1480->buf_size.
 */
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
		    const void *buffer, size_t size)
{
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	size_t done = 0;
	int result = 0;

	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
	while (size > 0) {
		size_t chunk =
			size < i1480->buf_size ? size : i1480->buf_size;
		/* bounce through the DMA-able command buffer */
		memcpy(i1480->cmd_buf, buffer + done, chunk);
		result = usb_control_msg(
			i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
			0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			memory_address, (memory_address >> 16),
			i1480->cmd_buf, chunk, 100 /* FIXME: arbitrary */);
		if (result < 0)
			break;
		done += result;
		memory_address += result;
		size -= result;
	}
	return result;
}
/**
 * Read a block [max size 512] of the device's memory to @i1480's buffer.
 *
 * @i1480: i1480 instance
 * @addr:  Address where to read from.
 * @size:  Size to read. Smaller than or equal to 512.
 * @returns: >= 0 number of bytes read if ok, < 0 errno code on error.
 *
 * NOTE: if the memory address or block is incorrect, you might get a
 *       stall or a different memory read. Caller has to verify the
 *       memory address and size passed back in the @neh structure.
 *
 * Fix: on a short read the error message used to print @result with
 * %zu *after* it had already been overwritten with -EIO, logging a
 * bogus huge number instead of the actual short byte count.
 */
static
int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
{
	ssize_t result = 0, bytes = 0;
	size_t itr, read_size = i1480->buf_size;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);

	BUG_ON(size > i1480->buf_size);
	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
	BUG_ON(read_size > 512);

	if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
		read_size = 4;

	for (itr = 0; itr < size; itr += read_size) {
		size_t itr_addr = addr + itr;
		size_t itr_size = min(read_size, size - itr);
		result = usb_control_msg(
			i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
			0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			itr_addr, (itr_addr >> 16),
			i1480->cmd_buf + itr, itr_size,
			100 /* FIXME: arbitrary */);
		if (result < 0) {
			dev_err(i1480->dev, "%s: USB read error: %zd\n",
				__func__, result);
			goto out;
		}
		if (result != itr_size) {
			/* report the short count before clobbering it */
			dev_err(i1480->dev,
				"%s: partial read got only %zd bytes vs %zu expected\n",
				__func__, result, itr_size);
			result = -EIO;
			goto out;
		}
		bytes += result;
	}
	result = bytes;
out:
	return result;
}
/**
 * Callback for reads on the notification/event endpoint
 *
 * Records the received length in i1480->evt_result and signals
 * whoever is waiting on i1480->evt_complete.
 */
static
void i1480_usb_neep_cb(struct urb *urb)
{
	struct i1480 *i1480 = urb->context;
	struct device *dev = i1480->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "NEEP: reset/noent %d\n", status);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "NEEP: down %d\n", status);
		break;
	default:
		dev_err(dev, "NEEP: unknown status %d\n", status);
		break;
	}
	i1480->evt_result = urb->actual_length;
	complete(&i1480->evt_complete);
}
/**
 * Wait for the MAC FW to initialize
 *
 * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
 * initializing. Get that notification into i1480->evt_buf; upper layer
 * will verify it.
 *
 * Set i1480->evt_result with the result of getting the event or its
 * size (if successful).
 *
 * Delivers the data directly to i1480->evt_buf
 */
static
int i1480_usb_wait_init_done(struct i1480 *i1480)
{
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	struct device *dev = i1480->dev;
	struct usb_endpoint_descriptor *epd;
	int rc;

	init_completion(&i1480->evt_complete);
	i1480->evt_result = -EINPROGRESS;
	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
			 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
			 i1480->evt_buf, i1480->buf_size,
			 i1480_usb_neep_cb, i1480, epd->bInterval);
	rc = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
	if (rc < 0) {
		dev_err(dev, "init done: cannot submit NEEP read: %d\n",
			rc);
		i1480->evt_result = rc;
		return rc;
	}
	/* Wait for the USB callback to get the data */
	rc = wait_for_completion_interruptible_timeout(
		&i1480->evt_complete, HZ);
	if (rc <= 0) {
		if (rc == 0)
			rc = -ETIMEDOUT;
		usb_kill_urb(i1480_usb->neep_urb);
		i1480->evt_result = rc;
		return rc;
	}
	usb_kill_urb(i1480_usb->neep_urb);
	return 0;
}
/**
 * Generic function for issuing commands to the i1480
 *
 * @i1480:    i1480 instance
 * @cmd_name: Name of the command (for error messages)
 * @cmd_size: Size of the command already placed in i1480->cmd_buf
 * @returns:  >= 0 if ok, < 0 errno code on error.
 *
 * Arms a read on the notification/event endpoint and then posts the
 * command (taken from i1480->cmd_buf) as a class control request on
 * EP0; the reply is delivered to i1480->evt_buf by
 * i1480_usb_neep_cb().
 */
static
int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
{
int result;
struct device *dev = i1480->dev;
struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
struct usb_endpoint_descriptor *epd;
struct uwb_rccb *cmd = i1480->cmd_buf;
u8 iface_no;
/* Post a read on the notification & event endpoint */
iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(
i1480_usb->neep_urb, i1480_usb->usb_dev,
usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
i1480->evt_buf, i1480->buf_size,
i1480_usb_neep_cb, i1480, epd->bInterval);
result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
if (result < 0) {
dev_err(dev, "%s: cannot submit NEEP read: %d\n",
cmd_name, result);
goto error_submit_ep1;
}
/* Now post the command on EP0 */
result = usb_control_msg(
i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
WA_EXEC_RC_CMD,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0, iface_no,
cmd, cmd_size,
100 /* FIXME: this is totally arbitrary */);
if (result < 0) {
dev_err(dev, "%s: control request failed: %d\n",
cmd_name, result);
goto error_submit_ep0;
}
return result;
error_submit_ep0:
usb_kill_urb(i1480_usb->neep_urb);
error_submit_ep1:
return result;
}
/*
 * Probe a i1480 device for uploading firmware.
 *
 * We attach only to interface #0, which is the radio control interface.
 * The firmware is uploaded and the device reset; we always return an
 * error (-ENODEV on success) so USB core never binds us to the iface.
 */
static
int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
struct i1480_usb *i1480_usb;
struct i1480 *i1480;
struct device *dev = &iface->dev;
int result;
result = -ENODEV;
if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
dev_dbg(dev, "not attaching to iface %d\n",
iface->cur_altsetting->desc.bInterfaceNumber);
goto error;
}
if (iface->num_altsetting > 1
&& interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
if (result < 0)
dev_warn(dev,
"can't set altsetting 1 on iface 0: %d\n",
result);
}
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
dev_err(dev, "Unable to allocate instance\n");
goto error;
}
i1480_usb_init(i1480_usb);
i1480 = &i1480_usb->i1480;
i1480->buf_size = 512;
/* single allocation: cmd_buf first half, evt_buf second half */
i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
if (i1480->cmd_buf == NULL) {
dev_err(dev, "Cannot allocate transfer buffers\n");
result = -ENOMEM;
goto error_buf_alloc;
}
i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;
result = i1480_usb_create(i1480_usb, iface);
if (result < 0) {
dev_err(dev, "Cannot create instance: %d\n", result);
goto error_create;
}
/* setup the fops and upload the firmware */
i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
i1480->mac_fw_name = "i1480-usb-0.0.bin";
i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
i1480->phy_fw_name = "i1480-phy-0.0.bin";
i1480->dev = &iface->dev;
i1480->write = i1480_usb_write;
i1480->read = i1480_usb_read;
i1480->rc_setup = NULL;
i1480->wait_init_done = i1480_usb_wait_init_done;
i1480->cmd = i1480_usb_cmd;
result = i1480_fw_upload(&i1480_usb->i1480); /* the real thing */
if (result >= 0) {
/* upload succeeded: reset the device, then decline the bind */
usb_reset_device(i1480_usb->usb_dev);
result = -ENODEV; /* we don't want to bind to the iface */
}
i1480_usb_destroy(i1480_usb);
error_create:
kfree(i1480->cmd_buf);
error_buf_alloc:
kfree(i1480_usb);
error:
return result;
}
MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
MODULE_FIRMWARE("i1480-usb-0.0.bin");
MODULE_FIRMWARE("i1480-phy-0.0.bin");
/*
 * Build a usb_device_id entry that matches an exact vendor/product pair
 * *and* the vendor-specific (0xff) device and interface class triplets.
 */
#define i1480_USB_DEV(v, p)				\
{							\
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE	\
		 | USB_DEVICE_ID_MATCH_DEV_INFO		\
		 | USB_DEVICE_ID_MATCH_INT_INFO,	\
	.idVendor = (v),				\
	.idProduct = (p),				\
	.bDeviceClass = 0xff,				\
	.bDeviceSubClass = 0xff,			\
	.bDeviceProtocol = 0xff,			\
	.bInterfaceClass = 0xff,			\
	.bInterfaceSubClass = 0xff,			\
	.bInterfaceProtocol = 0xff,			\
}
/** USB device ID's that we handle */
static const struct usb_device_id i1480_usb_id_table[] = {
	i1480_USB_DEV(0x8086, 0xdf3b),	/* Intel */
	i1480_USB_DEV(0x15a9, 0x0005),
	i1480_USB_DEV(0x07d1, 0x3802),
	i1480_USB_DEV(0x050d, 0x305a),
	i1480_USB_DEV(0x3495, 0x3007),
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
static struct usb_driver i1480_dfu_driver = {
	.name = "i1480-dfu-usb",
	.id_table = i1480_usb_id_table,
	.probe = i1480_usb_probe,
	/* probe never keeps the interface bound (it returns -ENODEV after
	 * the firmware upload), so no disconnect handler is needed */
	.disconnect = NULL,
};
module_usb_driver(i1480_dfu_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Xmister/endeavoru-jb-crc-3.1.10 | drivers/mca/mca-driver.c | 9951 | 1788 | /* -*- mode: c; c-basic-offset: 8 -*- */
/*
* MCA driver support functions for sysfs.
*
* (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/device.h>
#include <linux/mca.h>
#include <linux/module.h>
/*
 * mca_register_driver - register a driver with the MCA bus
 * @mca_drv: driver to register
 *
 * On machines without an MCA bus this is a successful no-op.
 * Returns 0 on success or the negative error from driver_register().
 */
int mca_register_driver(struct mca_driver *mca_drv)
{
	int ret;

	if (!MCA_bus)
		return 0;

	mca_drv->driver.bus = &mca_bus_type;
	ret = driver_register(&mca_drv->driver);
	if (ret < 0)
		return ret;

	mca_drv->integrated_id = 0;
	return 0;
}
EXPORT_SYMBOL(mca_register_driver);
/*
 * mca_register_driver_integrated - register a driver for an integrated
 * MCA adapter
 * @mca_driver: driver to register
 * @integrated_id: pseudo POS id of the integrated adapter
 *
 * Returns 0 on success, negative error code otherwise.
 */
int mca_register_driver_integrated(struct mca_driver *mca_driver,
				   int integrated_id)
{
	int ret = mca_register_driver(mca_driver);

	if (ret == 0)
		mca_driver->integrated_id = integrated_id;

	return ret;
}
EXPORT_SYMBOL(mca_register_driver_integrated);
/*
 * mca_unregister_driver - remove a previously registered MCA driver
 * @mca_drv: driver to unregister
 *
 * No-op on machines without an MCA bus (mirrors mca_register_driver()).
 */
void mca_unregister_driver(struct mca_driver *mca_drv)
{
	if (!MCA_bus)
		return;

	driver_unregister(&mca_drv->driver);
}
EXPORT_SYMBOL(mca_unregister_driver);
| gpl-2.0 |
vic-nation/android_kernel_samsung_d2_nofork | tools/power/cpupower/bench/benchmark.c | 9951 | 5607 | /* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include "config.h"
#include "system.h"
#include "benchmark.h"
/*
 * Print out progress if we log into a file.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe inside un-braced if/else bodies (the bare
 * `if` of the previous version was a dangling-else hazard).
 * Relies on a `struct config *config` being in scope at the call site.
 */
#define show_progress(total_time, progress_time) \
do { \
	if (config->output != stdout) { \
		fprintf(stdout, "Progress: %02lu %%\r", \
			(progress_time * 100) / total_time); \
		fflush(stdout); \
	} \
} while (0)
/**
 * calculate_timespace - compute how many calculation rounds are needed
 * to approximate a given load time
 *
 * @param load aimed load time in µs
 * @param config benchmark configuration (only `verbose` is consulted)
 *
 * Times an initial batch of GAUGECOUNT rounds, then iteratively rescales
 * the round count (4 refinement passes) until the measured time matches
 * the requested load time.
 *
 * @retval rounds of calculation
 **/
unsigned int calculate_timespace(long load, struct config *config)
{
	int i;
	long long now, then;
	unsigned int estimated = GAUGECOUNT;
	unsigned int rounds = 0;
	unsigned int timed = 0;

	if (config->verbose)
		printf("calibrating load of %lius, please wait...\n", load);

	/* get the initial calculation time for a specific number of rounds */
	now = get_time();
	ROUNDS(estimated);
	then = get_time();

	timed = (unsigned int)(then - now);
	if (timed == 0)
		timed = 1;	/* avoid division by zero on coarse clocks */

	/* approximation of the wanted load time by comparing with the
	 * initial calculation time */
	for (i = 0; i < 4; i++) {
		rounds = (unsigned int)(load * estimated / timed);
		dprintf("calibrating with %u rounds\n", rounds);
		now = get_time();
		ROUNDS(rounds);
		then = get_time();

		timed = (unsigned int)(then - now);
		if (timed == 0)
			timed = 1;	/* same guard as above */
		estimated = rounds;
	}
	if (config->verbose)
		printf("calibration done\n");

	return estimated;
}
/**
 * start_benchmark - run the sleep/load benchmark for both governors
 *
 * Generates a specific sleep and load time with the performance
 * governor and compares the time used for the same calculations when
 * done under the configured powersave governor. Results (one line per
 * round) are written to config->output.
 *
 * @param config config values for the benchmark
 **/
void start_benchmark(struct config *config)
{
	unsigned int _round, cycle;
	long long now, then;
	long sleep_time = 0, load_time = 0;
	long performance_time = 0, powersave_time = 0;
	unsigned int calculations;
	unsigned long total_time = 0, progress_time = 0;
	sleep_time = config->sleep;
	load_time = config->load;
	/* For the progress bar: precompute the total expected runtime */
	for (_round = 1; _round <= config->rounds; _round++)
		total_time += _round * (config->sleep + config->load);
	total_time *= 2; /* powersave and performance cycles */
	for (_round = 0; _round < config->rounds; _round++) {
		performance_time = 0LL;
		powersave_time = 0LL;
		show_progress(total_time, progress_time);
		/* set the cpufreq governor to "performance" which disables
		 * P-State switching. */
		if (set_cpufreq_governor("performance", config->cpu) != 0)
			return;
		/* calibrate the calculation time. the resulting calculation
		 * _rounds should produce a load which matches the configured
		 * load time */
		calculations = calculate_timespace(load_time, config);
		if (config->verbose)
			printf("_round %i: doing %u cycles with %u calculations"
			       " for %lius\n", _round + 1, config->cycles,
			       calculations, load_time);
		fprintf(config->output, "%u %li %li ",
			_round, load_time, sleep_time);
		if (config->verbose)
			printf("avarage: %lius, rps:%li\n",
			       load_time / calculations,
			       1000000 * calculations / load_time);
		/* do some sleep/load cycles with the performance governor */
		for (cycle = 0; cycle < config->cycles; cycle++) {
			now = get_time();
			usleep(sleep_time);
			ROUNDS(calculations);
			then = get_time();
			/* elapsed time minus requested sleep = actual load */
			performance_time += then - now - sleep_time;
			if (config->verbose)
				printf("performance cycle took %lius, "
				       "sleep: %lius, "
				       "load: %lius, rounds: %u\n",
				       (long)(then - now), sleep_time,
				       load_time, calculations);
		}
		fprintf(config->output, "%li ",
			performance_time / config->cycles);
		progress_time += sleep_time + load_time;
		show_progress(total_time, progress_time);
		/* set the powersave governor which activates P-State switching
		 * again */
		if (set_cpufreq_governor(config->governor, config->cpu) != 0)
			return;
		/* again, do some sleep/load cycles with the
		 * powersave governor */
		for (cycle = 0; cycle < config->cycles; cycle++) {
			now = get_time();
			usleep(sleep_time);
			ROUNDS(calculations);
			then = get_time();
			powersave_time += then - now - sleep_time;
			if (config->verbose)
				printf("powersave cycle took %lius, "
				       "sleep: %lius, "
				       "load: %lius, rounds: %u\n",
				       (long)(then - now), sleep_time,
				       load_time, calculations);
		}
		progress_time += sleep_time + load_time;
		/* compare the avarage sleep/load cycles */
		fprintf(config->output, "%li ",
			powersave_time / config->cycles);
		fprintf(config->output, "%.3f\n",
			performance_time * 100.0 / powersave_time);
		fflush(config->output);
		if (config->verbose)
			printf("performance is at %.2f%%\n",
			       performance_time * 100.0 / powersave_time);
		/* next round uses longer sleep/load intervals */
		sleep_time += config->sleep_step;
		load_time += config->load_step;
	}
}
| gpl-2.0 |
lazy404/openvz-test | net/tipc/subscr.c | 736 | 16668 | /*
* net/tipc/subscr.c: TIPC network topology service
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005-2007, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "dbg.h"
#include "name_table.h"
#include "port.h"
#include "ref.h"
#include "subscr.h"
/**
* struct subscriber - TIPC network topology subscriber
* @port_ref: object reference to server port connecting to subscriber
* @lock: pointer to spinlock controlling access to subscriber's server port
* @subscriber_list: adjacent subscribers in top. server's list of subscribers
* @subscription_list: list of subscription objects for this subscriber
*/
struct subscriber {
u32 port_ref;
spinlock_t *lock;
struct list_head subscriber_list;
struct list_head subscription_list;
};
/**
* struct top_srv - TIPC network topology subscription service
* @user_ref: TIPC userid of subscription service
* @setup_port: reference to TIPC port that handles subscription requests
* @subscription_count: number of active subscriptions (not subscribers!)
* @subscriber_list: list of ports subscribing to service
* @lock: spinlock govering access to subscriber list
*/
struct top_srv {
u32 user_ref;
u32 setup_port;
atomic_t subscription_count;
struct list_head subscriber_list;
spinlock_t lock;
};
static struct top_srv topsrv = { 0 };
/**
 * htohl - convert a 32-bit value to the endianness used by the peer
 * @in: value to convert
 * @swap: non-zero if the byte order must be reversed
 *
 * Returns the (possibly byte-swapped) value.
 */
static u32 htohl(u32 in, int swap)
{
	if (swap)
		return swab32(in);
	return in;
}
/**
 * subscr_send_event - send a message containing a tipc_event to the subscriber
 * @sub: subscription that triggered the event
 * @found_lower: lower bound of the matching name sequence range
 * @found_upper: upper bound of the matching name sequence range
 * @event: event type being reported
 * @port_ref: port reference of the affected publication
 * @node: node of the affected publication
 *
 * Note: Must not hold subscriber's server port lock, since tipc_send() will
 * try to take the lock if the message is rejected and returned!
 */
static void subscr_send_event(struct subscription *sub,
			      u32 found_lower,
			      u32 found_upper,
			      u32 event,
			      u32 port_ref,
			      u32 node)
{
	struct iovec msg_sect;
	msg_sect.iov_base = (void *)&sub->evt;
	msg_sect.iov_len = sizeof(struct tipc_event);
	/* fill in the event using the subscriber's own endianness */
	sub->evt.event = htohl(event, sub->swap);
	sub->evt.found_lower = htohl(found_lower, sub->swap);
	sub->evt.found_upper = htohl(found_upper, sub->swap);
	sub->evt.port.ref = htohl(port_ref, sub->swap);
	sub->evt.port.node = htohl(node, sub->swap);
	tipc_send(sub->server_ref, 1, &msg_sect);
}
/**
 * tipc_subscr_overlap - test for subscription overlap with the given values
 * @sub: subscription to check against
 * @found_lower: lower bound of the range to test
 * @found_upper: upper bound of the range to test
 *
 * Returns 1 if there is overlap, otherwise 0.
 */
int tipc_subscr_overlap(struct subscription *sub,
			u32 found_lower,
			u32 found_upper)
{
	u32 lower = found_lower;
	u32 upper = found_upper;

	/* clamp the candidate range to the subscribed range ... */
	if (lower < sub->seq.lower)
		lower = sub->seq.lower;
	if (upper > sub->seq.upper)
		upper = sub->seq.upper;

	/* ... a non-empty intersection means overlap */
	return lower <= upper;
}
/**
 * tipc_subscr_report_overlap - issue event if there is subscription overlap
 * @sub: subscription to check
 * @found_lower: lower bound of the affected range
 * @found_upper: upper bound of the affected range
 * @event: event type to report
 * @port_ref: port reference of the affected publication
 * @node: node of the affected publication
 * @must: non-zero to report even if the subscriber only wants service events
 *
 * Protected by nameseq.lock in name_table.c
 */
void tipc_subscr_report_overlap(struct subscription *sub,
				u32 found_lower,
				u32 found_upper,
				u32 event,
				u32 port_ref,
				u32 node,
				int must)
{
	int wanted = must || (sub->filter & TIPC_SUB_PORTS);

	if (tipc_subscr_overlap(sub, found_lower, found_upper) && wanted)
		sub->event_cb(sub, found_lower, found_upper, event,
			      port_ref, node);
}
/**
 * subscr_timeout - subscription timeout has occurred
 * @sub: subscription whose timer expired
 *
 * Runs from timer context; delivers a TIPC_SUBSCR_TIMEOUT event and
 * destroys the subscription, unless it is concurrently being cancelled
 * or its subscriber is terminating.
 */
static void subscr_timeout(struct subscription *sub)
{
	struct port *server_port;
	/* Validate server port reference (in case subscriber is terminating) */
	server_port = tipc_port_lock(sub->server_ref);
	if (server_port == NULL)
		return;
	/* Validate timeout (in case subscription is being cancelled) */
	if (sub->timeout == TIPC_WAIT_FOREVER) {
		tipc_port_unlock(server_port);
		return;
	}
	/* Unlink subscription from name table */
	tipc_nametbl_unsubscribe(sub);
	/* Unlink subscription from subscriber */
	list_del(&sub->subscription_list);
	/* Release subscriber's server port -- must be done before sending,
	 * see the locking note on subscr_send_event() */
	tipc_port_unlock(server_port);
	/* Notify subscriber of timeout */
	subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
			  TIPC_SUBSCR_TIMEOUT, 0, 0);
	/* Now destroy subscription */
	k_term_timer(&sub->timer);
	kfree(sub);
	atomic_dec(&topsrv.subscription_count);
}
/**
 * subscr_del - delete a subscription within a subscription list
 * @sub: subscription to delete
 *
 * Unlinks the subscription from the name table and from its subscriber,
 * frees it and drops the global subscription count.
 *
 * Called with subscriber port locked.
 */
static void subscr_del(struct subscription *sub)
{
	tipc_nametbl_unsubscribe(sub);
	list_del(&sub->subscription_list);
	kfree(sub);
	atomic_dec(&topsrv.subscription_count);
}
/**
 * subscr_terminate - terminate communication with a subscriber
 * @subscriber: subscriber to terminate
 *
 * Called with subscriber port locked. Routine must temporarily release lock
 * to enable subscription timeout routine(s) to finish without deadlocking;
 * the lock is then reclaimed to allow caller to release it upon return.
 * (This should work even in the unlikely event some other thread creates
 * a new object reference in the interim that uses this lock; this routine will
 * simply wait for it to be released, then claim it.)
 */
static void subscr_terminate(struct subscriber *subscriber)
{
	u32 port_ref;
	struct subscription *sub;
	struct subscription *sub_temp;
	/* Invalidate subscriber reference (timeouts see port_ref == 0) */
	port_ref = subscriber->port_ref;
	subscriber->port_ref = 0;
	spin_unlock_bh(subscriber->lock);
	/* Sever connection to subscriber */
	tipc_shutdown(port_ref);
	tipc_deleteport(port_ref);
	/* Destroy any existing subscriptions for subscriber; pending timers
	 * must be cancelled first so they cannot fire on freed memory */
	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
				 subscription_list) {
		if (sub->timeout != TIPC_WAIT_FOREVER) {
			k_cancel_timer(&sub->timer);
			k_term_timer(&sub->timer);
		}
		dbg("Term: Removing sub %u,%u,%u from subscriber %x list\n",
		    sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
		subscr_del(sub);
	}
	/* Remove subscriber from topology server's subscriber list */
	spin_lock_bh(&topsrv.lock);
	list_del(&subscriber->subscriber_list);
	spin_unlock_bh(&topsrv.lock);
	/* Reclaim subscriber lock so the caller can release it */
	spin_lock_bh(subscriber->lock);
	/* Now destroy subscriber */
	kfree(subscriber);
}
/**
 * subscr_cancel - handle subscription cancellation request
 * @s: cancellation request (matched byte-for-byte against stored requests)
 * @subscriber: subscriber that sent the request
 *
 * Called with subscriber port locked. Routine must temporarily release lock
 * to enable the subscription timeout routine to finish without deadlocking;
 * the lock is then reclaimed to allow caller to release it upon return.
 *
 * Note that fields of 's' use subscriber's endianness!
 */
static void subscr_cancel(struct tipc_subscr *s,
			  struct subscriber *subscriber)
{
	struct subscription *sub;
	struct subscription *sub_temp;
	int found = 0;
	/* Find first matching subscription, exit if not found */
	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
				 subscription_list) {
		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
			found = 1;
			break;
		}
	}
	if (!found)
		return;
	/* Cancel subscription timer (if used), then delete subscription;
	 * setting timeout to TIPC_WAIT_FOREVER first makes a concurrently
	 * running subscr_timeout() bail out (see its timeout check) */
	if (sub->timeout != TIPC_WAIT_FOREVER) {
		sub->timeout = TIPC_WAIT_FOREVER;
		spin_unlock_bh(subscriber->lock);
		k_cancel_timer(&sub->timer);
		k_term_timer(&sub->timer);
		spin_lock_bh(subscriber->lock);
	}
	dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n",
	    sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
	subscr_del(sub);
}
/**
 * subscr_subscribe - create subscription for subscriber
 * @s: subscription request, in the subscriber's endianness
 * @subscriber: subscriber issuing the request
 *
 * Called with subscriber port locked.
 *
 * Returns the new subscription, or NULL if the request was a
 * cancellation or was rejected (rejection also terminates the
 * subscriber connection).
 */
static struct subscription *subscr_subscribe(struct tipc_subscr *s,
					     struct subscriber *subscriber)
{
	struct subscription *sub;
	int swap;
	/* Determine subscriber's endianness: a valid request must have at
	 * least one filter bit set when read in native byte order */
	swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
	/* Detect & process a subscription cancellation request */
	if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
		subscr_cancel(s, subscriber);
		return NULL;
	}
	/* Refuse subscription if global limit exceeded */
	if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
		warn("Subscription rejected, subscription limit reached (%u)\n",
		     tipc_max_subscriptions);
		subscr_terminate(subscriber);
		return NULL;
	}
	/* Allocate subscription object */
	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
	if (!sub) {
		warn("Subscription rejected, no memory\n");
		subscr_terminate(subscriber);
		return NULL;
	}
	/* Initialize subscription object, converting to host byte order */
	sub->seq.type = htohl(s->seq.type, swap);
	sub->seq.lower = htohl(s->seq.lower, swap);
	sub->seq.upper = htohl(s->seq.upper, swap);
	sub->timeout = htohl(s->timeout, swap);
	sub->filter = htohl(s->filter, swap);
	/* Reject if neither or both of PORTS/SERVICE are set, or if the
	 * name sequence range is inverted */
	if ((!(sub->filter & TIPC_SUB_PORTS)
	     == !(sub->filter & TIPC_SUB_SERVICE))
	    || (sub->seq.lower > sub->seq.upper)) {
		warn("Subscription rejected, illegal request\n");
		kfree(sub);
		subscr_terminate(subscriber);
		return NULL;
	}
	sub->event_cb = subscr_send_event;
	INIT_LIST_HEAD(&sub->nameseq_list);
	list_add(&sub->subscription_list, &subscriber->subscription_list);
	sub->server_ref = subscriber->port_ref;
	sub->swap = swap;
	/* Keep the original request for event replies and cancel matching */
	memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
	atomic_inc(&topsrv.subscription_count);
	if (sub->timeout != TIPC_WAIT_FOREVER) {
		k_init_timer(&sub->timer,
			     (Handler)subscr_timeout, (unsigned long)sub);
		k_start_timer(&sub->timer, sub->timeout);
	}
	return sub;
}
/**
 * subscr_conn_shutdown_event - handle termination request from subscriber
 *
 * Called with subscriber's server port unlocked.
 */
static void subscr_conn_shutdown_event(void *usr_handle,
				       u32 port_ref,
				       struct sk_buff **buf,
				       unsigned char const *data,
				       unsigned int size,
				       int reason)
{
	struct subscriber *subscriber = usr_handle;
	spinlock_t *subscriber_lock;
	/* Locking the port also validates it; bail out if it is gone */
	if (tipc_port_lock(port_ref) == NULL)
		return;
	/* Save lock pointer: subscr_terminate() frees the subscriber */
	subscriber_lock = subscriber->lock;
	subscr_terminate(subscriber);
	spin_unlock_bh(subscriber_lock);
}
/**
 * subscr_conn_msg_event - handle new subscription request from subscriber
 *
 * Called with subscriber's server port unlocked.
 */
static void subscr_conn_msg_event(void *usr_handle,
				  u32 port_ref,
				  struct sk_buff **buf,
				  const unchar *data,
				  u32 size)
{
	struct subscriber *subscriber = usr_handle;
	spinlock_t *subscriber_lock;
	struct subscription *sub;
	/*
	 * Lock subscriber's server port (& make a local copy of lock pointer,
	 * in case subscriber is deleted while processing subscription request)
	 */
	if (tipc_port_lock(port_ref) == NULL)
		return;
	subscriber_lock = subscriber->lock;
	/* a request of any other size is malformed -> drop the subscriber */
	if (size != sizeof(struct tipc_subscr)) {
		subscr_terminate(subscriber);
		spin_unlock_bh(subscriber_lock);
	} else {
		sub = subscr_subscribe((struct tipc_subscr *)data, subscriber);
		spin_unlock_bh(subscriber_lock);
		if (sub != NULL) {
			/*
			 * We must release the server port lock before adding a
			 * subscription to the name table since TIPC needs to be
			 * able to (re)acquire the port lock if an event message
			 * issued by the subscription process is rejected and
			 * returned. The subscription cannot be deleted while
			 * it is being added to the name table because:
			 * a) the single-threading of the native API port code
			 *    ensures the subscription cannot be cancelled and
			 *    the subscriber connection cannot be broken, and
			 * b) the name table lock ensures the subscription
			 *    timeout code cannot delete the subscription,
			 * so the subscription object is still protected.
			 */
			tipc_nametbl_subscribe(sub);
		}
	}
}
/**
 * subscr_named_msg_event - handle request to establish a new subscriber
 *
 * Creates a subscriber object and a dedicated server port connected to
 * the requesting port, then processes any subscription request that was
 * piggy-backed on the connect message.
 */
static void subscr_named_msg_event(void *usr_handle,
				   u32 port_ref,
				   struct sk_buff **buf,
				   const unchar *data,
				   u32 size,
				   u32 importance,
				   struct tipc_portid const *orig,
				   struct tipc_name_seq const *dest)
{
	static struct iovec msg_sect = {NULL, 0};
	struct subscriber *subscriber;
	u32 server_port_ref;
	/* Create subscriber object */
	subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
	if (subscriber == NULL) {
		warn("Subscriber rejected, no memory\n");
		return;
	}
	INIT_LIST_HEAD(&subscriber->subscription_list);
	INIT_LIST_HEAD(&subscriber->subscriber_list);
	/* Create server port & establish connection to subscriber */
	tipc_createport(topsrv.user_ref,
			subscriber,
			importance,
			NULL,
			NULL,
			subscr_conn_shutdown_event,
			NULL,
			NULL,
			subscr_conn_msg_event,
			NULL,
			&subscriber->port_ref);
	if (subscriber->port_ref == 0) {
		warn("Subscriber rejected, unable to create port\n");
		kfree(subscriber);
		return;
	}
	tipc_connect2port(subscriber->port_ref, orig);
	/* Lock server port (& save lock address for future use) */
	subscriber->lock = tipc_port_lock(subscriber->port_ref)->publ.lock;
	/* Add subscriber to topology server's subscriber list */
	spin_lock_bh(&topsrv.lock);
	list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
	spin_unlock_bh(&topsrv.lock);
	/* Unlock server port */
	server_port_ref = subscriber->port_ref;
	spin_unlock_bh(subscriber->lock);
	/* Send an ACK (empty message) to complete connection handshaking */
	tipc_send(server_port_ref, 1, &msg_sect);
	/* Handle optional subscription request bundled with the connect */
	if (size != 0) {
		subscr_conn_msg_event(subscriber, server_port_ref,
				      buf, data, size);
	}
}
/**
 * tipc_subscr_start - start the TIPC topology subscription service
 *
 * Attaches a TIPC user, creates the well-known setup port and publishes
 * it as {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV} with node scope.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int tipc_subscr_start(void)
{
	struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
	int res = -1;
	/* reset any previous server state */
	memset(&topsrv, 0, sizeof (topsrv));
	spin_lock_init(&topsrv.lock);
	INIT_LIST_HEAD(&topsrv.subscriber_list);
	spin_lock_bh(&topsrv.lock);
	res = tipc_attach(&topsrv.user_ref, NULL, NULL);
	if (res) {
		spin_unlock_bh(&topsrv.lock);
		return res;
	}
	res = tipc_createport(topsrv.user_ref,
			      NULL,
			      TIPC_CRITICAL_IMPORTANCE,
			      NULL,
			      NULL,
			      NULL,
			      NULL,
			      subscr_named_msg_event,
			      NULL,
			      NULL,
			      &topsrv.setup_port);
	if (res)
		goto failed;
	res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
	if (res)
		goto failed;
	spin_unlock_bh(&topsrv.lock);
	return 0;
failed:
	/* undo the attach; the port (if created) goes away with the user */
	err("Failed to create subscription service\n");
	tipc_detach(topsrv.user_ref);
	topsrv.user_ref = 0;
	spin_unlock_bh(&topsrv.lock);
	return res;
}
/**
 * tipc_subscr_stop - stop the TIPC topology subscription service
 *
 * Deletes the setup port, terminates all remaining subscribers and
 * detaches the service's TIPC user. Safe to call when the service was
 * never started (topsrv.user_ref == 0).
 */
void tipc_subscr_stop(void)
{
	struct subscriber *subscriber;
	struct subscriber *subscriber_temp;
	spinlock_t *subscriber_lock;
	if (topsrv.user_ref) {
		tipc_deleteport(topsrv.setup_port);
		list_for_each_entry_safe(subscriber, subscriber_temp,
					 &topsrv.subscriber_list,
					 subscriber_list) {
			/* save lock pointer: terminate frees the subscriber */
			subscriber_lock = subscriber->lock;
			spin_lock_bh(subscriber_lock);
			subscr_terminate(subscriber);
			spin_unlock_bh(subscriber_lock);
		}
		tipc_detach(topsrv.user_ref);
		topsrv.user_ref = 0;
	}
}
/**
 * tipc_ispublished - check whether a name has at least one publication
 * @name: TIPC name to look up
 *
 * Returns 1 if the name translates to a port, otherwise 0.
 */
int tipc_ispublished(struct tipc_name const *name)
{
	u32 domain = 0;
	u32 ref = tipc_nametbl_translate(name->type, name->instance, &domain);

	return ref != 0;
}
| gpl-2.0 |
animalcreek/linux | tools/testing/selftests/networking/timestamping/txtimestamp.c | 992 | 13303 | /*
* Copyright 2014 Google Inc.
* Author: willemb@google.com (Willem de Bruijn)
*
* Test software tx timestamping, including
*
* - SCHED, SND and ACK timestamps
* - RAW, UDP and TCP
* - IPv4 and IPv6
* - various packet sizes (to test GSO and TSO)
*
* Consult the command line arguments for help on running
* the various testcases.
*
* This test requires a dummy TCP server.
* A simple `nc6 [-u] -l -p $DESTPORT` will do
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <asm/types.h>
#include <error.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <netdb.h>
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netpacket/packet.h>
#include <poll.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
/* command line parameters */
static int cfg_proto = SOCK_STREAM;
static int cfg_ipproto = IPPROTO_TCP;
static int cfg_num_pkts = 4;
static int do_ipv4 = 1;
static int do_ipv6 = 1;
static int cfg_payload_len = 10;
static bool cfg_show_payload;
static bool cfg_do_pktinfo;
static bool cfg_loop_nodata;
static uint16_t dest_port = 9000;
static struct sockaddr_in daddr;
static struct sockaddr_in6 daddr6;
static struct timespec ts_prev;
/* Print one timestamp line plus the delta (in us) to the previously
 * printed timestamp. The old locals were named *_ms but actually held
 * microseconds; renamed accordingly. */
static void __print_timestamp(const char *name, struct timespec *cur,
			      uint32_t key, int payload_len)
{
	int64_t cur_us, prev_us;

	/* an all-zero timespec means "no timestamp recorded" */
	if (!(cur->tv_sec | cur->tv_nsec))
		return;

	fprintf(stderr, " %s: %lu s %lu us (seq=%u, len=%u)",
		name, cur->tv_sec, cur->tv_nsec / 1000,
		key, payload_len);

	if (ts_prev.tv_sec | ts_prev.tv_nsec) {
		cur_us = (long) cur->tv_sec * 1000 * 1000;
		cur_us += cur->tv_nsec / 1000;
		prev_us = (long) ts_prev.tv_sec * 1000 * 1000;
		prev_us += ts_prev.tv_nsec / 1000;
		fprintf(stderr, " (%+" PRId64 " us)", cur_us - prev_us);
	}

	ts_prev = *cur;
	fprintf(stderr, "\n");
}
/* Print the current (userspace) wall-clock time as a USR timestamp. */
static void print_timestamp_usr(void)
{
	struct timeval tv;	/* avoid dependency on -lrt */
	struct timespec ts;

	gettimeofday(&tv, NULL);
	ts.tv_sec = tv.tv_sec;
	ts.tv_nsec = tv.tv_usec * 1000;

	__print_timestamp(" USR", &ts, 0, 0);
}
/* Map a kernel tx-timestamp type to its label and print the first
 * timestamp of the bundle. Unknown types are fatal. */
static void print_timestamp(struct scm_timestamping *tss, int tstype,
			    int tskey, int payload_len)
{
	const char *tsname;

	if (tstype == SCM_TSTAMP_SCHED)
		tsname = " ENQ";
	else if (tstype == SCM_TSTAMP_SND)
		tsname = " SND";
	else if (tstype == SCM_TSTAMP_ACK)
		tsname = " ACK";
	else
		error(1, 0, "unknown timestamp type: %u",
		      tstype);

	__print_timestamp(tsname, &tss->ts[0], tskey, payload_len);
}
/* TODO: convert to check_and_print payload once API is stable */
static void print_payload(char *data, int len)
{
int i;
if (!len)
return;
if (len > 70)
len = 70;
fprintf(stderr, "payload: ");
for (i = 0; i < len; i++)
fprintf(stderr, "%02hhx ", data[i]);
fprintf(stderr, "\n");
}
/* Print the interface index and source/destination addresses carried in
 * an IP(V6)_PKTINFO control message; NULL addresses print as "unknown". */
static void print_pktinfo(int family, int ifindex, void *saddr, void *daddr)
{
	char sa[INET6_ADDRSTRLEN], da[INET6_ADDRSTRLEN];
	const char *src = "unknown";
	const char *dst = "unknown";

	if (saddr)
		src = inet_ntop(family, saddr, sa, sizeof(sa));
	if (daddr)
		dst = inet_ntop(family, daddr, da, sizeof(da));

	fprintf(stderr, " pktinfo: ifindex=%u src=%s dst=%s\n",
		ifindex, src, dst);
}
/* Wait up to 100 ms for any event on fd; abort the program if poll()
 * does not report exactly one ready descriptor. */
static void __poll(int fd)
{
	struct pollfd pfd = { .fd = fd };

	if (poll(&pfd, 1, 100) != 1)
		error(1, errno, "poll");
}
/*
 * Walk the control messages attached to an error-queue message and print
 * every timestamp/pktinfo found. A timestamp is only reported once both
 * its SCM_TIMESTAMPING cmsg and the matching IP(V6)_RECVERR cmsg (which
 * carries the timestamp type and key) have been seen.
 */
static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
{
	struct sock_extended_err *serr = NULL;
	struct scm_timestamping *tss = NULL;
	struct cmsghdr *cm;
	int batch = 0;
	for (cm = CMSG_FIRSTHDR(msg);
	     cm && cm->cmsg_len;
	     cm = CMSG_NXTHDR(msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			tss = (void *) CMSG_DATA(cm);
		} else if ((cm->cmsg_level == SOL_IP &&
			    cm->cmsg_type == IP_RECVERR) ||
			   (cm->cmsg_level == SOL_IPV6 &&
			    cm->cmsg_type == IPV6_RECVERR)) {
			serr = (void *) CMSG_DATA(cm);
			/* only timestamping-origin errors are expected here */
			if (serr->ee_errno != ENOMSG ||
			    serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
				fprintf(stderr, "unknown ip error %d %d\n",
					serr->ee_errno,
					serr->ee_origin);
				serr = NULL;
			}
		} else if (cm->cmsg_level == SOL_IP &&
			   cm->cmsg_type == IP_PKTINFO) {
			struct in_pktinfo *info = (void *) CMSG_DATA(cm);
			print_pktinfo(AF_INET, info->ipi_ifindex,
				      &info->ipi_spec_dst, &info->ipi_addr);
		} else if (cm->cmsg_level == SOL_IPV6 &&
			   cm->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *info6 = (void *) CMSG_DATA(cm);
			print_pktinfo(AF_INET6, info6->ipi6_ifindex,
				      NULL, &info6->ipi6_addr);
		} else
			fprintf(stderr, "unknown cmsg %d,%d\n",
				cm->cmsg_level, cm->cmsg_type);
		/* got both halves of a timestamp record: print and reset */
		if (serr && tss) {
			print_timestamp(tss, serr->ee_info, serr->ee_data,
					payload_len);
			serr = NULL;
			tss = NULL;
			batch++;
		}
	}
	if (batch > 1)
		fprintf(stderr, "batched %d timestamps\n", batch);
}
/*
 * Drain one message from the socket's error queue and print all
 * timestamps it carries.
 *
 * Returns 0 if a message was received, 1 if the queue was empty
 * (EAGAIN); any other recvmsg failure aborts the program.
 */
static int recv_errmsg(int fd)
{
	static char ctrl[1024 /* overprovision*/];
	static struct msghdr msg;
	struct iovec entry;
	static char *data;
	int ret = 0;
	data = malloc(cfg_payload_len);
	if (!data)
		error(1, 0, "malloc");
	memset(&msg, 0, sizeof(msg));
	memset(&entry, 0, sizeof(entry));
	memset(ctrl, 0, sizeof(ctrl));
	entry.iov_base = data;
	entry.iov_len = cfg_payload_len;
	msg.msg_iov = &entry;
	msg.msg_iovlen = 1;
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = ctrl;
	msg.msg_controllen = sizeof(ctrl);
	/* MSG_ERRQUEUE reads from the error queue, not the data stream */
	ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
	if (ret == -1 && errno != EAGAIN)
		error(1, errno, "recvmsg");
	if (ret >= 0) {
		__recv_errmsg_cmsg(&msg, ret);
		if (cfg_show_payload)
			print_payload(data, cfg_payload_len);
	}
	free(data);
	return ret == -1;
}
/*
 * Send cfg_num_pkts packets with the requested tx timestamp flags, then
 * read the generated timestamps back from the socket error queue.
 *
 * @family: PF_INET or PF_INET6
 * @opt:    SOF_TIMESTAMPING_TX_* report flags under test. Software
 *          timestamp generation, cmsg delivery and ID generation are
 *          always OR-ed in below.
 */
static void do_test(int family, unsigned int opt)
{
	char *buf;
	int fd, i, val = 1, total_len;

	if (family == AF_INET6 && cfg_proto != SOCK_STREAM) {
		/* due to lack of checksum generation code */
		fprintf(stderr, "test: skipping datagram over IPv6\n");
		return;
	}

	/* For raw sockets the buffer also carries the headers we build */
	total_len = cfg_payload_len;
	if (cfg_proto == SOCK_RAW) {
		total_len += sizeof(struct udphdr);
		if (cfg_ipproto == IPPROTO_RAW)
			total_len += sizeof(struct iphdr);
	}

	buf = malloc(total_len);
	if (!buf)
		error(1, 0, "malloc");

	fd = socket(family, cfg_proto, cfg_ipproto);
	if (fd < 0)
		error(1, errno, "socket");

	if (cfg_proto == SOCK_STREAM) {
		/* Disable Nagle so each send() maps to one segment */
		/* NOTE(review): error() is called with errno 0 here;
		 * passing errno would be more informative on failure. */
		if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
			       (char*) &val, sizeof(val)))
			error(1, 0, "setsockopt no nagle");

		if (family == PF_INET) {
			if (connect(fd, (void *) &daddr, sizeof(daddr)))
				error(1, errno, "connect ipv4");
		} else {
			if (connect(fd, (void *) &daddr6, sizeof(daddr6)))
				error(1, errno, "connect ipv6");
		}
	}

	if (cfg_do_pktinfo) {
		if (family == AF_INET6) {
			if (setsockopt(fd, SOL_IPV6, IPV6_RECVPKTINFO,
				       &val, sizeof(val)))
				error(1, errno, "setsockopt pktinfo ipv6");
		} else {
			if (setsockopt(fd, SOL_IP, IP_PKTINFO,
				       &val, sizeof(val)))
				error(1, errno, "setsockopt pktinfo ipv4");
		}
	}

	/* Always request software stamps, cmsg delivery and tskey IDs */
	opt |= SOF_TIMESTAMPING_SOFTWARE |
	       SOF_TIMESTAMPING_OPT_CMSG |
	       SOF_TIMESTAMPING_OPT_ID;
	if (cfg_loop_nodata)
		opt |= SOF_TIMESTAMPING_OPT_TSONLY;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       (char *) &opt, sizeof(opt)))
		error(1, 0, "setsockopt timestamping");

	for (i = 0; i < cfg_num_pkts; i++) {
		memset(&ts_prev, 0, sizeof(ts_prev));
		/* Distinct fill byte per packet makes payloads identifiable */
		memset(buf, 'a' + i, total_len);

		if (cfg_proto == SOCK_RAW) {
			struct udphdr *udph;
			int off = 0;

			if (cfg_ipproto == IPPROTO_RAW) {
				/* Hand-build the IP header (IP_HDRINCL mode) */
				struct iphdr *iph = (void *) buf;

				memset(iph, 0, sizeof(*iph));
				iph->ihl = 5;
				iph->version = 4;
				iph->ttl = 2;
				iph->daddr = daddr.sin_addr.s_addr;
				iph->protocol = IPPROTO_UDP;
				/* kernel writes saddr, csum, len */
				off = sizeof(*iph);
			}
			udph = (void *) buf + off;
			udph->source = ntohs(9000); /* random spoof */
			udph->dest = ntohs(dest_port);
			udph->len = ntohs(sizeof(*udph) + cfg_payload_len);
			udph->check = 0; /* not allowed for IPv6 */
		}

		print_timestamp_usr();
		if (cfg_proto != SOCK_STREAM) {
			if (family == PF_INET)
				val = sendto(fd, buf, total_len, 0, (void *) &daddr, sizeof(daddr));
			else
				val = sendto(fd, buf, total_len, 0, (void *) &daddr6, sizeof(daddr6));
		} else {
			/* On TCP only the payload is sent (no raw headers),
			 * and total_len == cfg_payload_len here. */
			val = send(fd, buf, cfg_payload_len, 0);
		}
		if (val != total_len)
			error(1, errno, "send");

		/* wait for all errors to be queued, else ACKs arrive OOO */
		usleep(50 * 1000);

		__poll(fd);

		/* Drain the whole error queue for this packet */
		while (!recv_errmsg(fd)) {}
	}

	if (close(fd))
		error(1, errno, "close");

	free(buf);
	usleep(400 * 1000);
}
/*
 * Print the usage summary to stderr and terminate with exit status 1.
 * Never returns.
 */
static void __attribute__((noreturn)) usage(const char *filepath)
{
	static const char options_help[] =
		" -4: only IPv4\n"
		" -6: only IPv6\n"
		" -h: show this message\n"
		" -I: request PKTINFO\n"
		" -l N: send N bytes at a time\n"
		" -n: set no-payload option\n"
		" -r: use raw\n"
		" -R: use raw (IP_HDRINCL)\n"
		" -p N: connect to port N\n"
		" -u: use udp\n"
		" -x: show payload (up to 70 bytes)\n";

	fprintf(stderr, "\nUsage: %s [options] hostname\n\nwhere options are:\n%s",
		filepath, options_help);
	exit(1);
}
/*
 * Parse command-line options into the cfg_* globals and validate them.
 * Exits via usage() or error() on invalid input; on return, exactly one
 * non-option argument (the hostname) remains at argv[argc - 1].
 */
static void parse_opt(int argc, char **argv)
{
	int proto_count = 0;
	/* getopt() returns an int; a plain char would never compare equal
	 * to -1 on platforms where char is unsigned (e.g. ARM, PowerPC). */
	int c;

	while ((c = getopt(argc, argv, "46hIl:np:rRux")) != -1) {
		switch (c) {
		case '4':
			do_ipv6 = 0;
			break;
		case '6':
			do_ipv4 = 0;
			break;
		case 'I':
			cfg_do_pktinfo = true;
			break;
		case 'n':
			cfg_loop_nodata = true;
			break;
		case 'r':
			proto_count++;
			cfg_proto = SOCK_RAW;
			cfg_ipproto = IPPROTO_UDP;
			break;
		case 'R':
			proto_count++;
			cfg_proto = SOCK_RAW;
			cfg_ipproto = IPPROTO_RAW;
			break;
		case 'u':
			proto_count++;
			cfg_proto = SOCK_DGRAM;
			cfg_ipproto = IPPROTO_UDP;
			break;
		case 'l':
			cfg_payload_len = strtoul(optarg, NULL, 10);
			break;
		case 'p':
			dest_port = strtoul(optarg, NULL, 10);
			break;
		case 'x':
			cfg_show_payload = true;
			break;
		case 'h':
		default:
			usage(argv[0]);
		}
	}

	/* This branch fires when the payload length IS zero, so the old
	 * message "payload may not be nonzero" stated the opposite. */
	if (!cfg_payload_len)
		error(1, 0, "payload may not be zero");
	if (cfg_proto != SOCK_STREAM && cfg_payload_len > 1472)
		error(1, 0, "udp packet might exceed expected MTU");
	if (!do_ipv4 && !do_ipv6)
		error(1, 0, "pass -4 or -6, not both");
	if (proto_count > 1)
		error(1, 0, "pass -r, -R or -u, not multiple");

	if (optind != argc - 1)
		error(1, 0, "missing required hostname argument");
}
/*
 * Resolve hostname and record the first IPv4 address in daddr and the
 * first IPv6 address in daddr6, both with dest_port filled in. Families
 * for which no address was found are cleared from do_ipv4/do_ipv6 so the
 * corresponding tests are skipped.
 */
static void resolve_hostname(const char *hostname)
{
	struct addrinfo *addrs, *cur;
	int have_ipv4 = 0, have_ipv6 = 0;

	/* NOTE(review): getaddrinfo() reports failure via its return value
	 * (gai_strerror), not errno; the errno printed here may be stale. */
	if (getaddrinfo(hostname, NULL, NULL, &addrs))
		error(1, errno, "getaddrinfo");

	/*
	 * Walk the entire result list until one address of EACH family has
	 * been captured. The previous condition (!have_ipv4 && !have_ipv6)
	 * stopped as soon as either family matched, so a dual-stack host
	 * would never get its second family recorded.
	 */
	cur = addrs;
	while (cur && (!have_ipv4 || !have_ipv6)) {
		if (!have_ipv4 && cur->ai_family == AF_INET) {
			memcpy(&daddr, cur->ai_addr, sizeof(daddr));
			daddr.sin_port = htons(dest_port);
			have_ipv4 = 1;
		}
		else if (!have_ipv6 && cur->ai_family == AF_INET6) {
			memcpy(&daddr6, cur->ai_addr, sizeof(daddr6));
			daddr6.sin6_port = htons(dest_port);
			have_ipv6 = 1;
		}
		cur = cur->ai_next;
	}
	if (addrs)
		freeaddrinfo(addrs);

	do_ipv4 &= have_ipv4;
	do_ipv6 &= have_ipv6;
}
/*
 * Run all timestamp sub-tests for one address family. The ACK variants
 * are only meaningful on TCP and are skipped for other protocols.
 */
static void do_main(int family)
{
	static const struct {
		const char *label;	/* printed verbatim to stderr */
		unsigned int opt;	/* SOF_TIMESTAMPING_TX_* flags */
		int stream_only;	/* nonzero: requires SOCK_STREAM */
	} subtests[] = {
		{ "test SND\n", SOF_TIMESTAMPING_TX_SOFTWARE, 0 },
		{ "test ENQ\n", SOF_TIMESTAMPING_TX_SCHED, 0 },
		{ "test ENQ + SND\n",
		  SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_SOFTWARE,
		  0 },
		{ "\ntest ACK\n", SOF_TIMESTAMPING_TX_ACK, 1 },
		{ "\ntest SND + ACK\n",
		  SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_TX_ACK,
		  1 },
		{ "\ntest ENQ + SND + ACK\n",
		  SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_SOFTWARE |
		  SOF_TIMESTAMPING_TX_ACK,
		  1 },
	};
	unsigned int i;

	fprintf(stderr, "family: %s\n",
		family == PF_INET ? "INET" : "INET6");

	for (i = 0; i < sizeof(subtests) / sizeof(subtests[0]); i++) {
		if (subtests[i].stream_only && cfg_proto != SOCK_STREAM)
			continue;
		fprintf(stderr, "%s", subtests[i].label);
		do_test(family, subtests[i].opt);
	}
}
/* Printable protocol names, indexed by cfg_proto: on Linux SOCK_STREAM == 1,
 * SOCK_DGRAM == 2, SOCK_RAW == 3 (index 0 is unused). */
const char *sock_names[] = { NULL, "TCP", "UDP", "RAW" };
/*
 * Entry point: parse options, resolve the target hostname, then run the
 * sub-tests for each enabled address family.
 */
int main(int argc, char **argv)
{
	if (argc == 1)
		usage(argv[0]);

	parse_opt(argc, argv);
	resolve_hostname(argv[argc - 1]);

	/* Echo the effective configuration before testing */
	fprintf(stderr,
		"protocol: %s\n"
		"payload: %u\n"
		"server port: %u\n"
		"\n",
		sock_names[cfg_proto], cfg_payload_len, dest_port);

	if (do_ipv4)
		do_main(PF_INET);
	if (do_ipv6)
		do_main(PF_INET6);

	return 0;
}
| gpl-2.0 |
bigbiff/i717-GB-Kernel | drivers/acpi/acpica/nsxfeval.c | 992 | 25941 | /*******************************************************************************
*
* Module Name: nsxfeval - Public interfaces to the ACPI subsystem
* ACPI Object evaluation interfaces
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2010, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
/* Local prototypes */
static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object_typed
*
* PARAMETERS: Handle - Object handle (optional)
* Pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
* parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
/*
 * Evaluate an object and verify that the value it returns has the
 * expected ACPI object type. Wraps acpi_evaluate_object(); on a type
 * mismatch the return buffer is released (if we allocated it) and
 * AE_TYPE is returned.
 */
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
			   acpi_string pathname,
			   struct acpi_object_list *external_params,
			   struct acpi_buffer *return_buffer,
			   acpi_object_type return_type)
{
	acpi_status status;
	u8 must_free;

	ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);

	/* A return buffer is mandatory for this interface */

	if (!return_buffer) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Remember whether the buffer will be allocated on our behalf */

	must_free = (return_buffer->length == ACPI_ALLOCATE_BUFFER);

	/* Evaluate the object */

	status = acpi_evaluate_object(handle, pathname, external_params,
				      return_buffer);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* ACPI_TYPE_ANY means the caller accepts any return type */

	if (return_type == ACPI_TYPE_ANY) {
		return_ACPI_STATUS(AE_OK);
	}

	if (return_buffer->length == 0) {

		/* Caller specifically asked for a return value */

		ACPI_ERROR((AE_INFO, "No return value"));
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* The evaluation produced an object; check its type */

	if (((union acpi_object *)return_buffer->pointer)->type == return_type) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Type mismatch - report it and discard the returned object */

	ACPI_ERROR((AE_INFO,
		    "Incorrect return type [%s] requested [%s]",
		    acpi_ut_get_type_name(((union acpi_object *)return_buffer->
					   pointer)->type),
		    acpi_ut_get_type_name(return_type)));

	if (must_free) {

		/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */

		ACPI_FREE(return_buffer->pointer);
		return_buffer->pointer = NULL;
	}

	return_buffer->length = 0;
	return_ACPI_STATUS(AE_TYPE);
}

ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object
*
* PARAMETERS: Handle - Object handle (optional)
* Pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
* return_buffer - Where to put method's return value (if
* any). If NULL, no value is returned.
*
* RETURN: Status
*
* DESCRIPTION: Find and evaluate the given object, passing the given
* parameters if necessary. One of "Handle" or "Pathname" must
* be valid (non-null)
*
******************************************************************************/
acpi_status
acpi_evaluate_object(acpi_handle handle,
		     acpi_string pathname,
		     struct acpi_object_list *external_params,
		     struct acpi_buffer *return_buffer)
{
	acpi_status status;
	struct acpi_evaluate_info *info;
	acpi_size buffer_space_needed;
	u32 i;

	ACPI_FUNCTION_TRACE(acpi_evaluate_object);

	/* Allocate and initialize the evaluation information block */

	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	info->pathname = pathname;

	/* Convert and validate the device handle */

	info->prefix_node = acpi_ns_validate_handle(handle);
	if (!info->prefix_node) {
		status = AE_BAD_PARAMETER;
		goto cleanup;
	}

	/*
	 * If there are parameters to be passed to a control method, the external
	 * objects must all be converted to internal objects
	 */
	if (external_params && external_params->count) {
		/*
		 * Allocate a new parameter block for the internal objects
		 * Add 1 to count to allow for null terminated internal list
		 */
		info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
							 external_params->
							 count +
							 1) * sizeof(void *));
		if (!info->parameters) {
			status = AE_NO_MEMORY;
			goto cleanup;
		}

		/* Convert each external object in the list to an internal object */

		for (i = 0; i < external_params->count; i++) {
			status =
			    acpi_ut_copy_eobject_to_iobject(&external_params->
							    pointer[i],
							    &info->
							    parameters[i]);
			if (ACPI_FAILURE(status)) {
				/* Partial list is freed wholesale in cleanup */
				goto cleanup;
			}
		}
		info->parameters[external_params->count] = NULL;
	}

	/*
	 * Three major cases:
	 * 1) Fully qualified pathname
	 * 2) No handle, not fully qualified pathname (error)
	 * 3) Valid handle
	 */
	if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {

		/* The path is fully qualified, just evaluate by name */

		info->prefix_node = NULL;
		status = acpi_ns_evaluate(info);
	} else if (!handle) {
		/*
		 * A handle is optional iff a fully qualified pathname is specified.
		 * Since we've already handled fully qualified names above, this is
		 * an error
		 */
		if (!pathname) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Both Handle and Pathname are NULL"));
		} else {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Null Handle with relative pathname [%s]",
					  pathname));
		}

		status = AE_BAD_PARAMETER;
	} else {
		/* We have a namespace a node and a possible relative path */

		status = acpi_ns_evaluate(info);
	}

	/*
	 * If we are expecting a return value, and all went well above,
	 * copy the return value to an external object.
	 */
	if (return_buffer) {
		if (!info->return_object) {
			return_buffer->length = 0;
		} else {
			if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
			    ACPI_DESC_TYPE_NAMED) {
				/*
				 * If we received a NS Node as a return object, this means that
				 * the object we are evaluating has nothing interesting to
				 * return (such as a mutex, etc.)  We return an error because
				 * these types are essentially unsupported by this interface.
				 * We don't check up front because this makes it easier to add
				 * support for various types at a later date if necessary.
				 */
				status = AE_TYPE;
				info->return_object = NULL;	/* No need to delete a NS Node */
				return_buffer->length = 0;
			}

			if (ACPI_SUCCESS(status)) {

				/* Dereference Index and ref_of references */

				acpi_ns_resolve_references(info);

				/* Get the size of the returned object */

				status =
				    acpi_ut_get_object_size(info->return_object,
							    &buffer_space_needed);
				if (ACPI_SUCCESS(status)) {

					/* Validate/Allocate/Clear caller buffer */

					status =
					    acpi_ut_initialize_buffer
					    (return_buffer,
					     buffer_space_needed);
					if (ACPI_FAILURE(status)) {
						/*
						 * Caller's buffer is too small or a new one can't
						 * be allocated
						 */
						ACPI_DEBUG_PRINT((ACPI_DB_INFO,
								  "Needed buffer size %X, %s\n",
								  (u32)
								  buffer_space_needed,
								  acpi_format_exception
								  (status)));
					} else {
						/* We have enough space for the object, build it */

						status =
						    acpi_ut_copy_iobject_to_eobject
						    (info->return_object,
						     return_buffer);
					}
				}
			}
		}
	}

	if (info->return_object) {
		/*
		 * Delete the internal return object. NOTE: Interpreter must be
		 * locked to avoid race condition.
		 */
		acpi_ex_enter_interpreter();

		/* Remove one reference on the return object (should delete it) */

		acpi_ut_remove_reference(info->return_object);
		acpi_ex_exit_interpreter();
	}

      cleanup:

	/* Free the input parameter list (if we created one) */

	if (info->parameters) {

		/* Free the allocated parameter block */

		acpi_ut_delete_internal_object_list(info->parameters);
	}

	ACPI_FREE(info);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
/*******************************************************************************
*
* FUNCTION: acpi_ns_resolve_references
*
* PARAMETERS: Info - Evaluation info block
*
* RETURN: Info->return_object is replaced with the dereferenced object
*
* DESCRIPTION: Dereference certain reference objects. Called before an
* internal return object is converted to an external union acpi_object.
*
* Performs an automatic dereference of Index and ref_of reference objects.
* These reference objects are not supported by the union acpi_object, so this is a
* last resort effort to return something useful. Also, provides compatibility
* with other ACPI implementations.
*
* NOTE: does not handle references within returned package objects or nested
* references, but this support could be added later if found to be necessary.
*
******************************************************************************/
/*
 * Automatically dereference Index and ref_of reference objects in
 * info->return_object so the result can be expressed as an external
 * union acpi_object. Other reference classes (name paths, ddb_handles)
 * are left untouched. See the banner comment above for background.
 */
static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
{
	union acpi_operand_object *resolved = NULL;
	struct acpi_namespace_node *node;

	/* Only reference objects need (and support) resolution */

	if ((info->return_object)->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
		return;
	}

	/*
	 * Only Index- and ref_of-class references are dereferenced here; a
	 * name reference (AML_NAMEPATH_OP) is already convertible to a
	 * union acpi_object, and a ddb_handle (AML_LOAD_OP) cannot be
	 * dereferenced or converted at all.
	 */
	if (info->return_object->reference.class == ACPI_REFCLASS_INDEX) {
		resolved = *(info->return_object->reference.where);
	} else if (info->return_object->reference.class == ACPI_REFCLASS_REFOF) {
		node = info->return_object->reference.object;
		if (node) {
			resolved = node->object;
		}
	} else {
		return;
	}

	/* Replace the reference with the object it points to */

	if (resolved) {
		acpi_ut_add_reference(resolved);
		acpi_ut_remove_reference(info->return_object);
		info->return_object = resolved;
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_walk_namespace
*
* PARAMETERS: Type - acpi_object_type to search for
* start_object - Handle in namespace where search begins
* max_depth - Depth to which search is to reach
* pre_order_visit - Called during tree pre-order visit
* when an object of "Type" is found
* post_order_visit - Called during tree post-order visit
* when an object of "Type" is found
* Context - Passed to user function(s) above
* return_value - Location where return value of
* user_function is put if terminated early
*
* RETURNS Return value from the user_function if terminated early.
* Otherwise, returns NULL.
*
* DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
* starting (and ending) at the object specified by start_handle.
* The callback function is called whenever an object that matches
* the type parameter is found. If the callback function returns
* a non-zero value, the search is terminated immediately and this
* value is returned to the caller.
*
* The point of this procedure is to provide a generic namespace
* walk routine that can be called from multiple places to
* provide multiple services; the callback function(s) can be
* tailored to each task, whether it is a print function,
* a compare function, etc.
*
******************************************************************************/
acpi_status
acpi_walk_namespace(acpi_object_type type,
		    acpi_handle start_object,
		    u32 max_depth,
		    acpi_walk_callback pre_order_visit,
		    acpi_walk_callback post_order_visit,
		    void *context, void **return_value)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_walk_namespace);

	/* Parameter validation: valid type, nonzero depth, and at least
	 * one of the two visit callbacks must be supplied */

	if ((type > ACPI_TYPE_LOCAL_MAX) ||
	    (!max_depth) || (!pre_order_visit && !post_order_visit)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/*
	 * Need to acquire the namespace reader lock to prevent interference
	 * with any concurrent table unloads (which causes the deletion of
	 * namespace objects). We cannot allow the deletion of a namespace node
	 * while the user function is using it. The exception to this are the
	 * nodes created and deleted during control method execution -- these
	 * nodes are marked as temporary nodes and are ignored by the namespace
	 * walk. Thus, control methods can be executed while holding the
	 * namespace deletion lock (and the user function can execute control
	 * methods.)
	 */
	status = acpi_ut_acquire_read_lock(&acpi_gbl_namespace_rw_lock);
	if (ACPI_FAILURE(status)) {
		/* NOTE(review): bare "return" skips the function-exit trace
		 * that return_ACPI_STATUS would emit - confirm intended */
		return status;
	}

	/*
	 * Lock the namespace around the walk. The namespace will be
	 * unlocked/locked around each call to the user function - since the user
	 * function must be allowed to make ACPICA calls itself (for example, it
	 * will typically execute control methods during device enumeration.)
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	status = acpi_ns_walk_namespace(type, start_object, max_depth,
					ACPI_NS_WALK_UNLOCK, pre_order_visit,
					post_order_visit, context,
					return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);

      unlock_and_exit:
	/* Release locks in reverse order of acquisition */
	(void)acpi_ut_release_read_lock(&acpi_gbl_namespace_rw_lock);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_device_callback
*
* PARAMETERS: Callback from acpi_get_device
*
* RETURN: Status
*
* DESCRIPTION: Takes callbacks from walk_namespace and filters out all non-
* present devices, or if they specified a HID, it filters based
* on that.
*
******************************************************************************/
/*
 * Namespace-walk callback used by acpi_get_devices. Filters each visited
 * Device node by HID/CID (when info->hid is set) and by _STA presence,
 * then forwards matching nodes to the caller-supplied user function.
 * Returns AE_CTRL_DEPTH to prune a subtree from the walk.
 */
static acpi_status
acpi_ns_get_device_callback(acpi_handle obj_handle,
			    u32 nesting_level,
			    void *context, void **return_value)
{
	struct acpi_get_devices_info *info = context;
	acpi_status status;
	struct acpi_namespace_node *node;
	u32 flags;
	struct acpica_device_id *hid;
	struct acpica_device_id_list *cid;
	u32 i;
	u8 found;
	int no_match;

	/* Validate the handle under the namespace lock */

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	node = acpi_ns_validate_handle(obj_handle);
	status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	if (!node) {
		return (AE_BAD_PARAMETER);
	}

	/*
	 * First, filter based on the device HID and CID.
	 *
	 * 01/2010: For this case where a specific HID is requested, we don't
	 * want to run _STA until we have an actual HID match. Thus, we will
	 * not unnecessarily execute _STA on devices for which the caller
	 * doesn't care about. Previously, _STA was executed unconditionally
	 * on all devices found here.
	 *
	 * A side-effect of this change is that now we will continue to search
	 * for a matching HID even under device trees where the parent device
	 * would have returned a _STA that indicates it is not present or
	 * not functioning (thus aborting the search on that branch).
	 */
	if (info->hid != NULL) {
		status = acpi_ut_execute_HID(node, &hid);
		if (status == AE_NOT_FOUND) {
			/* No _HID: not a match, but keep walking children */
			return (AE_OK);
		} else if (ACPI_FAILURE(status)) {
			return (AE_CTRL_DEPTH);
		}

		no_match = ACPI_STRCMP(hid->string, info->hid);
		ACPI_FREE(hid);

		if (no_match) {
			/*
			 * HID does not match, attempt match within the
			 * list of Compatible IDs (CIDs)
			 */
			status = acpi_ut_execute_CID(node, &cid);
			if (status == AE_NOT_FOUND) {
				return (AE_OK);
			} else if (ACPI_FAILURE(status)) {
				return (AE_CTRL_DEPTH);
			}

			/* Walk the CID list */

			found = 0;
			for (i = 0; i < cid->count; i++) {
				if (ACPI_STRCMP(cid->ids[i].string, info->hid)
				    == 0) {
					found = 1;
					break;
				}
			}
			ACPI_FREE(cid);
			if (!found)
				return (AE_OK);
		}
	}

	/* Run _STA to determine if device is present */

	status = acpi_ut_execute_STA(node, &flags);
	if (ACPI_FAILURE(status)) {
		return (AE_CTRL_DEPTH);
	}

	if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
	    !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
		/*
		 * Don't examine the children of the device only when the
		 * device is neither present nor functional. See ACPI spec,
		 * description of _STA for more information.
		 */
		return (AE_CTRL_DEPTH);
	}

	/* We have a valid device, invoke the user function */

	status = info->user_function(obj_handle, nesting_level, info->context,
				     return_value);
	return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_get_devices
*
* PARAMETERS: HID - HID to search for. Can be NULL.
* user_function - Called when a matching object is found
* Context - Passed to user function
* return_value - Location where return value of
* user_function is put if terminated early
*
* RETURNS Return value from the user_function if terminated early.
* Otherwise, returns NULL.
*
* DESCRIPTION: Performs a modified depth-first walk of the namespace tree,
* starting (and ending) at the object specified by start_handle.
* The user_function is called whenever an object of type
* Device is found. If the user function returns
* a non-zero value, the search is terminated immediately and this
* value is returned to the caller.
*
* This is a wrapper for walk_namespace, but the callback performs
* additional filtering. Please see acpi_ns_get_device_callback.
*
******************************************************************************/
/*
 * Walk every Device object in the namespace, filtering by the optional
 * HID string, and invoke user_function for each match. Thin wrapper over
 * acpi_ns_walk_namespace with acpi_ns_get_device_callback doing the
 * HID/CID/_STA filtering.
 */
acpi_status
acpi_get_devices(const char *HID,
		 acpi_walk_callback user_function,
		 void *context, void **return_value)
{
	acpi_status status;
	struct acpi_get_devices_info info;

	ACPI_FUNCTION_TRACE(acpi_get_devices);

	/* A user callback is mandatory */

	if (!user_function) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Capture the caller's callback, context, and HID filter so our own
	 * callback can forward matches to it */

	info = (struct acpi_get_devices_info) {
		.hid = HID,
		.context = context,
		.user_function = user_function,
	};

	/*
	 * Walk under the namespace lock; ACPI_NS_WALK_UNLOCK drops the lock
	 * around each callback invocation, so the user function may itself
	 * call back into ACPICA.
	 */
	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
					acpi_ns_get_device_callback, NULL,
					&info, return_value);

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_get_devices)
/*******************************************************************************
*
* FUNCTION: acpi_attach_data
*
* PARAMETERS: obj_handle - Namespace node
* Handler - Handler for this attachment
* Data - Pointer to data to be attached
*
* RETURN: Status
*
* DESCRIPTION: Attach arbitrary data and handler to a namespace node.
*
******************************************************************************/
/*
 * Attach caller-private data and its handler to a namespace node. The
 * namespace mutex is held across handle validation and attachment.
 */
acpi_status
acpi_attach_data(acpi_handle obj_handle,
		 acpi_object_handler handler, void *data)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* All three arguments are required */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle, then attach under the lock */

	node = acpi_ns_validate_handle(obj_handle);
	if (node) {
		status = acpi_ns_attach_data(node, handler, data);
	} else {
		status = AE_BAD_PARAMETER;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_attach_data)
/*******************************************************************************
*
* FUNCTION: acpi_detach_data
*
* PARAMETERS: obj_handle - Namespace node handle
* Handler - Handler used in call to acpi_attach_data
*
* RETURN: Status
*
* DESCRIPTION: Remove data that was previously attached to a node.
*
******************************************************************************/
/*
 * Remove data previously attached to a namespace node with
 * acpi_attach_data, identified by the same handler pointer.
 */
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* Both arguments are required */

	if (!obj_handle || !handler) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle, then detach under the lock */

	node = acpi_ns_validate_handle(obj_handle);
	if (node) {
		status = acpi_ns_detach_data(node, handler);
	} else {
		status = AE_BAD_PARAMETER;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_detach_data)
/*******************************************************************************
*
* FUNCTION: acpi_get_data
*
* PARAMETERS: obj_handle - Namespace node
* Handler - Handler used in call to attach_data
* Data - Where the data is returned
*
* RETURN: Status
*
* DESCRIPTION: Retrieve data that was previously attached to a namespace node.
*
******************************************************************************/
/*
 * Retrieve data previously attached to a namespace node, identified by
 * the handler used at attach time; *data receives the stored pointer.
 */
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
	struct acpi_namespace_node *node;
	acpi_status status;

	/* All three arguments are required */

	if (!obj_handle || !handler || !data) {
		return (AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Convert and validate the handle, then look up under the lock */

	node = acpi_ns_validate_handle(obj_handle);
	if (node) {
		status = acpi_ns_get_attached_data(node, handler, data);
	} else {
		status = AE_BAD_PARAMETER;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_get_data)
| gpl-2.0 |
richardtrip/bravo-2.6.32 | arch/mips/pci/pci-rc32434.c | 1504 | 7505 | /*
* BRIEF MODULE DESCRIPTION
* PCI initialization for IDT EB434 board
*
* Copyright 2004 IDT Inc. (rischelp@idt.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/* define an unsigned array for the PCI registers */
/* define an unsigned array for the PCI registers */
/* KORINA_CNFG1..CNFG24 values written to PCI configuration registers in
 * ascending order. NOTE(review): the array is sized [25] but only 24
 * entries are initialized, leaving the last element zero - confirm
 * whether the extra slot is intentional. */
static unsigned int korina_cnfg_regs[25] = {
	KORINA_CNFG1, KORINA_CNFG2, KORINA_CNFG3, KORINA_CNFG4,
	KORINA_CNFG5, KORINA_CNFG6, KORINA_CNFG7, KORINA_CNFG8,
	KORINA_CNFG9, KORINA_CNFG10, KORINA_CNFG11, KORINA_CNFG12,
	KORINA_CNFG13, KORINA_CNFG14, KORINA_CNFG15, KORINA_CNFG16,
	KORINA_CNFG17, KORINA_CNFG18, KORINA_CNFG19, KORINA_CNFG20,
	KORINA_CNFG21, KORINA_CNFG22, KORINA_CNFG23, KORINA_CNFG24
};
/* Forward declarations: the two memory-window resources reference each
 * other in their initializers below. */
static struct resource rc32434_res_pci_mem1;
static struct resource rc32434_res_pci_mem2;

/* First PCI memory window (256 MB at 0x50000000).
 * NOTE(review): .parent points at this resource itself and the tree is
 * hand-linked (.child = mem2) instead of being built via
 * insert_resource() - confirm this self-reference is intentional. */
static struct resource rc32434_res_pci_mem1 = {
	.name = "PCI MEM1",
	.start = 0x50000000,
	.end = 0x5FFFFFFF,
	.flags = IORESOURCE_MEM,
	.parent = &rc32434_res_pci_mem1,
	.sibling = NULL,
	.child = &rc32434_res_pci_mem2
};

/* Second PCI memory window (256 MB at 0x60000000), child of mem1 */
static struct resource rc32434_res_pci_mem2 = {
	.name = "PCI Mem2",
	.start = 0x60000000,
	.end = 0x6FFFFFFF,
	.flags = IORESOURCE_MEM,
	.parent = &rc32434_res_pci_mem1,
	.sibling = NULL,
	.child = NULL
};
/* PCI I/O window (1 MB at 0x18800000) */
static struct resource rc32434_res_pci_io1 = {
	.name = "PCI I/O1",
	.start = 0x18800000,
	.end = 0x188FFFFF,
	.flags = IORESOURCE_IO,
};
extern struct pci_ops rc32434_pci_ops;
#define PCI_MEM1_START PCI_ADDR_START
#define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1)
#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1)
#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
#define PCI_IO1_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
#define PCI_IO2_START \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
#define PCI_IO2_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
/*
 * A second controller is declared but never registered in this file's
 * visible code — presumably referenced elsewhere; TODO confirm.
 */
struct pci_controller rc32434_controller2;

/* Primary host controller; registered in rc32434_pci_init(). */
struct pci_controller rc32434_controller = {
	.pci_ops = &rc32434_pci_ops,
	.mem_resource = &rc32434_res_pci_mem1,
	.io_resource = &rc32434_res_pci_io1,
	.mem_offset = 0,
	.io_offset = 0,
};
#ifdef __MIPSEB__
#define PCI_ENDIAN_FLAG PCILBAC_sb_m
#else
#define PCI_ENDIAN_FLAG 0
#endif
/*
 * Program the RC32434 PCI host bridge.
 *
 * Checks that the bridge is strapped for a supported host mode, enables
 * the bus interface, clears and masks all PCI status / messaging
 * interrupts, programs the four PCI local base address (PCILBA)
 * windows, and preloads the bridge's own configuration header from
 * korina_cnfg_regs[].
 *
 * Returns 0 on success, -1 when the controller is not in host mode
 * (in which case the bridge is left untouched).
 */
static int __init rc32434_pcibridge_init(void)
{
	unsigned int pcicvalue, pcicdata = 0;
	unsigned int dummyread, pcicntlval;
	int loopCount;
	unsigned int pci_config_addr;

	pcicvalue = rc32434_pci->pcic;
	pcicvalue = (pcicvalue >> PCIM_SHFT) & PCIM_BIT_LEN;
	if (!((pcicvalue == PCIM_H_EA) ||
	      (pcicvalue == PCIM_H_IA_FIX) ||
	      (pcicvalue == PCIM_H_IA_RR))) {
		/*
		 * Fix: pr_err() already prepends KERN_ERR; passing the
		 * macro as well embedded the raw level prefix in the
		 * message text.
		 */
		pr_err("PCI init error!!!\n");
		/* Not in Host Mode, return ERROR */
		return -1;
	}

	/* Enables the Idle Grant mode, Arbiter Parking */
	pcicdata |= (PCI_CTL_IGM | PCI_CTL_EAP | PCI_CTL_EN);
	rc32434_pci->pcic = pcicdata; /* Enable the PCI bus Interface */

	/*
	 * Wait until no retry is in progress, then zero out the PCI
	 * status & PCI status mask.  NOTE(review): this spins forever
	 * if PCI_STAT_RIP never clears — presumably guaranteed by the
	 * hardware; confirm against the RC32434 datasheet.
	 */
	for (;;) {
		pcicdata = rc32434_pci->pcis;
		if (!(pcicdata & PCI_STAT_RIP))
			break;
	}
	rc32434_pci->pcis = 0;
	rc32434_pci->pcism = 0xFFFFFFFF;

	/* Zero out the PCI decoupled registers */
	rc32434_pci->pcidac = 0;	/*
					 * disable PCI decoupled accesses at
					 * initialization
					 */
	rc32434_pci->pcidas = 0;	/* clear the status */
	rc32434_pci->pcidasm = 0x0000007F;	/* Mask all the interrupts */

	/* Mask PCI Messaging Interrupts */
	rc32434_pci_msg->pciiic = 0;
	rc32434_pci_msg->pciiim = 0xFFFFFFFF;
	rc32434_pci_msg->pciioic = 0;
	rc32434_pci_msg->pciioim = 0;

	/* PCILBA0: 256 MB memory window, PCI map address = local address. */
	rc32434_pci->pcilba[0].address = (unsigned int) (PCI_ADDR_START);
	rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START);
	rc32434_pci->pcilba[0].control =
	    (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	/* Read back after each window write to flush the CPU write buffers. */
	dummyread = rc32434_pci->pcilba[0].control;

	/* PCILBA1: second 256 MB memory window (the original comments
	 * mislabeled this and PCILBA2 as each other). */
	rc32434_pci->pcilba[1].address = 0x60000000;
	rc32434_pci->pcilba[1].mapping = 0x60000000;
	rc32434_pci->pcilba[1].control =
	    (((SIZE_256MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[1].control;

	/* PCILBA2: 4 MB window.
	 * NOTE(review): mapping 0x18FFFFFF does not mirror the window
	 * base 0x18C00000 the way the other windows do — looks
	 * suspicious; confirm against the datasheet before changing. */
	rc32434_pci->pcilba[2].address = 0x18C00000;
	rc32434_pci->pcilba[2].mapping = 0x18FFFFFF;
	rc32434_pci->pcilba[2].control =
	    (((SIZE_4MB & 0x1f) << PCI_LBAC_SIZE_BIT) | PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[2].control;

	/* PCILBA3: 1 MB I/O window with PCI_LBAC_MSI set. */
	rc32434_pci->pcilba[3].address = 0x18800000;
	rc32434_pci->pcilba[3].mapping = 0x18800000;
	rc32434_pci->pcilba[3].control =
	    ((((SIZE_1MB & 0x1ff) << PCI_LBAC_SIZE_BIT) | PCI_LBAC_MSI) |
	     PCI_ENDIAN_FLAG);
	dummyread = rc32434_pci->pcilba[3].control;

	/* Preload the bridge's own config header: 24 words starting at
	 * config register offset 0x04. */
	pci_config_addr = (unsigned int) (0x80000004);
	for (loopCount = 0; loopCount < 24; loopCount++) {
		rc32434_pci->pcicfga = pci_config_addr;
		dummyread = rc32434_pci->pcicfga;
		rc32434_pci->pcicfgd = korina_cnfg_regs[loopCount];
		dummyread = rc32434_pci->pcicfgd;
		pci_config_addr += 4;
	}

	/* Program the retry/disconnect timer counts. */
	rc32434_pci->pcitc =
	    (unsigned int) ((PCITC_RTIMER_VAL & 0xff) << PCI_TC_RTIMER_BIT) |
	    ((PCITC_DTIMER_VAL & 0xff) << PCI_TC_DTIMER_BIT);

	/* Clear the Target Not Ready control bit; the final read flushes
	 * the write. */
	pcicntlval = rc32434_pci->pcic;
	pcicntlval &= ~PCI_CTL_TNR;
	rc32434_pci->pcic = pcicntlval;
	pcicntlval = rc32434_pci->pcic;

	return 0;
}
/*
 * Top-level PCI init: point ioport_resource at the I/O window, program
 * the host bridge, map the I/O window and register the controller.
 *
 * Returns 0 on success, a negative value when the bridge is not in
 * host mode or the I/O window cannot be ioremap()ed.
 */
static int __init rc32434_pci_init(void)
{
	void __iomem *io_map_base;

	pr_info("PCI: Initializing PCI\n");

	ioport_resource.start = rc32434_res_pci_io1.start;
	ioport_resource.end = rc32434_res_pci_io1.end;

	/* Fix: the bridge-init result was previously ignored, so a
	 * controller not strapped for host mode was still registered. */
	if (rc32434_pcibridge_init())
		return -ENODEV;

	io_map_base = ioremap(rc32434_res_pci_io1.start,
		rc32434_res_pci_io1.end - rc32434_res_pci_io1.start + 1);
	if (!io_map_base)
		return -ENOMEM;

	/* io_map_base is biased so that io_map_base + port works directly. */
	rc32434_controller.io_map_base =
		(unsigned long)io_map_base - rc32434_res_pci_io1.start;

	register_pci_controller(&rc32434_controller);
	rc32434_sync();

	return 0;
}
arch_initcall(rc32434_pci_init);
| gpl-2.0 |
1tush/huawei_u8850_kernel_ics | arch/arm/kernel/return_address.c | 1504 | 1583 | /*
* arch/arm/kernel/return_address.c
*
* Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
* for Pengutronix
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/module.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
#include <asm/stacktrace.h>
struct return_address_data {
unsigned int level;
void *addr;
};
/*
 * walk_stackframe() callback: count down through frames until the
 * requested depth is reached, then capture that frame's return address.
 * Returning non-zero stops the walk.
 */
static int save_return_addr(struct stackframe *frame, void *d)
{
	struct return_address_data *data = d;

	/* Still unwinding towards the requested depth? */
	if (data->level) {
		--data->level;
		return 0;
	}

	/* Target frame reached: record its lr and stop walking. */
	data->addr = (void *)frame->lr;
	return 1;
}
/*
 * Return the caller's return address 'level' frames up the stack, or
 * NULL if the stack cannot be unwound that far.
 *
 * level is biased by one so that save_return_addr() skips the frame of
 * return_address() itself.
 */
void *return_address(unsigned int level)
{
	struct return_address_data data;
	struct stackframe frame;
	register unsigned long current_sp asm ("sp");

	data.level = level + 1;
	/*
	 * Fix: data.addr was never initialized.  If walk_stackframe()
	 * runs out of frames just as level decrements to zero, the
	 * !data.level check below would return uninitialized stack
	 * garbage as a code pointer.
	 */
	data.addr = NULL;

	frame.fp = (unsigned long)__builtin_frame_address(0);
	frame.sp = current_sp;
	frame.lr = (unsigned long)__builtin_return_address(0);
	frame.pc = (unsigned long)return_address;

	walk_stackframe(&frame, save_return_addr, &data);

	if (!data.level)
		return data.addr;
	else
		return NULL;
}
#else /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
#if defined(CONFIG_ARM_UNWIND)
#warning "TODO: return_address should use unwind tables"
#endif
/*
 * Stub for configurations without frame pointers (or with unwind
 * tables, which are not yet supported here): caller addresses cannot
 * be recovered, so every query reports "unknown".
 */
void *return_address(unsigned int level)
{
	(void)level;	/* depth is irrelevant without an unwinder */
	return NULL;
}
#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
EXPORT_SYMBOL_GPL(return_address);
| gpl-2.0 |
Mazout360/lge-kernel-star | arch/alpha/kernel/sys_alcor.c | 2272 | 7732 | /*
* linux/arch/alpha/kernel/sys_alcor.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the ALCOR and XLT (XL-300/366/433).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for ENABLED irqs. */
static unsigned long cached_irq_mask;
/*
 * Push the cached enable mask out to the GRU interrupt mask register.
 * Mask bits are true for ENABLED irqs (see cached_irq_mask above);
 * mb() orders the MMIO write.
 */
static inline void
alcor_update_irq_hw(unsigned long mask)
{
	*(vuip)GRU_INT_MASK = mask;
	mb();
}
/*
 * irq_chip unmask hook: set the irq's bit in the cached GRU mask and
 * write the mask to hardware.  PCI interrupt lines start at irq 16.
 */
static inline void
alcor_enable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	cached_irq_mask |= bit;
	alcor_update_irq_hw(cached_irq_mask);
}
/*
 * irq_chip mask hook: clear the irq's bit in the cached GRU mask and
 * write the mask to hardware.
 */
static void
alcor_disable_irq(struct irq_data *d)
{
	unsigned long bit = 1UL << (d->irq - 16);

	cached_irq_mask &= ~bit;
	alcor_update_irq_hw(cached_irq_mask);
}
/*
 * Mask the irq, then acknowledge it in the GRU by pulsing its bit in
 * the clear register (write bit, then write zero, each flushed with
 * mb()).  The write ordering here is what the hardware requires — do
 * not reorder.
 */
static void
alcor_mask_and_ack_irq(struct irq_data *d)
{
	alcor_disable_irq(d);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}
/*
 * Mask+ack for ISA interrupts cascaded through the i8259; on ALCOR/XLT
 * the summary bit (bit 31, the EISA interrupt per the GRU_INT_REQ map
 * below) must additionally be pulsed clear in the GRU.
 */
static void
alcor_isa_mask_and_ack_irq(struct irq_data *d)
{
	i8259a_mask_and_ack_irq(d);

	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
	*(vuip)GRU_INT_CLEAR = 0; mb();
}
/* irq_chip for the GRU-routed PCI interrupts (irqs 16 and up). */
static struct irq_chip alcor_irq_type = {
	.name		= "ALCOR",
	.irq_unmask	= alcor_enable_irq,
	.irq_mask	= alcor_disable_irq,
	.irq_mask_ack	= alcor_mask_and_ack_irq,
};
/*
 * Device-interrupt dispatcher: read the GRU interrupt summary and run
 * a handler for each pending bit.  Bit 31 cascades to the ISA/i8259
 * dispatcher; all other bits map directly onto irqs 16 + bit.
 */
static void
alcor_device_interrupt(unsigned long vector)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary register of the GRU */
	pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS;

	/*
	 * Now for every possible bit set, work through them and call
	 * the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);		/* position of the lowest set bit */
		pld &= pld - 1;		/* clear least bit set */
		if (i == 31) {
			isa_device_interrupt(vector);
		} else {
			handle_irq(16 + i);
		}
	}
}
/*
 * Boot-time interrupt setup: quiesce the GRU (everything masked,
 * level-triggered, ISA summary active-high, pending bits cleared),
 * register the alcor irq_chip for the connected GRU lines, then bring
 * up the cascaded i8259/ISA side and hook the cascade on irq 16+31.
 */
static void __init
alcor_init_irq(void)
{
	long i;

	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;

	*(vuip)GRU_INT_MASK  = 0; mb();			/* all disabled */
	*(vuip)GRU_INT_EDGE  = 0; mb();			/* all are level */
	*(vuip)GRU_INT_HILO  = 0x80000000U; mb();	/* ISA only HI */
	*(vuip)GRU_INT_CLEAR = 0; mb();			/* all clear */

	for (i = 16; i < 48; ++i) {
		/* On Alcor, at least, lines 20..30 are not connected
		   and can generate spurious interrupts if we turn them
		   on while IRQ probing.  */
		if (i >= 16+20 && i <= 16+30)
			continue;
		irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
	/* Route ISA acks through the GRU-aware variant. */
	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;

	init_i8259a_irqs();
	common_init_isa_dma();

	setup_irq(16+31, &isa_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* Summary @ GRU_INT_REQ:
* Bit Meaning
* 0 Interrupt Line A from slot 2
* 1 Interrupt Line B from slot 2
* 2 Interrupt Line C from slot 2
* 3 Interrupt Line D from slot 2
* 4 Interrupt Line A from slot 1
* 5 Interrupt line B from slot 1
* 6 Interrupt Line C from slot 1
* 7 Interrupt Line D from slot 1
* 8 Interrupt Line A from slot 0
* 9 Interrupt Line B from slot 0
*10 Interrupt Line C from slot 0
*11 Interrupt Line D from slot 0
*12 Interrupt Line A from slot 4
*13 Interrupt Line B from slot 4
*14 Interrupt Line C from slot 4
*15 Interrupt Line D from slot 4
*16 Interrupt Line A from slot 3
*17 Interrupt Line B from slot 3
*18 Interrupt Line C from slot 3
*19 Interrupt Line D from slot 3
*20-30 Reserved
*31 EISA interrupt
*
* The device to slot mapping looks like:
*
* Slot Device
* 6 built-in TULIP (XLT only)
* 7 PCI on board slot 0
* 8 PCI on board slot 3
* 9 PCI on board slot 4
* 10 PCEB (PCI-EISA bridge)
* 11 PCI on board slot 2
* 12 PCI on board slot 1
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
/*
 * Map a PCI device's slot/pin to an Alcor irq via a static table keyed
 * by IDSEL.  Rows correspond to IdSel 17..23 (device numbers 6..12);
 * the -1 row (PCEB bridge) has no interrupt of its own.
 */
static int __init
alcor_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[7][5] __initdata = {
		/*INT INTA INTB INTC INTD */
		/* note: IDSEL 17 is XLT only */
		{16+13, 16+13, 16+13, 16+13, 16+13},	/* IdSel 17, TULIP */
		{ 16+8, 16+8, 16+9, 16+10, 16+11},	/* IdSel 18, slot 0 */
		{16+16, 16+16, 16+17, 16+18, 16+19},	/* IdSel 19, slot 3 */
		{16+12, 16+12, 16+13, 16+14, 16+15},	/* IdSel 20, slot 4 */
		{ -1, -1, -1, -1, -1},			/* IdSel 21, PCEB */
		{ 16+0, 16+0, 16+1, 16+2, 16+3},	/* IdSel 22, slot 2 */
		{ 16+4, 16+4, 16+5, 16+6, 16+7},	/* IdSel 23, slot 1 */
	};
	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;

	/* COMMON_TABLE_LOOKUP picks up irq_tab/min_idsel/max_idsel/
	 * irqs_per_slot by name from this scope (see pci_impl.h). */
	return COMMON_TABLE_LOOKUP;
}
/*
 * Reboot/halt/power-off hook.  Always runs the CIA-level teardown;
 * when SRM console state is not being restored, additionally pokes the
 * GRU reset register (under SRM) for a restart and then halts.
 */
static void
alcor_kill_arch(int mode)
{
	cia_kill_arch(mode);
#ifndef ALPHA_RESTORE_SRM_SETUP
	switch(mode) {
	case LINUX_REBOOT_CMD_RESTART:
		/* Who said DEC engineer's have no sense of humor? ;-)  */
		if (alpha_using_srm) {
			*(vuip) GRU_RESET = 0x0000dead;
			mb();
		}
		break;
	case LINUX_REBOOT_CMD_HALT:
		break;
	case LINUX_REBOOT_CMD_POWER_OFF:
		break;
	}

	halt();
#endif
}
/*
 * PCI bring-up: run the common CIA init, then probe for a built-in
 * 21040 TULIP at devfn (6,0) to distinguish XLT-class boards from
 * plain ALCOR, switching the GRU summary bit set accordingly.
 */
static void __init
alcor_init_pci(void)
{
	struct pci_dev *dev;

	cia_init_pci();

	/*
	 * Now we can look to see if we are really running on an XLT-type
	 * motherboard, by looking for a 21040 TULIP in slot 6, which is
	 * built into XLT and BRET/MAVERICK, but not available on ALCOR.
	 */
	dev = pci_get_device(PCI_VENDOR_ID_DEC,
			     PCI_DEVICE_ID_DEC_TULIP,
			     NULL);
	if (dev && dev->devfn == PCI_DEVFN(6,0)) {
		alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS;
		printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
		       __func__);
	}
	/* Drop the reference taken by pci_get_device (NULL-safe). */
	pci_dev_put(dev);
}
/*
* The System Vectors
*/
/* Machine vector for ALCOR boards; ALIAS_MV makes it selectable. */
struct alpha_machine_vector alcor_mv __initmv = {
	.vector_name		= "Alcor",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= alcor_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= alcor_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alcor_init_pci,
	.kill_arch		= alcor_kill_arch,
	.pci_map_irq		= alcor_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .cia = {
		/* Alcor lacks the built-in TULIP, so bit set differs
		 * from XLT. */
		.gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS
	}}
};
ALIAS_MV(alcor)
/*
 * Machine vector for XLT boards.  Differs from alcor_mv only in the
 * ISA DMA limit and the GRU summary bit set (XLT includes the TULIP).
 */
struct alpha_machine_vector xlt_mv __initmv = {
	.vector_name		= "XLT",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= EISA_DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 48,
	.device_interrupt	= alcor_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= alcor_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alcor_init_pci,
	.kill_arch		= alcor_kill_arch,
	.pci_map_irq		= alcor_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .cia = {
		.gru_int_req_bits = XLT_GRU_INT_REQ_BITS
	}}
};
/* No alpha_mv alias for XLT, since we compile it in unconditionally
with ALCOR; setup_arch knows how to cope. */
| gpl-2.0 |
infected-lp/android_kernel_sony_msm | net/mac80211/aes_cmac.c | 2528 | 3091 | /*
* AES-128-CMAC with TLen 16 for IEEE 802.11w BIP
* Copyright 2008, Jouni Malinen <j@w1.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/export.h>
#include <linux/err.h>
#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_cmac.h"
#define AES_CMAC_KEY_LEN 16
#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
#define AAD_LEN 20
/*
 * Double the 128-bit value in pad within GF(2^128): shift the whole
 * block left by one bit and, if a bit fell off the top, reduce by the
 * field polynomial (XOR 0x87 into the low byte).  Used for CMAC
 * subkey derivation.
 */
static void gf_mulx(u8 *pad)
{
	int msb_was_set = pad[0] & 0x80;
	int i;

	for (i = 0; i + 1 < AES_BLOCK_SIZE; i++)
		pad[i] = (u8)((pad[i] << 1) | (pad[i + 1] >> 7));
	pad[AES_BLOCK_SIZE - 1] <<= 1;

	if (msb_was_set)
		pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}
/*
 * Core AES-128-CMAC (NIST SP 800-38B style) over a scatter list of
 * num_elem (addr[i], len[i]) segments, writing a CMAC_TLEN-byte
 * truncated tag to mac.
 *
 * cbc accumulates the CBC-MAC of all complete blocks except the last;
 * pad first holds the subkey (E_K(0) doubled once for a complete final
 * block, twice for a partial/empty one) and then the final encryption
 * whose leading CMAC_TLEN bytes become the MAC.
 *
 * NOTE(review): the segment-advance step (pos >= end -> addr[++e])
 * reads one element past addr[]/len[] after the last byte of input;
 * the stale pointer is never dereferenced, but it is fragile.
 */
static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
				const u8 *addr[], const size_t *len, u8 *mac)
{
	u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
	const u8 *pos, *end;
	size_t i, e, left, total_len;

	memset(cbc, 0, AES_BLOCK_SIZE);

	total_len = 0;
	for (e = 0; e < num_elem; e++)
		total_len += len[e];
	left = total_len;

	e = 0;
	pos = addr[0];
	end = pos + len[0];

	while (left >= AES_BLOCK_SIZE) {
		/* XOR the next 16 input bytes into cbc, hopping across
		 * segment boundaries as needed. */
		for (i = 0; i < AES_BLOCK_SIZE; i++) {
			cbc[i] ^= *pos++;
			if (pos >= end) {
				e++;
				pos = addr[e];
				end = pos + len[e];
			}
		}
		/* The last complete block is deferred: it is mixed with
		 * the subkey below before the final encryption. */
		if (left > AES_BLOCK_SIZE)
			crypto_cipher_encrypt_one(tfm, cbc, cbc);
		left -= AES_BLOCK_SIZE;
	}

	/* L = E_K(0); one doubling gives K1 (complete final block). */
	memset(pad, 0, AES_BLOCK_SIZE);
	crypto_cipher_encrypt_one(tfm, pad, pad);
	gf_mulx(pad);

	if (left || total_len == 0) {
		/* Partial (or empty) final block: 0x80 padding, and a
		 * second doubling gives K2 instead. */
		for (i = 0; i < left; i++) {
			cbc[i] ^= *pos++;
			if (pos >= end) {
				e++;
				pos = addr[e];
				end = pos + len[e];
			}
		}
		cbc[left] ^= 0x80;
		gf_mulx(pad);
	}

	/* T = E_K(subkey XOR last block), truncated to CMAC_TLEN. */
	for (i = 0; i < AES_BLOCK_SIZE; i++)
		pad[i] ^= cbc[i];
	crypto_cipher_encrypt_one(tfm, pad, pad);
	memcpy(mac, pad, CMAC_TLEN);
}
/*
 * Compute the BIP MIC: AES-128-CMAC over the AAD followed by the frame
 * body with the trailing MIC field replaced by CMAC_TLEN zero bytes.
 */
void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
			const u8 *data, size_t data_len, u8 *mic)
{
	u8 zero[CMAC_TLEN] = { 0 };
	const u8 *addr[3] = { aad, data, zero };
	size_t len[3] = { AAD_LEN, data_len - CMAC_TLEN, CMAC_TLEN };

	aes_128_cmac_vector(tfm, 3, addr, len, mic);
}
/*
 * Allocate an AES cipher handle for BIP and load the 128-bit key.
 *
 * Returns the cipher handle on success or an ERR_PTR() on failure;
 * callers already check the result with IS_ERR().
 *
 * Fix: the return value of crypto_cipher_setkey() was ignored, so a
 * setkey failure handed back a cipher with no key programmed.  Free
 * the tfm and propagate the error instead.
 */
struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
	if (err) {
		crypto_free_cipher(tfm);
		return ERR_PTR(err);
	}

	return tfm;
}
/* Release a cipher handle obtained from ieee80211_aes_cmac_key_setup(). */
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
{
	crypto_free_cipher(tfm);
}
/*
 * Export the CMAC subkeys K1 and K2 for a BIP key (used by drivers that
 * offload BIP): L = E_K(0), K1 = double(L), K2 = double(K1), where
 * doubling is gf_mulx() in GF(2^128).
 */
void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
					u8 *k1, u8 *k2)
{
	struct ieee80211_key *key =
		container_of(keyconf, struct ieee80211_key, conf);
	u8 l[AES_BLOCK_SIZE];

	memset(l, 0, sizeof(l));
	crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);

	memcpy(k1, l, AES_BLOCK_SIZE);
	gf_mulx(k1);

	memcpy(k2, k1, AES_BLOCK_SIZE);
	gf_mulx(k2);
}
EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.