repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
aznrice/l-preview | kernel/irq_work.c | 4683 | 3201 | /*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* Provides a framework for enqueueing and running callbacks from hardirq
* context. The enqueueing is NMI-safe.
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>
/*
* An entry can be in one of four states:
*
* free NULL, 0 -> {claimed} : free to be used
* claimed NULL, 3 -> {pending} : claimed to be enqueued
* pending next, 3 -> {busy} : queued, pending callback
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
#define IRQ_WORK_PENDING 1UL
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
/*
* Claim the entry so that no one else will poke at it.
*/
static bool irq_work_claim(struct irq_work *work)
{
unsigned long flags, nflags;
for (;;) {
flags = work->flags;
if (flags & IRQ_WORK_PENDING)
return false;
nflags = flags | IRQ_WORK_FLAGS;
if (cmpxchg(&work->flags, flags, nflags) == flags)
break;
cpu_relax();
}
return true;
}
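/*
* Illustrative sketch, not part of the original file: a minimal user of
* this API. The names my_callback/my_work are hypothetical and only show
* how a claimed entry is queued from atomic context.
*/
#if 0 /* example only */
static void my_callback(struct irq_work *work)
{
/* runs later from hardirq context once arch_irq_work_raise() fires */
}
static struct irq_work my_work = { .func = my_callback };
/* from NMI or any other atomic context: irq_work_queue(&my_work); */
#endif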
void __weak arch_irq_work_raise(void)
{
/*
* Lame architectures will get the timer tick callback
*/
}
/*
* Queue the entry and raise the IPI if needed.
*/
static void __irq_work_queue(struct irq_work *work)
{
bool empty;
preempt_disable();
empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
/* The list was empty, raise self-interrupt to start processing. */
if (empty)
arch_irq_work_raise();
preempt_enable();
}
/*
* Enqueue the irq_work @entry; returns true on success, false when the
* @entry was already enqueued by someone else.
*
* Can be re-enqueued while the callback is still in progress.
*/
bool irq_work_queue(struct irq_work *work)
{
if (!irq_work_claim(work)) {
/*
* Already enqueued, can't do!
*/
return false;
}
__irq_work_queue(work);
return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
/*
* Run the irq_work entries on this cpu. Must be run from hardirq
* context with local IRQs disabled.
*/
void irq_work_run(void)
{
struct irq_work *work;
struct llist_head *this_list;
struct llist_node *llnode;
this_list = &__get_cpu_var(irq_work_list);
if (llist_empty(this_list))
return;
BUG_ON(!in_irq());
BUG_ON(!irqs_disabled());
llnode = llist_del_all(this_list);
while (llnode != NULL) {
work = llist_entry(llnode, struct irq_work, llnode);
llnode = llist_next(llnode);
/*
* Clear the PENDING bit, after this point the @work
* can be re-used.
*/
work->flags = IRQ_WORK_BUSY;
work->func(work);
/*
* Clear the BUSY bit and return to the free state if
* no-one else claimed it meanwhile.
*/
(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
}
}
EXPORT_SYMBOL_GPL(irq_work_run);
/*
* Synchronize against the irq_work @entry, ensures the entry is not
* currently in use.
*/
void irq_work_sync(struct irq_work *work)
{
WARN_ON_ONCE(irqs_disabled());
while (work->flags & IRQ_WORK_BUSY)
cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
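/*
* Illustrative sketch, not part of the original file: before freeing an
* irq_work item, a user would call irq_work_sync() as in this
* hypothetical teardown helper (my_work as in the example above).
*/
#if 0 /* example only */
static void my_teardown(struct irq_work *work)
{
irq_work_sync(work); /* wait for any in-flight callback to finish */
/* the memory backing *work may now be reused or freed */
}
#endif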
| gpl-2.0 |
vuanhduy/odroidxu-3.4.y | drivers/media/video/tea6420.c | 7243 | 4447 | /*
tea6420 - i2c-driver for the tea6420 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
The tea6420 is a bus controlled audio-matrix with 5 stereo inputs,
4 stereo outputs and gain control for each output.
It is cascadable, i.e. it can be found at the addresses 0x98
and 0x9a on the i2c-bus.
For detailed information, download the specifications directly
from SGS Thomson at http://www.st.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include "tea6420.h"
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_DESCRIPTION("tea6420 driver");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* make a connection between the input 'i' and the output 'o'
with gain 'g' (note: i = 6 means 'mute') */
static int tea6420_s_routing(struct v4l2_subdev *sd,
u32 i, u32 o, u32 config)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int g = (o >> 4) & 0xf;
u8 byte;
int ret;
o &= 0xf;
v4l2_dbg(1, debug, sd, "i=%d, o=%d, g=%d\n", i, o, g);
/* check if the parameters are valid */
if (i < 1 || i > 6 || o < 1 || o > 4 || g < 0 || g > 6 || g % 2 != 0)
return -EINVAL;
byte = ((o - 1) << 5);
byte |= (i - 1);
/* to understand this, have a look at the tea6420-specs (p.5) */
switch (g) {
case 0:
byte |= (3 << 3);
break;
case 2:
byte |= (2 << 3);
break;
case 4:
byte |= (1 << 3);
break;
case 6:
break;
}
ret = i2c_smbus_write_byte(client, byte);
if (ret) {
v4l2_dbg(1, debug, sd,
"i2c_smbus_write_byte() failed, ret:%d\n", ret);
return -EIO;
}
return 0;
}
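/*
* Worked example, not part of the original file: routing input 3 to
* output 2 with gain value 4 encodes, per the packing above, as
* ((2 - 1) << 5) | (1 << 3) | (3 - 1) == 0x2a. The helper name is
* hypothetical.
*/
#if 0 /* example only */
static u8 tea6420_encode_example(void)
{
u8 byte = ((2 - 1) << 5); /* output 2 -> bits 7..5 */
byte |= (3 - 1); /* input 3 -> bits 2..0 */
byte |= (1 << 3); /* g == 4 -> gain code 1, see switch above */
return byte; /* 0x2a */
}
#endif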
static int tea6420_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TEA6420, 0);
}
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops tea6420_core_ops = {
.g_chip_ident = tea6420_g_chip_ident,
};
static const struct v4l2_subdev_audio_ops tea6420_audio_ops = {
.s_routing = tea6420_s_routing,
};
static const struct v4l2_subdev_ops tea6420_ops = {
.core = &tea6420_core_ops,
.audio = &tea6420_audio_ops,
};
static int tea6420_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct v4l2_subdev *sd;
int err, i;
/* let's see whether this adapter can support what we need */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE))
return -EIO;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tea6420_ops);
/* set initial values: set "mute"-input to all outputs at gain 0 */
err = 0;
for (i = 1; i < 5; i++)
err += tea6420_s_routing(sd, 6, i, 0);
if (err) {
v4l_dbg(1, debug, client, "could not initialize tea6420\n");
return -ENODEV;
}
return 0;
}
static int tea6420_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(sd);
return 0;
}
static const struct i2c_device_id tea6420_id[] = {
{ "tea6420", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tea6420_id);
static struct i2c_driver tea6420_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "tea6420",
},
.probe = tea6420_probe,
.remove = tea6420_remove,
.id_table = tea6420_id,
};
module_i2c_driver(tea6420_driver);
| gpl-2.0 |
MattCrystal/drunken-avenger | arch/arm/mach-exynos/setup-keypad.c | 7755 | 1156 | /* linux/arch/arm/mach-exynos4/setup-keypad.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* GPIO configuration for Exynos4 KeyPad device
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <plat/gpio-cfg.h>
void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
{
/* Keypads can come in various row/column combinations; just making sure */
if (rows > 8) {
/* Set all the necessary GPX2 pins: KP_ROW[0~7] */
s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), 8, S3C_GPIO_SFN(3),
S3C_GPIO_PULL_UP);
/* Set all the necessary GPX3 pins: KP_ROW[8~] */
s3c_gpio_cfgall_range(EXYNOS4_GPX3(0), (rows - 8),
S3C_GPIO_SFN(3), S3C_GPIO_PULL_UP);
} else {
/* Set all the necessary GPX2 pins: KP_ROW[x] */
s3c_gpio_cfgall_range(EXYNOS4_GPX2(0), rows, S3C_GPIO_SFN(3),
S3C_GPIO_PULL_UP);
}
/* Set all the necessary GPX1 pins to special-function 3: KP_COL[x] */
s3c_gpio_cfgrange_nopull(EXYNOS4_GPX1(0), cols, S3C_GPIO_SFN(3));
}
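/*
* Illustrative sketch, not part of the original file: a board file with a
* hypothetical 10x8 matrix would call the helper like this, exercising
* the rows > 8 branch above (8 rows on GPX2, 2 on GPX3).
*/
#if 0 /* example only */
static void example_board_keypad_setup(void)
{
samsung_keypad_cfg_gpio(10, 8);
}
#endif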
| gpl-2.0 |
weritos666/ARCHOS_50_Platinum | drivers/gpu/drm/gma500/accel_2d.c | 8011 | 9712 | /**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
* develop this driver.
*
**************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "framebuffer.h"
/**
* psb_spank - reset the 2D engine
* @dev_priv: our PSB DRM device
*
* Soft reset the graphics engine and then reload the necessary registers.
* We use this at initialisation time but it will become relevant for
* accelerated X later
*/
void psb_spank(struct drm_psb_private *dev_priv)
{
PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
PSB_RSGX32(PSB_CR_SOFT_RESET);
msleep(1);
PSB_WSGX32(0, PSB_CR_SOFT_RESET);
wmb();
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
wmb();
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
msleep(1);
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
}
/**
* psb_2d_wait_available - wait for FIFO room
* @dev_priv: our DRM device
* @size: size (in dwords) of the command we want to issue
*
* Wait until there is room to load the FIFO with our data. If the
* device is not responding then reset it
*/
static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
unsigned size)
{
uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
unsigned long t = jiffies + HZ;
while (avail < size) {
avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
if (time_after(jiffies, t)) {
psb_spank(dev_priv);
return -EIO;
}
}
return 0;
}
/**
* psbfb_2d_submit - submit a 2D command
* @dev_priv: our DRM device
* @cmdbuf: command to issue
* @size: length (in dwords)
*
* Issue one or more 2D commands to the accelerator. This needs to be
* serialized later when we add the GEM interfaces for acceleration
*/
static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
unsigned size)
{
int ret = 0;
int i;
unsigned submit_size;
unsigned long flags;
spin_lock_irqsave(&dev_priv->lock_2d, flags);
while (size > 0) {
submit_size = (size < 0x60) ? size : 0x60;
size -= submit_size;
ret = psb_2d_wait_available(dev_priv, submit_size);
if (ret)
break;
submit_size <<= 2;
for (i = 0; i < submit_size; i += 4)
PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
(void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
}
spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
return ret;
}
/**
* psb_accel_2d_copy_direction - compute blit order
* @xdir: X direction of move
* @ydir: Y direction of move
*
* Compute the correct order settings to ensure that an overlapping blit
* correctly copies all the pixels.
*/
static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
{
if (xdir < 0)
return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
PSB_2D_COPYORDER_TR2BL;
else
return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
PSB_2D_COPYORDER_TL2BR;
}
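/*
* Worked examples, not part of the original file: the four sign
* combinations map onto the four copy orders computed above.
* xdir < 0, ydir < 0 -> PSB_2D_COPYORDER_BR2TL
* xdir < 0, ydir >= 0 -> PSB_2D_COPYORDER_TR2BL
* xdir >= 0, ydir < 0 -> PSB_2D_COPYORDER_BL2TR
* xdir >= 0, ydir >= 0 -> PSB_2D_COPYORDER_TL2BR
*/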
/**
* psb_accel_2d_copy - accelerated 2D copy
* @dev_priv: our DRM device
* @src_offset: source offset in bytes
* @src_stride: source stride in bytes
* @src_format: source psb 2D format define
* @dst_offset: destination offset in bytes
* @dst_stride: destination stride in bytes
* @dst_format: destination psb 2D format define
* @src_x: source X offset in pixels
* @src_y: source Y offset in pixels
* @dst_x: destination X offset in pixels
* @dst_y: destination Y offset in pixels
* @size_x: width of the copied area in pixels
* @size_y: height of the copied area in pixels
*
* Format and issue a 2D accelerated copy command.
*/
static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
uint32_t src_offset, uint32_t src_stride,
uint32_t src_format, uint32_t dst_offset,
uint32_t dst_stride, uint32_t dst_format,
uint16_t src_x, uint16_t src_y,
uint16_t dst_x, uint16_t dst_y,
uint16_t size_x, uint16_t size_y)
{
uint32_t blit_cmd;
uint32_t buffer[10];
uint32_t *buf;
uint32_t direction;
buf = buffer;
direction =
psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
if (direction == PSB_2D_COPYORDER_BR2TL ||
direction == PSB_2D_COPYORDER_TR2BL) {
src_x += size_x - 1;
dst_x += size_x - 1;
}
if (direction == PSB_2D_COPYORDER_BR2TL ||
direction == PSB_2D_COPYORDER_BL2TR) {
src_y += size_y - 1;
dst_y += size_y - 1;
}
blit_cmd =
PSB_2D_BLIT_BH |
PSB_2D_ROT_NONE |
PSB_2D_DSTCK_DISABLE |
PSB_2D_SRCCK_DISABLE |
PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
*buf++ = PSB_2D_FENCE_BH;
*buf++ =
PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
PSB_2D_DST_STRIDE_SHIFT);
*buf++ = dst_offset;
*buf++ =
PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
PSB_2D_SRC_STRIDE_SHIFT);
*buf++ = src_offset;
*buf++ =
PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
(src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
*buf++ = blit_cmd;
*buf++ =
(dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
PSB_2D_DST_YSTART_SHIFT);
*buf++ =
(size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
PSB_2D_DST_YSIZE_SHIFT);
*buf++ = PSB_2D_FLUSH_BH;
return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
}
/**
* psbfb_copyarea_accel - copyarea acceleration for /dev/fb
* @info: our framebuffer
* @a: copyarea parameters from the framebuffer core
*
* Perform a 2D copy via the accelerator
*/
static void psbfb_copyarea_accel(struct fb_info *info,
const struct fb_copyarea *a)
{
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t offset;
uint32_t stride;
uint32_t src_format;
uint32_t dst_format;
if (!fb)
return;
offset = psbfb->gtt->offset;
stride = fb->pitches[0];
switch (fb->depth) {
case 8:
src_format = PSB_2D_SRC_332RGB;
dst_format = PSB_2D_DST_332RGB;
break;
case 15:
src_format = PSB_2D_SRC_555RGB;
dst_format = PSB_2D_DST_555RGB;
break;
case 16:
src_format = PSB_2D_SRC_565RGB;
dst_format = PSB_2D_DST_565RGB;
break;
case 24:
case 32:
/* this is wrong but since we don't do blending it's okay */
src_format = PSB_2D_SRC_8888ARGB;
dst_format = PSB_2D_DST_8888ARGB;
break;
default:
/* software fallback */
cfb_copyarea(info, a);
return;
}
if (!gma_power_begin(dev, false)) {
cfb_copyarea(info, a);
return;
}
psb_accel_2d_copy(dev_priv,
offset, stride, src_format,
offset, stride, dst_format,
a->sx, a->sy, a->dx, a->dy, a->width, a->height);
gma_power_end(dev);
}
/**
* psbfb_copyarea - 2D copy interface
* @info: our framebuffer
* @region: region to copy
*
* Copy an area of the framebuffer console either by the accelerator
* or directly using the cfb helpers according to the request
*/
void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region)
{
if (unlikely(info->state != FBINFO_STATE_RUNNING))
return;
/* Avoid the 8 pixel erratum */
if (region->width == 8 || region->height == 8 ||
(info->flags & FBINFO_HWACCEL_DISABLED))
return cfb_copyarea(info, region);
psbfb_copyarea_accel(info, region);
}
/**
* psbfb_sync - synchronize 2D
* @info: our framebuffer
*
* Wait for the 2D engine to quiesce so that we can do CPU
* access to the framebuffer again
*/
int psbfb_sync(struct fb_info *info)
{
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long _end = jiffies + DRM_HZ;
int busy = 0;
unsigned long flags;
spin_lock_irqsave(&dev_priv->lock_2d, flags);
/*
* First idle the 2D engine.
*/
if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
goto out;
do {
busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
cpu_relax();
} while (busy && !time_after_eq(jiffies, _end));
if (busy)
busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
if (busy)
goto out;
do {
busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
_PSB_C2B_STATUS_BUSY) != 0);
cpu_relax();
} while (busy && !time_after_eq(jiffies, _end));
if (busy)
busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
_PSB_C2B_STATUS_BUSY) != 0);
out:
spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
return (busy) ? -EBUSY : 0;
}
| gpl-2.0 |
Caio99BR/FalconSSKernel | arch/mips/loongson/common/serial.c | 8779 | 2024 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Yan hua (yanhua@lemote.com)
* Author: Wu Zhangjin (wuzhangjin@gmail.com)
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <machine.h>
#define PORT(int) \
{ \
.irq = int, \
.uartclk = 1843200, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
#define PORT_M(int) \
{ \
.irq = MIPS_CPU_IRQ_BASE + (int), \
.uartclk = 3686400, \
.iotype = UPIO_MEM, \
.membase = (void __iomem *)NULL, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
static struct plat_serial8250_port uart8250_data[][2] = {
[MACH_LOONGSON_UNKNOWN] {},
[MACH_LEMOTE_FL2E] {PORT(4), {} },
[MACH_LEMOTE_FL2F] {PORT(3), {} },
[MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
[MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
[MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
[MACH_LEMOTE_NAS] {PORT_M(3), {} },
[MACH_LEMOTE_LL2F] {PORT(3), {} },
[MACH_LOONGSON_END] {},
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
};
static int __init serial_init(void)
{
unsigned char iotype;
iotype = uart8250_data[mips_machtype][0].iotype;
if (UPIO_MEM == iotype)
uart8250_data[mips_machtype][0].membase =
(void __iomem *)_loongson_uart_base;
else if (UPIO_PORT == iotype)
uart8250_data[mips_machtype][0].iobase =
loongson_uart_base - LOONGSON_PCIIO_BASE;
uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
return platform_device_register(&uart8250_device);
}
device_initcall(serial_init);
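/*
* Illustrative sketch, not part of the original file: on a machine using
* PORT(4) (e.g. MACH_LEMOTE_FL2E), the platform data registered by
* serial_init() is roughly equivalent to this hand-written entry; iobase
* is filled in at runtime from loongson_uart_base.
*/
#if 0 /* example only */
static struct plat_serial8250_port example_port = {
.irq = 4,
.uartclk = 1843200,
.iotype = UPIO_PORT,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.regshift = 0,
/* .iobase = loongson_uart_base - LOONGSON_PCIIO_BASE (runtime) */
};
#endif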
| gpl-2.0 |
sudosurootdev/kernel_lge_ls980 | drivers/input/touchscreen/mk712.c | 12875 | 5812 | /*
* ICS MK712 touchscreen controller driver
*
* Copyright (c) 1999-2002 Transmeta Corporation
* Copyright (c) 2005 Rick Koch <n1gp@hotmail.com>
* Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
/*
* This driver supports the ICS MicroClock MK712 TouchScreen controller,
* found in Gateway AOL Connected Touchpad computers.
*
* Documentation for ICS MK712 can be found at:
* http://www.idt.com/products/getDoc.cfm?docID=18713923
*/
/*
* 1999-12-18: original version, Daniel Quinlan
* 1999-12-19: added anti-jitter code, report pen-up events, fixed mk712_poll
* to use queue_empty, Nathan Laredo
* 1999-12-20: improved random point rejection, Nathan Laredo
* 2000-01-05: checked in new anti-jitter code, changed mouse protocol, fixed
* queue code, added module options, other fixes, Daniel Quinlan
* 2002-03-15: Clean up for kernel merge <alan@redhat.com>
* Fixed multi open race, fixed memory checks, fixed resource
* allocation, fixed close/powerdown bug, switched to new init
* 2005-01-18: Ported to 2.6 from 2.4.28, Rick Koch
* 2005-02-05: Rewritten for the input layer, Vojtech Pavlik
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <asm/io.h>
MODULE_AUTHOR("Daniel Quinlan <quinlan@pathname.com>, Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("ICS MicroClock MK712 TouchScreen driver");
MODULE_LICENSE("GPL");
static unsigned int mk712_io = 0x260; /* Also 0x200, 0x208, 0x300 */
module_param_named(io, mk712_io, uint, 0);
MODULE_PARM_DESC(io, "I/O base address of MK712 touchscreen controller");
static unsigned int mk712_irq = 10; /* Also 12, 14, 15 */
module_param_named(irq, mk712_irq, uint, 0);
MODULE_PARM_DESC(irq, "IRQ of MK712 touchscreen controller");
/* eight 8-bit registers */
#define MK712_STATUS 0
#define MK712_X 2
#define MK712_Y 4
#define MK712_CONTROL 6
#define MK712_RATE 7
/* status */
#define MK712_STATUS_TOUCH 0x10
#define MK712_CONVERSION_COMPLETE 0x80
/* control */
#define MK712_ENABLE_INT 0x01
#define MK712_INT_ON_CONVERSION_COMPLETE 0x02
#define MK712_INT_ON_CHANGE_IN_TOUCH_STATUS 0x04
#define MK712_ENABLE_PERIODIC_CONVERSIONS 0x10
#define MK712_READ_ONE_POINT 0x20
#define MK712_POWERUP 0x40
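/*
* Worked example, not part of the original file: mk712_open() below
* composes its control word from these bits, i.e.
* MK712_ENABLE_INT | MK712_INT_ON_CONVERSION_COMPLETE |
* MK712_INT_ON_CHANGE_IN_TOUCH_STATUS |
* MK712_ENABLE_PERIODIC_CONVERSIONS | MK712_POWERUP == 0x57
*/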
static struct input_dev *mk712_dev;
static DEFINE_SPINLOCK(mk712_lock);
static irqreturn_t mk712_interrupt(int irq, void *dev_id)
{
unsigned char status;
static int debounce = 1;
static unsigned short last_x;
static unsigned short last_y;
spin_lock(&mk712_lock);
status = inb(mk712_io + MK712_STATUS);
if (~status & MK712_CONVERSION_COMPLETE) {
debounce = 1;
goto end;
}
if (~status & MK712_STATUS_TOUCH) {
debounce = 1;
input_report_key(mk712_dev, BTN_TOUCH, 0);
goto end;
}
if (debounce) {
debounce = 0;
goto end;
}
input_report_key(mk712_dev, BTN_TOUCH, 1);
input_report_abs(mk712_dev, ABS_X, last_x);
input_report_abs(mk712_dev, ABS_Y, last_y);
end:
last_x = inw(mk712_io + MK712_X) & 0x0fff;
last_y = inw(mk712_io + MK712_Y) & 0x0fff;
input_sync(mk712_dev);
spin_unlock(&mk712_lock);
return IRQ_HANDLED;
}
static int mk712_open(struct input_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&mk712_lock, flags);
outb(0, mk712_io + MK712_CONTROL); /* Reset */
outb(MK712_ENABLE_INT | MK712_INT_ON_CONVERSION_COMPLETE |
MK712_INT_ON_CHANGE_IN_TOUCH_STATUS |
MK712_ENABLE_PERIODIC_CONVERSIONS |
MK712_POWERUP, mk712_io + MK712_CONTROL);
outb(10, mk712_io + MK712_RATE); /* 187 points per second */
spin_unlock_irqrestore(&mk712_lock, flags);
return 0;
}
static void mk712_close(struct input_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&mk712_lock, flags);
outb(0, mk712_io + MK712_CONTROL);
spin_unlock_irqrestore(&mk712_lock, flags);
}
static int __init mk712_init(void)
{
int err;
if (!request_region(mk712_io, 8, "mk712")) {
printk(KERN_WARNING "mk712: unable to get IO region\n");
return -ENODEV;
}
outb(0, mk712_io + MK712_CONTROL);
if ((inw(mk712_io + MK712_X) & 0xf000) || /* Sanity check */
(inw(mk712_io + MK712_Y) & 0xf000) ||
(inw(mk712_io + MK712_STATUS) & 0xf333)) {
printk(KERN_WARNING "mk712: device not present\n");
err = -ENODEV;
goto fail1;
}
mk712_dev = input_allocate_device();
if (!mk712_dev) {
printk(KERN_ERR "mk712: not enough memory\n");
err = -ENOMEM;
goto fail1;
}
mk712_dev->name = "ICS MicroClock MK712 TouchScreen";
mk712_dev->phys = "isa0260/input0";
mk712_dev->id.bustype = BUS_ISA;
mk712_dev->id.vendor = 0x0005;
mk712_dev->id.product = 0x0001;
mk712_dev->id.version = 0x0100;
mk712_dev->open = mk712_open;
mk712_dev->close = mk712_close;
mk712_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
mk712_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(mk712_dev, ABS_X, 0, 0xfff, 88, 0);
input_set_abs_params(mk712_dev, ABS_Y, 0, 0xfff, 88, 0);
if (request_irq(mk712_irq, mk712_interrupt, 0, "mk712", mk712_dev)) {
printk(KERN_WARNING "mk712: unable to get IRQ\n");
err = -EBUSY;
goto fail1;
}
err = input_register_device(mk712_dev);
if (err)
goto fail2;
return 0;
fail2: free_irq(mk712_irq, mk712_dev);
fail1: input_free_device(mk712_dev);
release_region(mk712_io, 8);
return err;
}
static void __exit mk712_exit(void)
{
input_unregister_device(mk712_dev);
free_irq(mk712_irq, mk712_dev);
release_region(mk712_io, 8);
}
module_init(mk712_init);
module_exit(mk712_exit);
| gpl-2.0 |
wozgeass/Raspberry-RT | arch/sparc/kernel/iommu.c | 76 | 19350 | /* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include <asm/iommu.h>
#include "iommu_common.h"
#include "kernel.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#define iommu_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define iommu_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
unsigned long tag;
int entry;
tag = iommu->iommu_tags;
for (entry = 0; entry < 16; entry++) {
iommu_write(tag, 0);
tag += 8;
}
/* Ensure completion of previous PIO writes. */
(void) iommu_read(iommu->write_complete_reg);
}
}
#define IOPTE_CONSISTENT(CTX) \
(IOPTE_VALID | IOPTE_CACHE | \
(((CTX) << 47) & IOPTE_CONTEXT))
#define IOPTE_STREAMING(CTX) \
(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid; instead they
* are pointed at a dummy page.
*/
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
val &= ~IOPTE_PAGE;
val |= iommu->dummy_page_pa;
iopte_val(*iopte) = val;
}
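/*
* Illustrative sketch, not part of the original file: because "free"
* entries still point at the dummy page, scanning the table uses
* IOPTE_IS_DUMMY() as in this hypothetical helper.
*/
#if 0 /* example only */
static long example_count_free(struct iommu *iommu, unsigned long n)
{
unsigned long i;
long free = 0;
for (i = 0; i < n; i++)
if (IOPTE_IS_DUMMY(iommu, &iommu->page_table[i]))
free++;
return free;
}
#endif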
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
{
unsigned long i, order, sz, num_tsb_entries;
struct page *page;
num_tsb_entries = tsbsize / sizeof(iopte_t);
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->tbl.table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->tbl.map)
return -ENOMEM;
memset(iommu->tbl.map, 0, sz);
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
(tlb_type != hypervisor ? iommu_flushall : NULL),
false, 1, false);
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
iommu->dummy_page = (unsigned long) page_address(page);
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
order = get_order(tsbsize);
page = alloc_pages_node(numa_node, GFP_KERNEL, order);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
goto out_free_dummy_page;
}
iommu->page_table = (iopte_t *)page_address(page);
for (i = 0; i < num_tsb_entries; i++)
iopte_make_dummy(iommu, &iommu->page_table[i]);
return 0;
out_free_dummy_page:
free_page(iommu->dummy_page);
iommu->dummy_page = 0UL;
out_free_map:
kfree(iommu->tbl.map);
iommu->tbl.map = NULL;
return -ENOMEM;
}
static inline iopte_t *alloc_npages(struct device *dev,
struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
if (unlikely(n == IOMMU_NUM_CTXS)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
n = 0;
}
}
if (n)
__set_bit(n, iommu->ctx_bitmap);
return n;
}
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
if (ctx < iommu->ctx_lowest_free)
iommu->ctx_lowest_free = ctx;
}
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
unsigned long order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
iopte_t *iopte;
void *ret;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
*dma_addrp = (iommu->tbl.table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
first_page = __pa(first_page);
while (npages--) {
iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
return ret;
}
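/*
* Illustrative sketch, not part of the original file: drivers reach the
* routine above through the generic DMA API rather than calling it
* directly; the device pointer and size here are hypothetical.
*/
#if 0 /* example only */
static void *example_alloc(struct device *dev, dma_addr_t *handle)
{
/* dispatches to dma_4u_alloc_coherent() via sun4u_dma_ops */
return dma_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
}
#endif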
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
unsigned long attrs)
{
struct iommu *iommu;
unsigned long order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = alloc_npages(dev, iommu, npages);
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(!base))
goto bad;
bus_addr = (iommu->tbl.table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
return ret;
bad:
iommu_free_ctx(iommu, ctx);
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
}
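/*
* Illustrative sketch, not part of the original file: the streaming map
* above is likewise reached via the DMA API; the page and direction here
* are hypothetical.
*/
#if 0 /* example only */
static dma_addr_t example_map(struct device *dev, struct page *page)
{
/* dispatches to dma_4u_map_page() via sun4u_dma_ops */
return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
}
#endif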
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
u32 vaddr, unsigned long ctx, unsigned long npages,
enum dma_data_direction direction)
{
int limit;
if (strbuf->strbuf_ctxflush &&
iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
u64 val;
flushreg = strbuf->strbuf_ctxflush;
matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
iommu_write(flushreg, ctx);
val = iommu_read(matchreg);
val &= 0xffff;
if (!val)
goto do_flush_sync;
while (val) {
if (val & 0x1)
iommu_write(flushreg, ctx);
val >>= 1;
}
val = iommu_read(matchreg);
if (unlikely(val)) {
printk(KERN_WARNING "strbuf_flush: ctx flush "
"timeout matchreg[%llx] ctx[%lx]\n",
val, ctx);
goto do_page_flush;
}
} else {
unsigned long i;
do_page_flush:
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
iommu_write(strbuf->strbuf_pflush, vaddr);
}
do_flush_sync:
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == DMA_TO_DEVICE)
return;
STC_FLUSHFLAG_INIT(strbuf);
iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
(void) iommu_read(iommu->write_complete_reg);
limit = 100000;
while (!STC_FLUSHFLAG_SET(strbuf)) {
limit--;
if (!limit)
break;
udelay(1);
rmb();
}
if (!limit)
printk(KERN_WARNING "strbuf_flush: flushflag timeout "
"vaddr[%08x] ctx[%lx] npages[%ld]\n",
vaddr, ctx, npages);
}
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
/* Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
/* Step 2: Clear out TSB entries. */
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
return 0;
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
prot = IOPTE_STREAMING(ctx);
else
prot = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
prot |= IOPTE_WRITE;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
dma_addr = iommu->tbl.table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
iopte_val(*base) = prot | paddr;
base++;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
entry = (vaddr - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
struct iommu_map_table *tbl = &iommu->tbl;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
struct strbuf *strbuf;
struct iommu *iommu;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
ctx = fetch_sg_ctx(iommu, sglist);
spin_lock_irqsave(&iommu->lock, flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
iopte_t *base;
int i;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
entry = ((dma_handle - iommu->tbl.table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
dma_addr_t bus_addr, size_t sz,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table +
((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
struct iommu_map_table *tbl = &iommu->tbl;
iopte = iommu->page_table + ((sglist[0].dma_address -
tbl->table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
.map_page = dma_4u_map_page,
.unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
int dma_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
if (device_mask >= (1UL << 32UL))
return 0;
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
#ifdef CONFIG_PCI
if (dev_is_pci(dev))
return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
return 0;
}
EXPORT_SYMBOL(dma_supported);
| gpl-2.0 |
gic4107/HSA-linux | drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 76 | 69796 | /******************************************************************************
* rtl871x_ioctl_linux.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RTL871X_IOCTL_LINUX_C_
#define _RTL871X_MP_IOCTL_C_
#include "osdep_service.h"
#include "drv_types.h"
#include "wlan_bssdef.h"
#include "rtl871x_debug.h"
#include "wifi.h"
#include "rtl871x_mlme.h"
#include "rtl871x_ioctl.h"
#include "rtl871x_ioctl_set.h"
#include "rtl871x_mp_ioctl.h"
#include "mlme_osdep.h"
#include <linux/wireless.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <net/iw_handler.h>
#include <linux/if_arp.h>
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 0x1E)
#define SCAN_ITEM_SIZE 768
#define MAX_CUSTOM_LEN 64
#define RATE_COUNT 4
static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000,
24000000, 36000000, 48000000, 54000000};
static const long ieee80211_wlan_frequencies[] = {
2412, 2417, 2422, 2427,
2432, 2437, 2442, 2447,
2452, 2457, 2462, 2467,
2472, 2484
};
static const char * const iw_operation_mode[] = {
"Auto", "Ad-Hoc", "Managed", "Master", "Repeater", "Secondary",
"Monitor"
};
/**
* hwaddr_aton_i - Convert ASCII string to MAC address
* @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
* @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
* Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
*/
static int hwaddr_aton_i(const char *txt, u8 *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex_to_bin(*txt++);
if (a < 0)
return -1;
b = hex_to_bin(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
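/*
* Usage sketch, not part of the original file: parsing a textual MAC the
* way the ioctl handlers below do; the buffer and literal are
* hypothetical.
*/
#if 0 /* example only */
static void example_parse_mac(void)
{
u8 mac[ETH_ALEN];
if (hwaddr_aton_i("00:11:22:33:44:55", mac) == 0)
; /* mac[] now holds 00:11:22:33:44:55 */
}
#endif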
void r8712_indicate_wx_assoc_event(struct _adapter *padapter)
{
union iwreq_data wrqu;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress,
ETH_ALEN);
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
void r8712_indicate_wx_disassoc_event(struct _adapter *padapter)
{
union iwreq_data wrqu;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
static inline void handle_pairwise_key(struct sta_info *psta,
struct ieee_param *param,
struct _adapter *padapter)
{
/* pairwise key */
memcpy(psta->x_UncstKey.skey, param->u.crypt.key,
(param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
memcpy(psta->tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
memcpy(psta->tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
padapter->securitypriv.busetkipkey = false;
_set_timer(&padapter->securitypriv.tkip_timer, 50);
}
r8712_setstakey_cmd(padapter, (unsigned char *)psta, true);
}
static inline void handle_group_key(struct ieee_param *param,
struct _adapter *padapter)
{
if (0 < param->u.crypt.idx &&
param->u.crypt.idx < 3) {
/* group key idx is 1 or 2 */
memcpy(padapter->securitypriv.XGrpKey[param->u.crypt.idx - 1].skey,
param->u.crypt.key,
(param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
memcpy(padapter->securitypriv.XGrptxmickey[param->u.crypt.idx - 1].skey,
&(param->u.crypt.key[16]), 8);
memcpy(padapter->securitypriv.XGrprxmickey[param->u.crypt.idx - 1].skey,
&(param->u.crypt.key[24]), 8);
padapter->securitypriv.binstallGrpkey = true;
r8712_set_key(padapter, &padapter->securitypriv,
param->u.crypt.idx);
if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE) {
if (padapter->registrypriv.power_mgnt != padapter->
pwrctrlpriv.pwr_mode)
_set_timer(&(padapter->mlmepriv.dhcp_timer),
60000);
}
}
}
static inline char *translate_scan(struct _adapter *padapter,
struct iw_request_info *info,
struct wlan_network *pnetwork,
char *start, char *stop)
{
struct iw_event iwe;
struct ieee80211_ht_cap *pht_capie;
char *current_val;
s8 *p;
u32 i = 0, ht_ielen = 0;
u16 cap, ht_cap = false, mcs_rate;
u8 rssi, bw_40MHz = 0, short_GI = 0;
if ((pnetwork->network.Configuration.DSConfig < 1) ||
(pnetwork->network.Configuration.DSConfig > 14)) {
if (pnetwork->network.Configuration.DSConfig < 1)
pnetwork->network.Configuration.DSConfig = 1;
else
pnetwork->network.Configuration.DSConfig = 14;
}
/* AP MAC address */
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN);
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
/* Add the ESSID */
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
iwe.u.data.length = min_t(u32, pnetwork->network.Ssid.SsidLength, 32);
start = iwe_stream_add_point(info, start, stop, &iwe,
pnetwork->network.Ssid.Ssid);
/* parsing HT_CAP_IE */
p = r8712_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_,
&ht_ielen, pnetwork->network.IELength - 12);
if (p && ht_ielen > 0) {
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH)
? 1 : 0;
short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
/* Add the protocol name */
iwe.cmd = SIOCGIWNAME;
if ((r8712_is_cckratesonly_included((u8 *)&pnetwork->network.
SupportedRates)) == true) {
if (ht_cap == true)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
} else if ((r8712_is_cckrates_included((u8 *)&pnetwork->network.
SupportedRates)) == true) {
if (ht_cap == true)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
} else {
if (ht_cap == true)
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
}
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
/* Add mode */
iwe.cmd = SIOCGIWMODE;
memcpy((u8 *)&cap, r8712_get_capability_from_ie(pnetwork->network.IEs),
2);
cap = le16_to_cpu(cap);
if (cap & (WLAN_CAPABILITY_IBSS|WLAN_CAPABILITY_BSS)) {
if (cap & WLAN_CAPABILITY_BSS)
iwe.u.mode = (u32)IW_MODE_MASTER;
else
iwe.u.mode = (u32)IW_MODE_ADHOC;
start = iwe_stream_add_event(info, start, stop, &iwe,
IW_EV_UINT_LEN);
}
/* Add frequency/channel */
iwe.cmd = SIOCGIWFREQ;
{
/* check legal index */
u8 dsconfig = pnetwork->network.Configuration.DSConfig;
if (dsconfig >= 1 && dsconfig <= ARRAY_SIZE(ieee80211_wlan_frequencies))
iwe.u.freq.m = (s32)(ieee80211_wlan_frequencies[dsconfig - 1] * 100000);
else
iwe.u.freq.m = 0;
}
iwe.u.freq.e = (s16)1;
iwe.u.freq.i = (u8)pnetwork->network.Configuration.DSConfig;
start = iwe_stream_add_event(info, start, stop, &iwe,
IW_EV_FREQ_LEN);
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
if (cap & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags = (u16)(IW_ENCODE_ENABLED |
IW_ENCODE_NOKEY);
else
iwe.u.data.flags = (u16)(IW_ENCODE_DISABLED);
iwe.u.data.length = (u16)0;
start = iwe_stream_add_point(info, start, stop, &iwe,
pnetwork->network.Ssid.Ssid);
/*Add basic and extended rates */
current_val = start + iwe_stream_lcp_len(info);
iwe.cmd = SIOCGIWRATE;
iwe.u.bitrate.fixed = 0;
iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = 0;
i = 0;
while (pnetwork->network.SupportedRates[i] != 0) {
/* Bit rate given in 500 kb/s units */
iwe.u.bitrate.value = (pnetwork->network.SupportedRates[i++] &
0x7F) * 500000;
current_val = iwe_stream_add_value(info, start, current_val,
stop, &iwe, IW_EV_PARAM_LEN);
}
/* Check if we added any event */
if ((current_val - start) > iwe_stream_lcp_len(info))
start = current_val;
/* parsing WPA/WPA2 IE */
{
u8 buf[MAX_WPA_IE_LEN];
u8 wpa_ie[255], rsn_ie[255];
u16 wpa_len = 0, rsn_len = 0;
int n;
sint out_len = 0;
out_len = r8712_get_sec_ie(pnetwork->network.IEs,
pnetwork->network.
IELength, rsn_ie, &rsn_len,
wpa_ie, &wpa_len);
if (wpa_len > 0) {
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
for (i = 0; i < wpa_len; i++) {
n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", wpa_ie[i]);
if (n >= MAX_WPA_IE_LEN)
break;
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = (u16)strlen(buf);
start = iwe_stream_add_point(info, start, stop,
&iwe, buf);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = (u16)wpa_len;
start = iwe_stream_add_point(info, start, stop,
&iwe, wpa_ie);
}
if (rsn_len > 0) {
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "rsn_ie=");
for (i = 0; i < rsn_len; i++) {
n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", rsn_ie[i]);
if (n >= MAX_WPA_IE_LEN)
break;
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
iwe.u.data.length = strlen(buf);
start = iwe_stream_add_point(info, start, stop,
&iwe, buf);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = rsn_len;
start = iwe_stream_add_point(info, start, stop, &iwe,
rsn_ie);
}
}
{ /* parsing WPS IE */
u8 wps_ie[512];
uint wps_ielen;
if (r8712_get_wps_ie(pnetwork->network.IEs,
pnetwork->network.IELength,
wps_ie, &wps_ielen) == true) {
if (wps_ielen > 2) {
iwe.cmd = IWEVGENIE;
iwe.u.data.length = (u16)wps_ielen;
start = iwe_stream_add_point(info, start, stop,
&iwe, wps_ie);
}
}
}
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi);
/* we only update signal_level (signal strength) that is rssi. */
iwe.u.qual.updated = (u8)(IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_UPDATED |
IW_QUAL_NOISE_INVALID);
iwe.u.qual.level = rssi; /* signal strength */
iwe.u.qual.qual = 0; /* signal quality */
iwe.u.qual.noise = 0; /* noise level */
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
/* TODO: translate rssi into a percentage */
return start;
}
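/*
 * Map the IW_AUTH_80211_AUTH_ALG bitmask from wpa_supplicant onto the
 * driver's NDIS-style settings. AuthAlgrthm encoding used throughout
 * this file: 0 = open system, 1 = shared key, 2 = 802.1x, 3 = auto.
 */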
static int wpa_set_auth_algs(struct net_device *dev, u32 value)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
int ret = 0;
if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeAutoSwitch;
padapter->securitypriv.AuthAlgrthm = 3;
} else if (value & AUTH_ALG_SHARED_KEY) {
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
padapter->securitypriv.AuthAlgrthm = 1;
} else if (value & AUTH_ALG_OPEN_SYSTEM) {
if (padapter->securitypriv.ndisauthtype <
Ndis802_11AuthModeWPAPSK) {
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeOpen;
padapter->securitypriv.AuthAlgrthm = 0;
}
} else
ret = -EINVAL;
return ret;
}
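/*
 * Handle an IEEE_CMD_SET_ENCRYPTION request from user space. The key
 * material is carried inline after the struct, so param_len must equal
 * the offset of u.crypt.key plus u.crypt.key_len. Only a broadcast
 * sta_addr is accepted here (station usage).
 */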
static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
u32 param_len)
{
int ret = 0;
u32 wep_key_idx, wep_key_len = 0;
struct NDIS_802_11_WEP *pwep = NULL;
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
if (param_len != (u32)((u8 *) param->u.crypt.key - (u8 *)param) +
param->u.crypt.key_len)
return -EINVAL;
if (is_broadcast_ether_addr(param->sta_addr)) {
if (param->u.crypt.idx >= WEP_KEYS) {
/* for large key indices, set the default (0) */
param->u.crypt.idx = 0;
}
} else
return -EINVAL;
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
netdev_info(dev, "r8712u: %s: crypt.alg = WEP\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.XGrpPrivacy = _WEP40_;
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
if (wep_key_idx >= WEP_KEYS)
wep_key_idx = 0;
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
/* Allocate the full structure so that the memset below
 * cannot write past the end of the buffer.
 */
pwep = (struct NDIS_802_11_WEP *)_malloc(
(u32)sizeof(struct NDIS_802_11_WEP));
if (pwep == NULL)
return -ENOMEM;
memset(pwep, 0, sizeof(struct NDIS_802_11_WEP));
pwep->KeyLength = wep_key_len;
pwep->Length = wep_key_len +
FIELD_OFFSET(struct NDIS_802_11_WEP,
KeyMaterial);
if (wep_key_len == 13) {
padapter->securitypriv.PrivacyAlgrthm =
_WEP104_;
padapter->securitypriv.XGrpPrivacy =
_WEP104_;
}
} else
return -EINVAL;
pwep->KeyIndex = wep_key_idx;
pwep->KeyIndex |= 0x80000000;
memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
if (param->u.crypt.set_tx) {
if (r8712_set_802_11_add_wep(padapter, pwep) ==
(u8)_FAIL)
ret = -EOPNOTSUPP;
} else {
/* Don't update "psecuritypriv->PrivacyAlgrthm" or
 * "psecuritypriv->PrivacyKeyIndex = keyid", but the key
 * can still be written to fw/cam via r8712_set_key().
 */
if (wep_key_idx >= WEP_KEYS) {
ret = -EOPNOTSUPP;
goto exit;
}
memcpy(&(psecuritypriv->DefKey[wep_key_idx].
skey[0]), pwep->KeyMaterial,
pwep->KeyLength);
psecuritypriv->DefKeylen[wep_key_idx] =
pwep->KeyLength;
r8712_set_key(padapter, psecuritypriv, wep_key_idx);
}
goto exit;
}
if (padapter->securitypriv.AuthAlgrthm == 2) { /* 802_1x */
struct sta_info *psta, *pbcmc_sta;
struct sta_priv *pstapriv = &padapter->stapriv;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE |
WIFI_MP_STATE) == true) { /* sta mode */
psta = r8712_get_stainfo(pstapriv,
get_bssid(pmlmepriv));
if (psta) {
psta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption3Enabled))
psta->XPrivacy = padapter->
securitypriv.PrivacyAlgrthm;
if (param->u.crypt.set_tx == 1)
handle_pairwise_key(psta, param,
padapter);
else /* group key */
handle_group_key(param, padapter);
}
pbcmc_sta = r8712_get_bcmc_stainfo(padapter);
if (pbcmc_sta) {
pbcmc_sta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption2Enabled) ||
(padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption3Enabled))
pbcmc_sta->XPrivacy =
padapter->securitypriv.
PrivacyAlgrthm;
}
}
}
exit:
kfree((u8 *)pwep);
return ret;
}
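/*
 * Parse the association IEs supplied by wpa_supplicant: select the
 * WPA/WPA2 auth mode, derive the group/pairwise ciphers, and cache any
 * WPS vendor-specific IE (OUI 00:50:f2, type 0x04), which also turns
 * on wps_phase.
 */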
static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
unsigned short ielen)
{
u8 *buf = NULL, *pos = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL))
return -EINVAL;
if (ielen) {
buf = _malloc(ielen);
if (buf == NULL)
return -ENOMEM;
memcpy(buf, pie, ielen);
pos = buf;
if (ielen < RSN_HEADER_LEN) {
ret = -EINVAL;
goto exit;
}
if (r8712_parse_wpa_ie(buf, ielen, &group_cipher,
&pairwise_cipher) == _SUCCESS) {
padapter->securitypriv.AuthAlgrthm = 2;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPAPSK;
}
if (r8712_parse_wpa2_ie(buf, ielen, &group_cipher,
&pairwise_cipher) == _SUCCESS) {
padapter->securitypriv.AuthAlgrthm = 2;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPA2PSK;
}
switch (group_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.XGrpPrivacy =
_NO_PRIVACY_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.XGrpPrivacy = _WEP40_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
break;
case WPA_CIPHER_TKIP:
padapter->securitypriv.XGrpPrivacy = _TKIP_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption2Enabled;
break;
case WPA_CIPHER_CCMP:
padapter->securitypriv.XGrpPrivacy = _AES_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption3Enabled;
break;
case WPA_CIPHER_WEP104:
padapter->securitypriv.XGrpPrivacy = _WEP104_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
break;
}
switch (pairwise_cipher) {
case WPA_CIPHER_NONE:
padapter->securitypriv.PrivacyAlgrthm =
_NO_PRIVACY_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11EncryptionDisabled;
break;
case WPA_CIPHER_WEP40:
padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
break;
case WPA_CIPHER_TKIP:
padapter->securitypriv.PrivacyAlgrthm = _TKIP_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption2Enabled;
break;
case WPA_CIPHER_CCMP:
padapter->securitypriv.PrivacyAlgrthm = _AES_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption3Enabled;
break;
case WPA_CIPHER_WEP104:
padapter->securitypriv.PrivacyAlgrthm = _WEP104_;
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
break;
}
padapter->securitypriv.wps_phase = false;
{/* set wps_ie */
u16 cnt = 0;
u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
while (cnt < ielen) {
eid = buf[cnt];
if ((eid == _VENDOR_SPECIFIC_IE_) &&
(!memcmp(&buf[cnt+2], wps_oui, 4))) {
netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE\n");
padapter->securitypriv.wps_ie_len =
((buf[cnt+1] + 2) <
(MAX_WPA_IE_LEN << 2)) ?
(buf[cnt + 1] + 2) :
(MAX_WPA_IE_LEN << 2);
memcpy(padapter->securitypriv.wps_ie,
&buf[cnt],
padapter->securitypriv.wps_ie_len);
padapter->securitypriv.wps_phase =
true;
netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
cnt += buf[cnt+1]+2;
break;
} else
cnt += buf[cnt + 1] + 2;
}
}
}
exit:
kfree(buf);
return ret;
}
static int r8711_wx_get_name(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX *prates = NULL;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) ==
true) {
/* parsing HT_CAP_IE */
p = r8712_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_,
&ht_ielen, pcur_bss->IELength - 12);
if (p && ht_ielen > 0)
ht_cap = true;
prates = &pcur_bss->SupportedRates;
if (r8712_is_cckratesonly_included((u8 *)prates) == true) {
if (ht_cap == true)
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11bn");
else
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11b");
} else if ((r8712_is_cckrates_included((u8 *)prates)) == true) {
if (ht_cap == true)
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11bgn");
else
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11bg");
} else {
if (ht_cap == true)
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11gn");
else
snprintf(wrqu->name, IFNAMSIZ,
"IEEE 802.11g");
}
} else
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
return 0;
}
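/*
 * Channel-to-frequency table in MHz; entries 0-13 are the 2.4 GHz
 * channels 1-14, the remainder are 4.9/5 GHz channels. Only the first
 * 14 entries are consulted by r8711_wx_set_freq() below.
 */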
static const long frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462,
2467, 2472, 2484, 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980,
5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210,
5220, 5230, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560,
5580, 5600, 5620, 5640, 5660, 5680, 5700, 5745, 5765, 5785, 5805,
5825
};
static int r8711_wx_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_freq *fwrq = &wrqu->freq;
int rc = 0;
/* If setting by frequency, convert to a channel */
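/* With e == 1 the mantissa is in units of 10 Hz, so the range
 * 2.412e8..2.487e8 corresponds to 2.412-2.487 GHz (channels 1-14).
 */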
if ((fwrq->e == 1) &&
(fwrq->m >= (int) 2.412e8) &&
(fwrq->m <= (int) 2.487e8)) {
int f = fwrq->m / 100000;
int c = 0;
while ((c < 14) && (f != frequency_list[c]))
c++;
fwrq->e = 0;
fwrq->m = c + 1;
}
/* Setting by channel number */
if ((fwrq->m > 14) || (fwrq->e > 0))
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
if ((channel < 1) || (channel > 14))
rc = -EINVAL;
else {
/* Yes ! We can set it !!! */
padapter->registrypriv.channel = channel;
}
}
return rc;
}
static int r8711_wx_get_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
wrqu->freq.m = ieee80211_wlan_frequencies[
pcur_bss->Configuration.DSConfig-1] * 100000;
wrqu->freq.e = 1;
wrqu->freq.i = pcur_bss->Configuration.DSConfig;
} else {
return -ENOLINK;
}
return 0;
}
static int r8711_wx_set_mode(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
enum NDIS_802_11_NETWORK_INFRASTRUCTURE networkType;
switch (wrqu->mode) {
case IW_MODE_AUTO:
networkType = Ndis802_11AutoUnknown;
break;
case IW_MODE_ADHOC:
networkType = Ndis802_11IBSS;
break;
case IW_MODE_MASTER:
networkType = Ndis802_11APMode;
break;
case IW_MODE_INFRA:
networkType = Ndis802_11Infrastructure;
break;
default:
return -EINVAL;
}
if (Ndis802_11APMode == networkType)
r8712_setopmode_cmd(padapter, networkType);
else
r8712_setopmode_cmd(padapter, Ndis802_11AutoUnknown);
r8712_set_802_11_infrastructure_mode(padapter, networkType);
return 0;
}
static int r8711_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)
wrqu->mode = IW_MODE_INFRA;
else if (check_fwstate(pmlmepriv,
WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE) == true)
wrqu->mode = IW_MODE_ADHOC;
else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
wrqu->mode = IW_MODE_MASTER;
else
wrqu->mode = IW_MODE_AUTO;
return 0;
}
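/*
 * PMKSA cache management for SIOCSIWPMKSA. Note that this handler
 * returns true/false rather than the usual 0/-errno convention.
 */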
static int r871x_wx_set_pmkid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct iw_pmksa *pPMK = (struct iw_pmksa *) extra;
u8 strZeroMacAddress[ETH_ALEN] = {0x00};
u8 strIssueBssid[ETH_ALEN] = {0x00};
u8 j, blInserted = false;
int intReturn = false;
/*
 * The BSSID information is in the bssid.sa_data array.
 * If cmd is IW_PMKSA_FLUSH, wpa_supplicant wants to clear all of the
 * PMKID information. If cmd is IW_PMKSA_ADD, wpa_supplicant wants to
 * add a PMKID/BSSID to the driver. If cmd is IW_PMKSA_REMOVE,
 * wpa_supplicant wants to remove a PMKID/BSSID from the driver.
 */
if (pPMK == NULL)
return -EINVAL;
memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
switch (pPMK->cmd) {
case IW_PMKSA_ADD:
if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN))
return intReturn;
else
intReturn = true;
blInserted = false;
/* overwrite PMKID */
for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].Bssid,
strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => rewrite
* with new PMKID. */
netdev_info(dev, "r8712u: %s: BSSID exists in the PMKList.\n",
__func__);
memcpy(psecuritypriv->PMKIDList[j].PMKID,
pPMK->pmkid, IW_PMKID_LEN);
psecuritypriv->PMKIDList[j].bUsed = true;
psecuritypriv->PMKIDIndex = j + 1;
blInserted = true;
break;
}
}
if (!blInserted) {
/* Find a new entry */
netdev_info(dev, "r8712u: %s: Use the new entry index = %d for this PMKID.\n",
__func__, psecuritypriv->PMKIDIndex);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->
PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
memcpy(psecuritypriv->PMKIDList[psecuritypriv->
PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].
bUsed = true;
psecuritypriv->PMKIDIndex++;
if (psecuritypriv->PMKIDIndex == NUM_PMKID_CACHE)
psecuritypriv->PMKIDIndex = 0;
}
break;
case IW_PMKSA_REMOVE:
intReturn = true;
for (j = 0; j < NUM_PMKID_CACHE; j++) {
if (!memcmp(psecuritypriv->PMKIDList[j].Bssid,
strIssueBssid, ETH_ALEN)) {
/* BSSID is matched, the same AP => Remove
* this PMKID information and reset it. */
memset(psecuritypriv->PMKIDList[j].Bssid,
0x00, ETH_ALEN);
psecuritypriv->PMKIDList[j].bUsed = false;
break;
}
}
break;
case IW_PMKSA_FLUSH:
memset(psecuritypriv->PMKIDList, 0,
sizeof(struct RT_PMKID_LIST) * NUM_PMKID_CACHE);
psecuritypriv->PMKIDIndex = 0;
intReturn = true;
break;
default:
netdev_info(dev, "r8712u: %s: unknown Command\n", __func__);
intReturn = false;
break;
}
return intReturn;
}
static int r8711_wx_get_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
wrqu->sens.value = 0;
wrqu->sens.fixed = 0; /* no auto select */
wrqu->sens.disabled = 1;
return 0;
}
static int r8711_wx_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
u16 val;
int i;
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
/* Let's try to keep this struct in the same order as in
* linux/include/wireless.h
*/
/* TODO: See what values we can set, and remove the ones we can't
* set, or fill them with some default data.
*/
/* ~5 Mb/s real (802.11b) */
range->throughput = 5 * 1000 * 1000;
/* TODO: 8711 sensitivity ? */
/* signal level threshold range */
/* percent values between 0 and 100. */
range->max_qual.qual = 100;
range->max_qual.level = 100;
range->max_qual.noise = 100;
range->max_qual.updated = 7; /* Updated all three */
range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
range->avg_qual.level = 20 - 98;
range->avg_qual.noise = 0;
range->avg_qual.updated = 7; /* Updated all three */
range->num_bitrates = RATE_COUNT;
for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
range->bitrate[i] = rtl8180_rates[i];
range->min_frag = MIN_FRAG_THRESHOLD;
range->max_frag = MAX_FRAG_THRESHOLD;
range->pm_capa = 0;
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 16;
range->num_channels = 14;
for (i = 0, val = 0; i < 14; i++) {
/* Include only legal frequencies for some countries */
range->freq[val].i = i + 1;
range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
range->freq[val].e = 1;
val++;
if (val == IW_MAX_FREQUENCIES)
break;
}
range->num_frequency = val;
range->enc_capa = IW_ENC_CAPA_WPA |
IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP |
IW_ENC_CAPA_CIPHER_CCMP;
return 0;
}
static int r8711_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra);
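/* The declaration above lets r871x_wx_set_priv() call
 * r8711_wx_get_rate() (defined later in this file) for the
 * LINKSPEED query.
 */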
static int r871x_wx_set_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra)
{
int ret = 0, len = 0;
char *ext;
struct _adapter *padapter = netdev_priv(dev);
struct iw_point *dwrq = (struct iw_point *)awrq;
len = dwrq->length;
ext = _malloc(len);
if (!ext)
return -ENOMEM;
if (copy_from_user(ext, dwrq->pointer, len)) {
kfree(ext);
return -EFAULT;
}
if (0 == strcasecmp(ext, "RSSI")) {
/*Return received signal strength indicator in -db for */
/* current AP */
/*<ssid> Rssi xx */
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct wlan_network *pcur_network = &pmlmepriv->cur_network;
/*static u8 xxxx; */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
sprintf(ext, "%s rssi %d",
pcur_network->network.Ssid.Ssid,
/*(xxxx=xxxx+10) */
((padapter->recvpriv.fw_rssi)>>1)-95
/*pcur_network->network.Rssi */
);
} else {
sprintf(ext, "OK");
}
} else if (0 == strcasecmp(ext, "LINKSPEED")) {
/*Return link speed in MBPS */
/*LinkSpeed xx */
union iwreq_data wrqd;
int ret_inner;
int mbps;
ret_inner = r8711_wx_get_rate(dev, info, &wrqd, extra);
if (0 != ret_inner)
mbps = 0;
else
mbps = wrqd.bitrate.value / 1000000;
sprintf(ext, "LINKSPEED %d", mbps);
} else if (0 == strcasecmp(ext, "MACADDR")) {
/*Return mac address of the station */
/* Macaddr = xx:xx:xx:xx:xx:xx */
sprintf(ext, "MACADDR = %pM", dev->dev_addr);
} else if (0 == strcasecmp(ext, "SCAN-ACTIVE")) {
/*Set scan type to active */
/*OK if successful */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
pmlmepriv->passive_mode = 1;
sprintf(ext, "OK");
} else if (0 == strcasecmp(ext, "SCAN-PASSIVE")) {
/*Set scan type to passive */
/*OK if successful */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
pmlmepriv->passive_mode = 0;
sprintf(ext, "OK");
} else if (0 == strncmp(ext, "DCE-E", 5)) {
/*Enable driver-controlled disconnect (DisconnectCtrlEx) */
/*OK if successful */
r8712_disconnectCtrlEx_cmd(padapter
, 1 /*u32 enableDrvCtrl */
, 5 /*u32 tryPktCnt */
, 100 /*u32 tryPktInterval */
, 5000 /*u32 firstStageTO */
);
sprintf(ext, "OK");
} else if (0 == strncmp(ext, "DCE-D", 5)) {
/*Disable driver-controlled disconnect (DisconnectCtrlEx) */
/*OK if successful */
r8712_disconnectCtrlEx_cmd(padapter
, 0 /*u32 enableDrvCtrl */
, 5 /*u32 tryPktCnt */
, 100 /*u32 tryPktInterval */
, 5000 /*u32 firstStageTO */
);
sprintf(ext, "OK");
} else {
netdev_info(dev, "r8712u: %s: unknown Command %s.\n",
__func__, ext);
goto FREE_EXT;
}
if (copy_to_user(dwrq->pointer, ext,
min(dwrq->length, (__u16)(strlen(ext)+1))))
ret = -EFAULT;
FREE_EXT:
kfree(ext);
return ret;
}
/* set bssid flow
* s1. set_802_11_infrastructure_mode()
* s2. set_802_11_authentication_mode()
* s3. set_802_11_encryption_mode()
* s4. set_802_11_bssid()
*
* This function intends to handle the Set AP command, which specifies the
* MAC# of a preferred Access Point.
* Currently, the request comes via Wireless Extensions' SIOCSIWAP ioctl.
*
* For this operation to succeed, there is no need for the interface to be up.
*
*/
static int r8711_wx_set_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra)
{
int ret = -EINPROGRESS;
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct sockaddr *temp = (struct sockaddr *)awrq;
unsigned long irqL;
struct list_head *phead;
u8 *dst_bssid;
struct wlan_network *pnetwork = NULL;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
return -EBUSY;
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
return ret;
if (temp->sa_family != ARPHRD_ETHER)
return -EINVAL;
authmode = padapter->securitypriv.ndisauthtype;
spin_lock_irqsave(&queue->lock, irqL);
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned) == true)
break;
pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
struct wlan_network, list);
pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
dst_bssid = pnetwork->network.MacAddress;
if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
r8712_set_802_11_infrastructure_mode(padapter,
pnetwork->network.InfrastructureMode);
break;
}
}
spin_unlock_irqrestore(&queue->lock, irqL);
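/* Note: ret still holds -EINPROGRESS here, so the block below is
 * never entered and the auth mode/BSSID commands are not issued.
 */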
if (!ret) {
if (!r8712_set_802_11_authentication_mode(padapter, authmode))
ret = -ENOMEM;
else {
if (!r8712_set_802_11_bssid(padapter, temp->sa_data))
ret = -1;
}
}
return ret;
}
static int r8711_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
WIFI_AP_STATE))
memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
else
memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
return 0;
}
static int r871x_wx_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *) extra;
if (mlme == NULL)
return -1;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
if (!r8712_set_802_11_disassociate(padapter))
ret = -1;
break;
case IW_MLME_DISASSOC:
if (!r8712_set_802_11_disassociate(padapter))
ret = -1;
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
/*
* This function intends to handle the Set Scan command.
* Currently, the request comes via Wireless Extensions' SIOCSIWSCAN ioctl.
*
* For this operation to succeed, the interface must be brought Up beforehand.
*
*/
static int r8711_wx_set_scan(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 status = true;
if (padapter->bDriverStopped == true) {
netdev_info(dev, "In %s: bDriverStopped=%d\n",
__func__, padapter->bDriverStopped);
return -1;
}
if (padapter->bup == false)
return -ENETDOWN;
if (padapter->hw_init_completed == false)
return -1;
if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
(pmlmepriv->sitesurveyctrl.traffic_busy == true))
return 0;
if (wrqu->data.length == sizeof(struct iw_scan_req)) {
struct iw_scan_req *req = (struct iw_scan_req *)extra;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
struct ndis_802_11_ssid ssid;
unsigned long irqL;
u32 len = min_t(u8, req->essid_len, IW_ESSID_MAX_SIZE);
memset((unsigned char *)&ssid, 0,
sizeof(struct ndis_802_11_ssid));
memcpy(ssid.Ssid, req->essid, len);
ssid.SsidLength = len;
spin_lock_irqsave(&pmlmepriv->lock, irqL);
if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY |
_FW_UNDER_LINKING)) ||
(pmlmepriv->sitesurveyctrl.traffic_busy == true)) {
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
status = false;
} else
status = r8712_sitesurvey_cmd(padapter, &ssid);
spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}
} else
status = r8712_set_802_11_bssid_list_scan(padapter);
if (status == false)
return -1;
return 0;
}
static int r8711_wx_get_scan(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
unsigned long irqL;
struct list_head *plist, *phead;
char *ev = extra;
char *stop = ev + wrqu->data.length;
u32 ret = 0, cnt = 0;
if (padapter->bDriverStopped)
return -EINVAL;
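/* Poll (up to 100 * 30 ms = ~3 s) for any scan or join in progress
 * to complete before walking the scanned queue.
 */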
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
msleep(30);
cnt++;
if (cnt > 100)
break;
}
spin_lock_irqsave(&queue->lock, irqL);
phead = get_list_head(queue);
plist = get_next(phead);
while (1) {
if (end_of_queue_search(phead, plist) == true)
break;
if ((stop - ev) < SCAN_ITEM_SIZE) {
ret = -E2BIG;
break;
}
pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
ev = translate_scan(padapter, a, pnetwork, ev, stop);
plist = get_next(plist);
}
spin_unlock_irqrestore(&queue->lock, irqL);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
return ret;
}
/* set ssid flow
* s1. set_802_11_infrastructure_mode()
* s2. set_802_11_authentication_mode()
* s3. set_802_11_encryption_mode()
* s4. set_802_11_ssid()
*
* This function intends to handle the Set ESSID command.
* Currently, the request comes via the Wireless Extensions' SIOCSIWESSID ioctl.
*
* For this operation to succeed, there is no need for the interface to be Up.
*
*/
static int r8711_wx_set_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
struct ndis_802_11_ssid ndis_ssid;
u8 *dst_ssid, *src_ssid;
struct list_head *phead;
u32 len;
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
return -EBUSY;
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
return 0;
if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
return -E2BIG;
authmode = padapter->securitypriv.ndisauthtype;
if (wrqu->essid.flags && wrqu->essid.length) {
len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ?
wrqu->essid.length : IW_ESSID_MAX_SIZE;
memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
ndis_ssid.SsidLength = len;
memcpy(ndis_ssid.Ssid, extra, len);
src_ssid = ndis_ssid.Ssid;
phead = get_list_head(queue);
pmlmepriv->pscanned = get_next(phead);
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
struct wlan_network, list);
pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
dst_ssid = pnetwork->network.Ssid.Ssid;
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
&& (pnetwork->network.Ssid.SsidLength ==
ndis_ssid.SsidLength)) {
if (check_fwstate(pmlmepriv,
WIFI_ADHOC_STATE)) {
if (pnetwork->network.
InfrastructureMode
!=
padapter->mlmepriv.
cur_network.network.
InfrastructureMode)
continue;
}
r8712_set_802_11_infrastructure_mode(
padapter,
pnetwork->network.InfrastructureMode);
break;
}
}
r8712_set_802_11_authentication_mode(padapter, authmode);
r8712_set_802_11_ssid(padapter, &ndis_ssid);
}
return -EINPROGRESS;
}
static int r8711_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
u32 len, ret = 0;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
len = pcur_bss->Ssid.SsidLength;
wrqu->essid.length = len;
memcpy(extra, pcur_bss->Ssid.Ssid, len);
wrqu->essid.flags = 1;
} else {
ret = -ENOLINK;
}
return ret;
}
static int r8711_wx_set_rate(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
u32 target_rate = wrqu->bitrate.value;
u32 fixed = wrqu->bitrate.fixed;
u32 ratevalue = 0;
u8 datarates[NumRates];
u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
int i, ret = 0;
if (target_rate == -1) {
ratevalue = 11;
goto set_rate;
}
target_rate = target_rate / 100000;
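/* bitrate.value is in bit/s; dividing by 100000 yields units of
 * 0.1 Mbps, so e.g. 55 below means 5.5 Mbps and 540 means 54 Mbps.
 */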
switch (target_rate) {
case 10:
ratevalue = 0;
break;
case 20:
ratevalue = 1;
break;
case 55:
ratevalue = 2;
break;
case 60:
ratevalue = 3;
break;
case 90:
ratevalue = 4;
break;
case 110:
ratevalue = 5;
break;
case 120:
ratevalue = 6;
break;
case 180:
ratevalue = 7;
break;
case 240:
ratevalue = 8;
break;
case 360:
ratevalue = 9;
break;
case 480:
ratevalue = 10;
break;
case 540:
ratevalue = 11;
break;
default:
ratevalue = 11;
break;
}
set_rate:
for (i = 0; i < NumRates; i++) {
if (ratevalue == mpdatarate[i]) {
datarates[i] = mpdatarate[i];
if (fixed == 0)
break;
} else
datarates[i] = 0xff;
}
if (r8712_setdatarate_cmd(padapter, datarates) != _SUCCESS)
ret = -ENOMEM;
return ret;
}
static int r8711_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
struct ieee80211_ht_cap *pht_capie;
unsigned char rf_type = padapter->registrypriv.rf_config;
int i;
u8 *p;
u16 rate, max_rate = 0, ht_cap = false;
u32 ht_ielen = 0;
u8 bw_40MHz = 0, short_GI = 0;
u16 mcs_rate = 0;
i = 0;
if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
p = r8712_get_ie(&pcur_bss->IEs[12],
_HT_CAPABILITY_IE_, &ht_ielen,
pcur_bss->IELength - 12);
if (p && ht_ielen > 0) {
ht_cap = true;
pht_capie = (struct ieee80211_ht_cap *)(p + 2);
memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
bw_40MHz = (pht_capie->cap_info &
IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
short_GI = (pht_capie->cap_info &
(IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
}
while ((pcur_bss->SupportedRates[i] != 0) &&
(pcur_bss->SupportedRates[i] != 0xFF)) {
rate = pcur_bss->SupportedRates[i] & 0x7F;
if (rate > max_rate)
max_rate = rate;
wrqu->bitrate.fixed = 0; /* no auto select */
wrqu->bitrate.value = rate*500000;
i++;
}
if (ht_cap == true) {
if ((mcs_rate & 0x8000) /* MCS15 */ &&
(rf_type == RTL8712_RF_2T2R))
max_rate = (bw_40MHz) ? ((short_GI) ? 300 :
270) : ((short_GI) ? 144 : 130);
else if (mcs_rate & 0x0080) /* MCS7 */
max_rate = (bw_40MHz) ? ((short_GI) ? 150 :
135) : ((short_GI) ? 72 : 65);
else /* default MCS7 */
max_rate = (bw_40MHz) ? ((short_GI) ? 150 :
135) : ((short_GI) ? 72 : 65);
max_rate *= 2; /* convert Mbps to 500 kbps units */
wrqu->bitrate.value = max_rate * 500000;
} else {
wrqu->bitrate.value = max_rate * 500000;
}
} else
return -ENOLINK;
return 0;
}
static int r8711_wx_get_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
wrqu->rts.value = padapter->registrypriv.rts_thresh;
wrqu->rts.fixed = 0; /* no auto select */
return 0;
}
static int r8711_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
if (wrqu->frag.disabled)
padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
else {
if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
wrqu->frag.value > MAX_FRAG_THRESHOLD)
return -EINVAL;
padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
}
return 0;
}
static int r8711_wx_get_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
wrqu->frag.value = padapter->xmitpriv.frag_len;
wrqu->frag.fixed = 0; /* no auto select */
return 0;
}
static int r8711_wx_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
wrqu->retry.value = 7;
wrqu->retry.fixed = 0; /* no auto select */
wrqu->retry.disabled = 1;
return 0;
}
static int r8711_wx_set_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
u32 key;
u32 keyindex_provided;
struct NDIS_802_11_WEP wep;
enum NDIS_802_11_AUTHENTICATION_MODE authmode;
struct iw_point *erq = &(wrqu->encoding);
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
key = erq->flags & IW_ENCODE_INDEX;
memset(&wep, 0, sizeof(struct NDIS_802_11_WEP));
if (erq->flags & IW_ENCODE_DISABLED) {
netdev_info(dev, "r8712u: %s: EncryptionDisabled\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11EncryptionDisabled;
padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.XGrpPrivacy = _NO_PRIVACY_;
padapter->securitypriv.AuthAlgrthm = 0; /* open system */
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
return 0;
}
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
keyindex_provided = 1;
} else {
keyindex_provided = 0;
key = padapter->securitypriv.PrivacyKeyIndex;
}
/* set authentication mode */
if (erq->flags & IW_ENCODE_OPEN) {
netdev_info(dev, "r8712u: %s: IW_ENCODE_OPEN\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.AuthAlgrthm = 0; /* open system */
padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.XGrpPrivacy = _NO_PRIVACY_;
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
} else if (erq->flags & IW_ENCODE_RESTRICTED) {
netdev_info(dev, "r8712u: %s: IW_ENCODE_RESTRICTED\n", __func__);
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.AuthAlgrthm = 1; /* shared system */
padapter->securitypriv.PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.XGrpPrivacy = _WEP40_;
authmode = Ndis802_11AuthModeShared;
padapter->securitypriv.ndisauthtype = authmode;
} else {
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption1Enabled;
padapter->securitypriv.AuthAlgrthm = 0; /* open system */
padapter->securitypriv.PrivacyAlgrthm = _NO_PRIVACY_;
padapter->securitypriv.XGrpPrivacy = _NO_PRIVACY_;
authmode = Ndis802_11AuthModeOpen;
padapter->securitypriv.ndisauthtype = authmode;
}
wep.KeyIndex = key;
if (erq->length > 0) {
wep.KeyLength = erq->length <= 5 ? 5 : 13;
wep.Length = wep.KeyLength +
FIELD_OFFSET(struct NDIS_802_11_WEP, KeyMaterial);
} else {
wep.KeyLength = 0;
if (keyindex_provided == 1) { /* set key_id only, no given
* KeyMaterial(erq->length==0).*/
padapter->securitypriv.PrivacyKeyIndex = key;
switch (padapter->securitypriv.DefKeylen[key]) {
case 5:
padapter->securitypriv.PrivacyAlgrthm =
_WEP40_;
break;
case 13:
padapter->securitypriv.PrivacyAlgrthm =
_WEP104_;
break;
default:
padapter->securitypriv.PrivacyAlgrthm =
_NO_PRIVACY_;
break;
}
return 0;
}
}
wep.KeyIndex |= 0x80000000; /* transmit key */
memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
if (r8712_set_802_11_add_wep(padapter, &wep) == _FAIL)
return -EOPNOTSUPP;
return 0;
}
static int r8711_wx_get_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
uint key, ret = 0;
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_point *erq = &(wrqu->encoding);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
return 0;
}
}
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
if (key > WEP_KEYS)
return -EINVAL;
key--;
} else {
key = padapter->securitypriv.PrivacyKeyIndex;
}
erq->flags = key + 1;
switch (padapter->securitypriv.ndisencryptstatus) {
case Ndis802_11EncryptionNotSupported:
case Ndis802_11EncryptionDisabled:
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
break;
case Ndis802_11Encryption1Enabled:
erq->length = padapter->securitypriv.DefKeylen[key];
if (erq->length) {
memcpy(keybuf, padapter->securitypriv.DefKey[
key].skey, padapter->securitypriv.
DefKeylen[key]);
erq->flags |= IW_ENCODE_ENABLED;
if (padapter->securitypriv.ndisauthtype ==
Ndis802_11AuthModeOpen)
erq->flags |= IW_ENCODE_OPEN;
else if (padapter->securitypriv.ndisauthtype ==
Ndis802_11AuthModeShared)
erq->flags |= IW_ENCODE_RESTRICTED;
} else {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
}
break;
case Ndis802_11Encryption2Enabled:
case Ndis802_11Encryption3Enabled:
erq->length = 16;
erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN |
IW_ENCODE_NOKEY);
break;
default:
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
break;
}
return ret;
}
static int r8711_wx_get_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
wrqu->power.value = 0;
wrqu->power.fixed = 0; /* no auto select */
wrqu->power.disabled = 1;
return 0;
}
static int r871x_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
return r871x_set_wpa_ie(padapter, extra, wrqu->data.length);
}
static int r871x_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_param *param = (struct iw_param *)&(wrqu->param);
int paramid;
int paramval;
int ret = 0;
paramid = param->flags & IW_AUTH_INDEX;
paramval = param->value;
switch (paramid) {
case IW_AUTH_WPA_VERSION:
break;
case IW_AUTH_CIPHER_PAIRWISE:
break;
case IW_AUTH_CIPHER_GROUP:
break;
case IW_AUTH_KEY_MGMT:
/*
 * This driver does not use these parameters.
 */
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
if (paramval) {
/* wpa_supplicant is enabling tkip countermeasure. */
padapter->securitypriv.btkip_countermeasure = true;
} else {
/* wpa_supplicant is disabling tkip countermeasure. */
padapter->securitypriv.btkip_countermeasure = false;
}
break;
case IW_AUTH_DROP_UNENCRYPTED:
/* HACK:
*
* wpa_supplicant calls set_wpa_enabled when the driver
* is loaded and unloaded, regardless of if WPA is being
* used. No other calls are made which can be used to
* determine if encryption will be used or not prior to
* association being expected. If encryption is not being
* used, drop_unencrypted is set to false, else true -- we
* can use this to determine if the CAP_PRIVACY_ON bit should
* be set.
*/
if (padapter->securitypriv.ndisencryptstatus ==
Ndis802_11Encryption1Enabled) {
/* it means init value, or using wep,
* ndisencryptstatus =
* Ndis802_11Encryption1Enabled,
* then it needn't reset it;
*/
break;
}
if (paramval) {
padapter->securitypriv.ndisencryptstatus =
Ndis802_11EncryptionDisabled;
padapter->securitypriv.PrivacyAlgrthm =
_NO_PRIVACY_;
padapter->securitypriv.XGrpPrivacy =
_NO_PRIVACY_;
padapter->securitypriv.AuthAlgrthm = 0;
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeOpen;
}
break;
case IW_AUTH_80211_AUTH_ALG:
ret = wpa_set_auth_algs(dev, (u32)paramval);
break;
case IW_AUTH_WPA_ENABLED:
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
break;
case IW_AUTH_PRIVACY_INVOKED:
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
static int r871x_wx_set_enc_ext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_point *pencoding = &wrqu->encoding;
struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
struct ieee_param *param = NULL;
char *alg_name;
u32 param_len;
int ret = 0;
param_len = sizeof(struct ieee_param) + pext->key_len;
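/* The key bytes are appended directly after the ieee_param struct
 * (copied from pext + 1 below); wpa_set_encryption() validates that
 * layout against param_len.
 */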
param = (struct ieee_param *)_malloc(param_len);
if (param == NULL)
return -ENOMEM;
memset(param, 0, param_len);
param->cmd = IEEE_CMD_SET_ENCRYPTION;
memset(param->sta_addr, 0xff, ETH_ALEN);
switch (pext->alg) {
case IW_ENCODE_ALG_NONE:
alg_name = "none";
break;
case IW_ENCODE_ALG_WEP:
alg_name = "WEP";
break;
case IW_ENCODE_ALG_TKIP:
alg_name = "TKIP";
break;
case IW_ENCODE_ALG_CCMP:
alg_name = "CCMP";
break;
default:
ret = -EINVAL;
goto out;
}
strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
param->u.crypt.set_tx = 0;
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
param->u.crypt.idx = (pencoding->flags & 0x00FF) - 1;
if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
memcpy(param->u.crypt.seq, pext->rx_seq, 8);
if (pext->key_len) {
param->u.crypt.key_len = pext->key_len;
memcpy(param + 1, pext + 1, pext->key_len);
}
ret = wpa_set_encryption(dev, param, param_len);
out:
kfree(param);
return ret;
}
static int r871x_wx_get_nick(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
if (extra) {
wrqu->data.length = 8;
wrqu->data.flags = 1;
memcpy(extra, "rtl_wifi", 8);
}
return 0;
}
static int r8711_wx_read32(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
u32 addr;
u32 data32;
get_user(addr, (u32 __user *)wrqu->data.pointer);
data32 = r8712_read32(padapter, addr);
put_user(data32, (u32 __user *)wrqu->data.pointer);
wrqu->data.length = (data32 & 0xffff0000) >> 16;
wrqu->data.flags = data32 & 0xffff;
return 0;
}
static int r8711_wx_write32(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *keybuf)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
u32 addr;
u32 data32;
get_user(addr, (u32 __user *)wrqu->data.pointer);
data32 = ((u32)wrqu->data.length<<16) | (u32)wrqu->data.flags;
r8712_write32(padapter, addr, data32);
return 0;
}
static int dummy(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
return -ENOSYS;
}
static int r8711_drvext_hdl(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
return 0;
}
static int r871x_mp_ioctl_hdl(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_point *p = &wrqu->data;
struct oid_par_priv oid_par;
struct mp_ioctl_handler *phandler;
struct mp_ioctl_param *poidparam;
unsigned long BytesRead, BytesWritten, BytesNeeded;
u8 *pparmbuf = NULL, bset;
u16 len;
uint status;
int ret = 0;
if ((!p->length) || (!p->pointer)) {
ret = -EINVAL;
goto _r871x_mp_ioctl_hdl_exit;
}
bset = (u8)(p->flags & 0xFFFF);
len = p->length;
pparmbuf = (u8 *)_malloc(len);
if (pparmbuf == NULL) {
ret = -ENOMEM;
goto _r871x_mp_ioctl_hdl_exit;
}
if (copy_from_user(pparmbuf, p->pointer, len)) {
ret = -EFAULT;
goto _r871x_mp_ioctl_hdl_exit;
}
poidparam = (struct mp_ioctl_param *)pparmbuf;
if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
ret = -EINVAL;
goto _r871x_mp_ioctl_hdl_exit;
}
phandler = mp_ioctl_hdl + poidparam->subcode;
if ((phandler->paramsize != 0) &&
(poidparam->len < phandler->paramsize)) {
ret = -EINVAL;
goto _r871x_mp_ioctl_hdl_exit;
}
if (phandler->oid == 0 && phandler->handler)
status = phandler->handler(&oid_par);
else if (phandler->handler) {
oid_par.adapter_context = padapter;
oid_par.oid = phandler->oid;
oid_par.information_buf = poidparam->data;
oid_par.information_buf_len = poidparam->len;
oid_par.dbg = 0;
BytesWritten = 0;
BytesNeeded = 0;
if (bset) {
oid_par.bytes_rw = &BytesRead;
oid_par.bytes_needed = &BytesNeeded;
oid_par.type_of_oid = SET_OID;
} else {
oid_par.bytes_rw = &BytesWritten;
oid_par.bytes_needed = &BytesNeeded;
oid_par.type_of_oid = QUERY_OID;
}
status = phandler->handler(&oid_par);
/* todo:check status, BytesNeeded, etc. */
} else {
netdev_info(dev, "r8712u: %s: err!, subcode=%d, oid=%d, handler=%p\n",
__func__, poidparam->subcode, phandler->oid,
phandler->handler);
ret = -EFAULT;
goto _r871x_mp_ioctl_hdl_exit;
}
if (bset == 0x00) { /* query info */
if (copy_to_user(p->pointer, pparmbuf, len))
ret = -EFAULT;
}
if (status) {
ret = -EFAULT;
goto _r871x_mp_ioctl_hdl_exit;
}
_r871x_mp_ioctl_hdl_exit:
kfree(pparmbuf);
return ret;
}
static int r871x_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct iw_point *pdata = &wrqu->data;
struct wlan_network *pnetwork = NULL;
u32 cnt = 0, wpa_ielen;
unsigned long irqL;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
char data[32];
if (padapter->bDriverStopped || (pdata == NULL))
return -EINVAL;
while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
msleep(30);
cnt++;
if (cnt > 100)
break;
}
pdata->flags = 0;
if (pdata->length >= 32) {
if (copy_from_user(data, pdata->pointer, 32))
return -EINVAL;
} else
return -EINVAL;
spin_lock_irqsave(&(pmlmepriv->scanned_queue.lock), irqL);
phead = get_list_head(queue);
plist = get_next(phead);
while (1) {
if (end_of_queue_search(phead, plist) == true)
break;
pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
if (hwaddr_aton_i(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock),
irqL);
return -EINVAL;
}
netdev_info(dev, "r8712u: BSSID:%pM\n", bssid);
if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
/* BSSID match, then check if supporting wpa/wpa2 */
pbuf = r8712_get_wpa_ie(&pnetwork->network.IEs[12],
&wpa_ielen, pnetwork->network.IELength-12);
if (pbuf && (wpa_ielen > 0)) {
pdata->flags = 1;
break;
}
pbuf = r8712_get_wpa2_ie(&pnetwork->network.IEs[12],
&wpa_ielen, pnetwork->network.IELength-12);
if (pbuf && (wpa_ielen > 0)) {
pdata->flags = 2;
break;
}
}
plist = get_next(plist);
}
spin_unlock_irqrestore(&(pmlmepriv->scanned_queue.lock), irqL);
if (pdata->length >= 34) {
if (copy_to_user((u8 __user *)pdata->pointer + 32,
(u8 *)&pdata->flags, 1))
return -EINVAL;
}
return 0;
}
static int r871x_set_pid(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
if ((padapter->bDriverStopped) || (pdata == NULL))
return -EINVAL;
if (copy_from_user(&padapter->pid, pdata->pointer, sizeof(int)))
return -EINVAL;
return 0;
}
static int r871x_set_chplan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
int ch_plan = -1;
if ((padapter->bDriverStopped) || (pdata == NULL)) {
ret = -EINVAL;
goto exit;
}
ch_plan = (int)*extra;
r8712_set_chplan_cmd(padapter, ch_plan);
exit:
return ret;
}
static int r871x_wps_start(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
if ((padapter->bDriverStopped) || (pdata == NULL))
return -EINVAL;
if (copy_from_user((void *)&u32wps_start, pdata->pointer, 4))
return -EFAULT;
if (u32wps_start == 0)
u32wps_start = *extra;
if (u32wps_start == 1) /* WPS Start */
padapter->ledpriv.LedControlHandler(padapter,
LED_CTL_START_WPS);
else if (u32wps_start == 2) /* WPS Stop because of wps success */
padapter->ledpriv.LedControlHandler(padapter,
LED_CTL_STOP_WPS);
else if (u32wps_start == 3) /* WPS Stop because of wps fail */
padapter->ledpriv.LedControlHandler(padapter,
LED_CTL_STOP_WPS_FAIL);
return 0;
}
static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
switch (name) {
case IEEE_PARAM_WPA_ENABLED:
padapter->securitypriv.AuthAlgrthm = 2; /* 802.1x */
switch ((value)&0xff) {
case 1: /* WPA */
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption2Enabled;
break;
case 2: /* WPA2 */
padapter->securitypriv.ndisauthtype =
Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
padapter->securitypriv.ndisencryptstatus =
Ndis802_11Encryption3Enabled;
break;
}
break;
case IEEE_PARAM_TKIP_COUNTERMEASURES:
break;
case IEEE_PARAM_DROP_UNENCRYPTED:
/* HACK:
*
* wpa_supplicant calls set_wpa_enabled when the driver
* is loaded and unloaded, regardless of if WPA is being
* used. No other calls are made which can be used to
* determine if encryption will be used or not prior to
* association being expected. If encryption is not being
* used, drop_unencrypted is set to false, else true -- we
* can use this to determine if the CAP_PRIVACY_ON bit should
* be set.
*/
break;
case IEEE_PARAM_PRIVACY_INVOKED:
break;
case IEEE_PARAM_AUTH_ALGS:
return wpa_set_auth_algs(dev, value);
break;
case IEEE_PARAM_IEEE_802_1X:
break;
case IEEE_PARAM_WPAX_SELECT:
/* added for WPA2 mixed mode */
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
switch (command) {
case IEEE_MLME_STA_DEAUTH:
if (!r8712_set_802_11_disassociate(padapter))
return -1;
break;
case IEEE_MLME_STA_DISASSOC:
if (!r8712_set_802_11_disassociate(padapter))
return -1;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
{
struct ieee_param *param;
int ret = 0;
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
if (p->length < sizeof(struct ieee_param) || !p->pointer)
return -EINVAL;
param = (struct ieee_param *)_malloc(p->length);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree((u8 *)param);
return -EFAULT;
}
switch (param->cmd) {
case IEEE_CMD_SET_WPA_PARAM:
ret = wpa_set_param(dev, param->u.wpa_param.name,
param->u.wpa_param.value);
break;
case IEEE_CMD_SET_WPA_IE:
ret = r871x_set_wpa_ie(padapter, (char *)param->u.wpa_ie.data,
(u16)param->u.wpa_ie.len);
break;
case IEEE_CMD_SET_ENCRYPTION:
ret = wpa_set_encryption(dev, param, p->length);
break;
case IEEE_CMD_MLME:
ret = wpa_mlme(dev, param->u.mlme.command,
param->u.mlme.reason_code);
break;
default:
ret = -EOPNOTSUPP;
break;
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
kfree((u8 *)param);
return ret;
}
/* based on "driver_ipw" and for hostapd */
int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
return wpa_supplicant_ioctl(dev, &wrq->u.data);
default:
return -EOPNOTSUPP;
}
return 0;
}
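/*
 * Illustrative only (not part of the driver): a user-space supplicant
 * would reach wpa_supplicant_ioctl() roughly like this, assuming
 * RTL_IOCTL_WPA_SUPPLICANT and a populated struct ieee_param:
 *
 *	struct iwreq iwr;
 *	memset(&iwr, 0, sizeof(iwr));
 *	strncpy(iwr.ifr_name, "wlan0", IFNAMSIZ - 1);
 *	iwr.u.data.pointer = param;	// struct ieee_param + key data
 *	iwr.u.data.length = len;
 *	ioctl(sockfd, RTL_IOCTL_WPA_SUPPLICANT, &iwr);
 */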
static iw_handler r8711_handlers[] = {
NULL, /* SIOCSIWCOMMIT */
r8711_wx_get_name, /* SIOCGIWNAME */
dummy, /* SIOCSIWNWID */
dummy, /* SIOCGIWNWID */
r8711_wx_set_freq, /* SIOCSIWFREQ */
r8711_wx_get_freq, /* SIOCGIWFREQ */
r8711_wx_set_mode, /* SIOCSIWMODE */
r8711_wx_get_mode, /* SIOCGIWMODE */
dummy, /* SIOCSIWSENS */
r8711_wx_get_sens, /* SIOCGIWSENS */
NULL, /* SIOCSIWRANGE */
r8711_wx_get_range, /* SIOCGIWRANGE */
r871x_wx_set_priv, /* SIOCSIWPRIV */
NULL, /* SIOCGIWPRIV */
NULL, /* SIOCSIWSTATS */
NULL, /* SIOCGIWSTATS */
dummy, /* SIOCSIWSPY */
dummy, /* SIOCGIWSPY */
NULL, /* SIOCGIWTHRSPY */
NULL, /* SIOCWIWTHRSPY */
r8711_wx_set_wap, /* SIOCSIWAP */
r8711_wx_get_wap, /* SIOCGIWAP */
r871x_wx_set_mlme, /* request MLME operation;
* uses struct iw_mlme */
dummy, /* SIOCGIWAPLIST -- deprecated */
r8711_wx_set_scan, /* SIOCSIWSCAN */
r8711_wx_get_scan, /* SIOCGIWSCAN */
r8711_wx_set_essid, /* SIOCSIWESSID */
r8711_wx_get_essid, /* SIOCGIWESSID */
dummy, /* SIOCSIWNICKN */
r871x_wx_get_nick, /* SIOCGIWNICKN */
NULL, /* -- hole -- */
NULL, /* -- hole -- */
r8711_wx_set_rate, /* SIOCSIWRATE */
r8711_wx_get_rate, /* SIOCGIWRATE */
dummy, /* SIOCSIWRTS */
r8711_wx_get_rts, /* SIOCGIWRTS */
r8711_wx_set_frag, /* SIOCSIWFRAG */
r8711_wx_get_frag, /* SIOCGIWFRAG */
dummy, /* SIOCSIWTXPOW */
dummy, /* SIOCGIWTXPOW */
dummy, /* SIOCSIWRETRY */
r8711_wx_get_retry, /* SIOCGIWRETRY */
r8711_wx_set_enc, /* SIOCSIWENCODE */
r8711_wx_get_enc, /* SIOCGIWENCODE */
dummy, /* SIOCSIWPOWER */
r8711_wx_get_power, /* SIOCGIWPOWER */
NULL, /*---hole---*/
NULL, /*---hole---*/
r871x_wx_set_gen_ie, /* SIOCSIWGENIE */
NULL, /* SIOCGIWGENIE */
r871x_wx_set_auth, /* SIOCSIWAUTH */
NULL, /* SIOCGIWAUTH */
r871x_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
NULL, /* SIOCGIWENCODEEXT */
r871x_wx_set_pmkid, /* SIOCSIWPMKSA */
NULL, /*---hole---*/
};
static const struct iw_priv_args r8711_private_args[] = {
{
SIOCIWFIRSTPRIV + 0x0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "read32"
},
{
SIOCIWFIRSTPRIV + 0x1,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "write32"
},
{
SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
},
{
SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
},
{
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
},
{
SIOCIWFIRSTPRIV + 0x5,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setpid"
},
{
SIOCIWFIRSTPRIV + 0x6,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
},
{
SIOCIWFIRSTPRIV + 0x7,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "chplan"
}
};
static iw_handler r8711_private_handler[] = {
r8711_wx_read32,
r8711_wx_write32,
r8711_drvext_hdl,
r871x_mp_ioctl_hdl,
r871x_get_ap_info, /*for MM DTV platform*/
r871x_set_pid,
r871x_wps_start,
r871x_set_chplan
};
static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
{
struct _adapter *padapter = (struct _adapter *) netdev_priv(dev);
struct iw_statistics *piwstats = &padapter->iwstats;
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) != true) {
piwstats->qual.qual = 0;
piwstats->qual.level = 0;
piwstats->qual.noise = 0;
} else {
/* To report a percentage, the dBm reading would need to be
 * converted back to its original scale.
 */
tmp_level = padapter->recvpriv.fw_rssi;
tmp_qual = padapter->recvpriv.signal;
tmp_noise = padapter->recvpriv.noise;
piwstats->qual.level = tmp_level;
piwstats->qual.qual = tmp_qual;
piwstats->qual.noise = tmp_noise;
}
piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
return &padapter->iwstats;
}
struct iw_handler_def r871x_handlers_def = {
.standard = r8711_handlers,
.num_standard = ARRAY_SIZE(r8711_handlers),
.private = r8711_private_handler,
.private_args = (struct iw_priv_args *)r8711_private_args,
.num_private = ARRAY_SIZE(r8711_private_handler),
.num_private_args = ARRAY_SIZE(r8711_private_args),
.get_wireless_stats = r871x_get_wireless_stats
};
| gpl-2.0 |
wanam/Adam-Kernel-GS4 | drivers/base/regmap/regmap-debugfs.c | 76 | 11532 | /*
* Register map access API - debugfs
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include "internal.h"
static struct dentry *regmap_debugfs_root;
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
snprintf(buf, buf_size, "%x", max_val);
return strlen(buf);
}
static ssize_t regmap_name_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct regmap *map = file->private_data;
int ret;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
if (ret < 0) {
kfree(buf);
return ret;
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
kfree(buf);
return ret;
}
static const struct file_operations regmap_name_fops = {
.open = simple_open,
.read = regmap_name_read_file,
.llseek = default_llseek,
};
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
struct regmap_debugfs_off_cache *c;
while (!list_empty(&map->debugfs_off_cache)) {
c = list_first_entry(&map->debugfs_off_cache,
struct regmap_debugfs_off_cache,
list);
list_del(&c->list);
kfree(c);
}
}
/*
* Work out where the start offset maps into register numbers, bearing
* in mind that we suppress hidden registers.
*/
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int base,
loff_t from,
loff_t *pos)
{
struct regmap_debugfs_off_cache *c = NULL;
loff_t p = 0;
unsigned int i, ret = 0;
unsigned int fpos_offset;
unsigned int reg_offset;
/*
* If we don't have a cache, build one so we don't have to do a
* linear scan each time.
*/
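/* Each cache entry describes one contiguous run of printable
 * (readable, non-precious) registers: base_reg/max_reg bound the
 * register numbers and min/max bound the file offsets they occupy.
 */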
mutex_lock(&map->cache_lock);
i = base;
if (list_empty(&map->debugfs_off_cache)) {
for (; i <= map->max_register; i++) {
/* Skip unprinted registers, closing off cache entry */
if (!regmap_readable(map, i) ||
regmap_precious(map, i)) {
if (c) {
c->max = p - 1;
c->max_reg = i - 1;
list_add_tail(&c->list,
&map->debugfs_off_cache);
c = NULL;
}
continue;
}
/* No cache entry? Start a new one */
if (!c) {
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
return base;
}
c->min = p;
c->base_reg = i;
}
p += map->debugfs_tot_len;
}
}
/* Close the last entry off if we didn't scan beyond it */
if (c) {
c->max = p - 1;
c->max_reg = i - 1;
list_add_tail(&c->list,
&map->debugfs_off_cache);
}
/*
* This should never happen; we return above if we fail to
* allocate and we should never be in this code if there are
* no registers at all.
*/
if (list_empty(&map->debugfs_off_cache)) {
WARN_ON(list_empty(&map->debugfs_off_cache));
mutex_unlock(&map->cache_lock);
return base;
}
/* Find the relevant block:offset */
list_for_each_entry(c, &map->debugfs_off_cache, list) {
if (from >= c->min && from <= c->max) {
fpos_offset = from - c->min;
reg_offset = fpos_offset / map->debugfs_tot_len;
*pos = c->min + (reg_offset * map->debugfs_tot_len);
mutex_unlock(&map->cache_lock);
return c->base_reg + reg_offset;
}
*pos = c->max;
ret = c->max_reg;
}
mutex_unlock(&map->cache_lock);
return ret;
}
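/*
* Worked example of the block:offset math above, with made-up numbers:
* say debugfs_tot_len is 10 and a cache entry covers registers
* 0x10..0x1f starting at file position 160 (c->min == 160,
* c->base_reg == 0x10). A read from file offset 195 gives
* fpos_offset = 195 - 160 = 35 and reg_offset = 35 / 10 = 3, so the
* dump resumes at register 0x13 and *pos snaps back to
* 160 + 3 * 10 = 190, the start of that register's line.
*/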
static inline void regmap_calc_tot_len(struct regmap *map,
void *buf, size_t count)
{
/* Calculate the length of a fixed format */
if (!map->debugfs_tot_len) {
map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
buf, count);
map->debugfs_val_len = 2 * map->format.val_bytes;
map->debugfs_tot_len = map->debugfs_reg_len +
map->debugfs_val_len + 3; /* : \n */
}
}
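/*
* Worked example: with max_register 0x3ff (debugfs_reg_len == 3) and
* 16-bit values (val_bytes == 2, so debugfs_val_len == 4), tot_len is
* 3 + 4 + 3 == 10, matching a dump line such as "3ff: beef\n" (three
* digits, ": ", four digits, newline).
*/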
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
size_t buf_pos = 0;
loff_t p = *ppos;
ssize_t ret;
int i;
struct regmap *map = file->private_data;
char *buf;
unsigned int val, start_reg;
if (*ppos < 0 || !count)
return -EINVAL;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
regmap_calc_tot_len(map, buf, count);
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
for (i = start_reg; i <= map->max_register; i++) {
if (!regmap_readable(map, i))
continue;
if (regmap_precious(map, i))
continue;
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
if (buf_pos + map->debugfs_tot_len > count)
break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
map->debugfs_reg_len, i);
buf_pos += map->debugfs_reg_len + 2;
/* Format the value, write all X if we can't read */
ret = regmap_read(map, i, &val);
if (ret == 0)
snprintf(buf + buf_pos, count - buf_pos,
"%.*x", map->debugfs_val_len, val);
else
memset(buf + buf_pos, 'X',
map->debugfs_val_len);
buf_pos += 2 * map->format.val_bytes;
buf[buf_pos++] = '\n';
}
p += map->debugfs_tot_len;
}
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT;
goto out;
}
*ppos += buf_pos;
out:
kfree(buf);
return ret;
}
#define REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
* Writing registers through debugfs can be dangerous, especially with
* clients such as PMICs, so no real compile-time configuration option is
* provided for this feature; anyone who wants to use it needs to modify
* the source code directly.
*/
static ssize_t regmap_map_write_file(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
char buf[32];
size_t buf_size;
char *start = buf;
unsigned long reg, value;
struct regmap *map = file->private_data;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
while (*start == ' ')
start++;
reg = simple_strtoul(start, &start, 16);
while (*start == ' ')
start++;
if (strict_strtoul(start, 16, &value))
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER);
regmap_write(map, reg, value);
return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
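/*
* A hedged userspace sketch of the write path above: the file accepts
* "<reg> <value>", both in hex. The debugfs path is an assumption; the
* directory name comes from dev_name() of the regmap's device.
*/
#if 0
#include <fcntl.h>
#include <unistd.h>
int main(void)
{
int fd = open("/sys/kernel/debug/regmap/spi0.0/registers", O_WRONLY);
if (fd < 0)
return 1;
write(fd, "40 1", 4); /* write value 0x1 to register 0x40 */
close(fd);
return 0;
}
#endif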
static const struct file_operations regmap_map_fops = {
.open = simple_open,
.read = regmap_map_read_file,
.write = regmap_map_write_file,
.llseek = default_llseek,
};
static ssize_t regmap_reg_ranges_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct regmap *map = file->private_data;
struct regmap_debugfs_off_cache *c;
loff_t p = 0;
size_t buf_pos = 0;
char *buf;
char *entry;
int ret;
if (*ppos < 0 || !count)
return -EINVAL;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!entry) {
kfree(buf);
return -ENOMEM;
}
/* While we are at it, build the register dump cache
* now so the read() operation on the `registers' file
* can benefit from using the cache. We do not care
* about the file position information that is contained
* in the cache, just about the actual register blocks */
regmap_calc_tot_len(map, buf, count);
regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
/* Reset file pointer as the fixed-format of the `registers'
* file is not compatible with the `range' file */
p = 0;
mutex_lock(&map->cache_lock);
list_for_each_entry(c, &map->debugfs_off_cache, list) {
snprintf(entry, PAGE_SIZE, "%x-%x",
c->base_reg, c->max_reg);
if (p >= *ppos) {
if (buf_pos + 1 + strlen(entry) > count)
break;
snprintf(buf + buf_pos, count - buf_pos,
"%s", entry);
buf_pos += strlen(entry);
buf[buf_pos] = '\n';
buf_pos++;
}
p += strlen(entry) + 1;
}
mutex_unlock(&map->cache_lock);
kfree(entry);
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT;
goto out_buf;
}
*ppos += buf_pos;
out_buf:
kfree(buf);
return ret;
}
static const struct file_operations regmap_reg_ranges_fops = {
.open = simple_open,
.read = regmap_reg_ranges_read_file,
.llseek = default_llseek,
};
static ssize_t regmap_access_read_file(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
int reg_len, tot_len;
size_t buf_pos = 0;
loff_t p = 0;
ssize_t ret;
int i;
struct regmap *map = file->private_data;
char *buf;
if (*ppos < 0 || !count)
return -EINVAL;
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Calculate the length of a fixed format */
reg_len = regmap_calc_reg_len(map->max_register, buf, count);
tot_len = reg_len + 10; /* ': R W V P\n' */
for (i = 0; i < map->max_register + 1; i++) {
/* Ignore registers which are neither readable nor writable */
if (!regmap_readable(map, i) && !regmap_writeable(map, i))
continue;
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
if (buf_pos >= count - 1 - tot_len)
break;
/* Format the register */
snprintf(buf + buf_pos, count - buf_pos,
"%.*x: %c %c %c %c\n",
reg_len, i,
regmap_readable(map, i) ? 'y' : 'n',
regmap_writeable(map, i) ? 'y' : 'n',
regmap_volatile(map, i) ? 'y' : 'n',
regmap_precious(map, i) ? 'y' : 'n');
buf_pos += tot_len;
}
p += tot_len;
}
ret = buf_pos;
if (copy_to_user(user_buf, buf, buf_pos)) {
ret = -EFAULT;
goto out;
}
*ppos += buf_pos;
out:
kfree(buf);
return ret;
}
static const struct file_operations regmap_access_fops = {
.open = simple_open,
.read = regmap_access_read_file,
.llseek = default_llseek,
};
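/*
* Example of the "access" file format produced above (flag values are
* illustrative): one line per register with four y/n columns for
* readable, writable, volatile and precious, e.g.
*
* 000: y y n n
* 001: y n y n
*/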
void regmap_debugfs_init(struct regmap *map)
{
map->debugfs = debugfs_create_dir(dev_name(map->dev),
regmap_debugfs_root);
if (!map->debugfs) {
dev_warn(map->dev, "Failed to create debugfs directory\n");
return;
}
INIT_LIST_HEAD(&map->debugfs_off_cache);
mutex_init(&map->cache_lock);
debugfs_create_file("name", 0400, map->debugfs,
map, ®map_name_fops);
debugfs_create_file("range", 0400, map->debugfs,
map, ®map_reg_ranges_fops);
if (map->max_register) {
debugfs_create_file("registers", 0400, map->debugfs,
map, ®map_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
map, ®map_access_fops);
}
if (map->cache_type) {
debugfs_create_bool("cache_only", 0400, map->debugfs,
&map->cache_only);
debugfs_create_bool("cache_dirty", 0400, map->debugfs,
&map->cache_dirty);
debugfs_create_bool("cache_bypass", 0400, map->debugfs,
&map->cache_bypass);
}
}
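/*
* The resulting debugfs layout, sketched for a hypothetical device
* named "spi0.0":
*
* /sys/kernel/debug/regmap/spi0.0/name
* /sys/kernel/debug/regmap/spi0.0/range
* /sys/kernel/debug/regmap/spi0.0/registers (if max_register is set)
* /sys/kernel/debug/regmap/spi0.0/access (if max_register is set)
* /sys/kernel/debug/regmap/spi0.0/cache_only (if cache_type is set)
* /sys/kernel/debug/regmap/spi0.0/cache_dirty (if cache_type is set)
* /sys/kernel/debug/regmap/spi0.0/cache_bypass (if cache_type is set)
*/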
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
mutex_lock(&map->cache_lock);
regmap_debugfs_free_dump_cache(map);
mutex_unlock(&map->cache_lock);
}
void regmap_debugfs_initcall(void)
{
regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
if (!regmap_debugfs_root) {
pr_warn("regmap: Failed to create debugfs root\n");
return;
}
}
| gpl-2.0 |
janrinze/loox7xxport.loox2-6-22 | drivers/i2c/busses/i2c-amd756.c | 76 | 11706 | /*
amd756.c - Part of lm_sensors, Linux kernel modules for hardware
monitoring
Copyright (c) 1999-2002 Merlin Hughes <merlin@merlin.org>
Shamelessly ripped from i2c-piix4.c:
Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
2002-04-08: Added nForce support. (Csaba Halasz)
2002-10-03: Fixed nForce PnP I/O port. (Michael Steil)
2002-12-28: Rewritten into something that resembles a Linux driver (hch)
2003-11-29: Added back AMD8111 removed by the previous rewrite.
(Philip Pokorny)
*/
/*
Supports AMD756, AMD766, AMD768, AMD8111 and nVidia nForce
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <asm/io.h>
/* AMD756 SMBus address offsets */
#define SMB_ADDR_OFFSET 0xE0
#define SMB_IOSIZE 16
#define SMB_GLOBAL_STATUS (0x0 + amd756_ioport)
#define SMB_GLOBAL_ENABLE (0x2 + amd756_ioport)
#define SMB_HOST_ADDRESS (0x4 + amd756_ioport)
#define SMB_HOST_DATA (0x6 + amd756_ioport)
#define SMB_HOST_COMMAND (0x8 + amd756_ioport)
#define SMB_HOST_BLOCK_DATA (0x9 + amd756_ioport)
#define SMB_HAS_DATA (0xA + amd756_ioport)
#define SMB_HAS_DEVICE_ADDRESS (0xC + amd756_ioport)
#define SMB_HAS_HOST_ADDRESS (0xE + amd756_ioport)
#define SMB_SNOOP_ADDRESS (0xF + amd756_ioport)
/* PCI Address Constants */
/* address of I/O space */
#define SMBBA 0x058 /* mh */
#define SMBBANFORCE 0x014
/* general configuration */
#define SMBGCFG 0x041 /* mh */
/* silicon revision code */
#define SMBREV 0x008
/* Other settings */
#define MAX_TIMEOUT 500
/* AMD756 constants */
#define AMD756_QUICK 0x00
#define AMD756_BYTE 0x01
#define AMD756_BYTE_DATA 0x02
#define AMD756_WORD_DATA 0x03
#define AMD756_PROCESS_CALL 0x04
#define AMD756_BLOCK_DATA 0x05
static struct pci_driver amd756_driver;
static unsigned short amd756_ioport;
/*
SMBUS event = I/O 28-29 bit 11
see E0 for the status bits and enabled in E2
*/
#define GS_ABRT_STS (1 << 0)
#define GS_COL_STS (1 << 1)
#define GS_PRERR_STS (1 << 2)
#define GS_HST_STS (1 << 3)
#define GS_HCYC_STS (1 << 4)
#define GS_TO_STS (1 << 5)
#define GS_SMB_STS (1 << 11)
#define GS_CLEAR_STS (GS_ABRT_STS | GS_COL_STS | GS_PRERR_STS | \
GS_HCYC_STS | GS_TO_STS )
#define GE_CYC_TYPE_MASK (7)
#define GE_HOST_STC (1 << 3)
#define GE_ABORT (1 << 5)
static int amd756_transaction(struct i2c_adapter *adap)
{
int temp;
int result = 0;
int timeout = 0;
dev_dbg(&adap->dev, "Transaction (pre): GS=%04x, GE=%04x, ADD=%04x, "
"DAT=%04x\n", inw_p(SMB_GLOBAL_STATUS),
inw_p(SMB_GLOBAL_ENABLE), inw_p(SMB_HOST_ADDRESS),
inb_p(SMB_HOST_DATA));
/* Make sure the SMBus host is ready to start transmitting */
if ((temp = inw_p(SMB_GLOBAL_STATUS)) & (GS_HST_STS | GS_SMB_STS)) {
dev_dbg(&adap->dev, "SMBus busy (%04x). Waiting...\n", temp);
do {
msleep(1);
temp = inw_p(SMB_GLOBAL_STATUS);
} while ((temp & (GS_HST_STS | GS_SMB_STS)) &&
(timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout >= MAX_TIMEOUT) {
dev_dbg(&adap->dev, "Busy wait timeout (%04x)\n", temp);
goto abort;
}
timeout = 0;
}
/* start the transaction by setting the start bit */
outw_p(inw(SMB_GLOBAL_ENABLE) | GE_HOST_STC, SMB_GLOBAL_ENABLE);
/* We will always wait for a fraction of a second! */
do {
msleep(1);
temp = inw_p(SMB_GLOBAL_STATUS);
} while ((temp & GS_HST_STS) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout >= MAX_TIMEOUT) {
dev_dbg(&adap->dev, "Completion timeout!\n");
goto abort;
}
if (temp & GS_PRERR_STS) {
result = -1;
dev_dbg(&adap->dev, "SMBus Protocol error (no response)!\n");
}
if (temp & GS_COL_STS) {
result = -1;
dev_warn(&adap->dev, "SMBus collision!\n");
}
if (temp & GS_TO_STS) {
result = -1;
dev_dbg(&adap->dev, "SMBus protocol timeout!\n");
}
if (temp & GS_HCYC_STS)
dev_dbg(&adap->dev, "SMBus protocol success!\n");
outw_p(GS_CLEAR_STS, SMB_GLOBAL_STATUS);
#ifdef DEBUG
if (((temp = inw_p(SMB_GLOBAL_STATUS)) & GS_CLEAR_STS) != 0x00) {
dev_dbg(&adap->dev,
"Failed reset at end of transaction (%04x)\n", temp);
}
#endif
dev_dbg(&adap->dev,
"Transaction (post): GS=%04x, GE=%04x, ADD=%04x, DAT=%04x\n",
inw_p(SMB_GLOBAL_STATUS), inw_p(SMB_GLOBAL_ENABLE),
inw_p(SMB_HOST_ADDRESS), inb_p(SMB_HOST_DATA));
return result;
abort:
dev_warn(&adap->dev, "Sending abort\n");
outw_p(inw(SMB_GLOBAL_ENABLE) | GE_ABORT, SMB_GLOBAL_ENABLE);
msleep(100);
outw_p(GS_CLEAR_STS, SMB_GLOBAL_STATUS);
return -1;
}
/* Return -1 on error. */
static s32 amd756_access(struct i2c_adapter * adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data * data)
{
int i, len;
/* TODO: Should I support 10-bit transfers? */
switch (size) {
case I2C_SMBUS_PROC_CALL:
dev_dbg(&adap->dev, "I2C_SMBUS_PROC_CALL not supported!\n");
/* TODO: Well... It is supported, I'm just not sure what to do here... */
return -1;
case I2C_SMBUS_QUICK:
outw_p(((addr & 0x7f) << 1) | (read_write & 0x01),
SMB_HOST_ADDRESS);
size = AMD756_QUICK;
break;
case I2C_SMBUS_BYTE:
outw_p(((addr & 0x7f) << 1) | (read_write & 0x01),
SMB_HOST_ADDRESS);
if (read_write == I2C_SMBUS_WRITE)
outb_p(command, SMB_HOST_DATA);
size = AMD756_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
outw_p(((addr & 0x7f) << 1) | (read_write & 0x01),
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE)
outw_p(data->byte, SMB_HOST_DATA);
size = AMD756_BYTE_DATA;
break;
case I2C_SMBUS_WORD_DATA:
outw_p(((addr & 0x7f) << 1) | (read_write & 0x01),
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE)
outw_p(data->word, SMB_HOST_DATA); /* TODO: endian???? */
size = AMD756_WORD_DATA;
break;
case I2C_SMBUS_BLOCK_DATA:
outw_p(((addr & 0x7f) << 1) | (read_write & 0x01),
SMB_HOST_ADDRESS);
outb_p(command, SMB_HOST_COMMAND);
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len < 0)
len = 0;
if (len > 32)
len = 32;
outw_p(len, SMB_HOST_DATA);
/* i = inw_p(SMBHSTCNT); Reset SMBBLKDAT */
for (i = 1; i <= len; i++)
outb_p(data->block[i],
SMB_HOST_BLOCK_DATA);
}
size = AMD756_BLOCK_DATA;
break;
}
/* How about enabling interrupts... */
outw_p(size & GE_CYC_TYPE_MASK, SMB_GLOBAL_ENABLE);
if (amd756_transaction(adap)) /* Error in transaction */
return -1;
if ((read_write == I2C_SMBUS_WRITE) || (size == AMD756_QUICK))
return 0;
switch (size) {
case AMD756_BYTE:
data->byte = inw_p(SMB_HOST_DATA);
break;
case AMD756_BYTE_DATA:
data->byte = inw_p(SMB_HOST_DATA);
break;
case AMD756_WORD_DATA:
data->word = inw_p(SMB_HOST_DATA); /* TODO: endian???? */
break;
case AMD756_BLOCK_DATA:
data->block[0] = inw_p(SMB_HOST_DATA) & 0x3f;
if (data->block[0] > 32)
data->block[0] = 32;
/* i = inw_p(SMBHSTCNT); Reset SMBBLKDAT */
for (i = 1; i <= data->block[0]; i++)
data->block[i] = inb_p(SMB_HOST_BLOCK_DATA);
break;
}
return 0;
}
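/*
* A hedged userspace sketch of how this xfer path is reached through
* i2c-dev. The adapter number and the 0x2d slave address are
* assumptions, and the exact header layout (linux/i2c.h vs
* linux/i2c-dev.h) varies between distributions.
*/
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
int main(void)
{
union i2c_smbus_data data;
struct i2c_smbus_ioctl_data args;
int fd = open("/dev/i2c-0", O_RDWR);
if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x2d) < 0)
return 1;
args.read_write = I2C_SMBUS_READ;
args.command = 0x00; /* device register 0 */
args.size = I2C_SMBUS_BYTE_DATA;
args.data = &data;
if (ioctl(fd, I2C_SMBUS, &args) == 0) /* lands in amd756_access() */
printf("reg 0 = 0x%02x\n", data.byte);
return 0;
}
#endif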
static u32 amd756_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_PROC_CALL;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = amd756_access,
.functionality = amd756_func,
};
struct i2c_adapter amd756_smbus = {
.owner = THIS_MODULE,
.id = I2C_HW_SMBUS_AMD756,
.class = I2C_CLASS_HWMON,
.algo = &smbus_algorithm,
};
enum chiptype { AMD756, AMD766, AMD768, NFORCE, AMD8111 };
static const char* chipname[] = {
"AMD756", "AMD766", "AMD768",
"nVidia nForce", "AMD8111",
};
static struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
.driver_data = AMD766 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7443),
.driver_data = AMD768 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS),
.driver_data = AMD8111 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS),
.driver_data = NFORCE },
{ 0, }
};
MODULE_DEVICE_TABLE (pci, amd756_ids);
static int __devinit amd756_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int nforce = (id->driver_data == NFORCE);
int error;
u8 temp;
if (amd756_ioport) {
dev_err(&pdev->dev, "Only one device supported "
"(you have a strange motherboard, btw)\n");
return -ENODEV;
}
if (nforce) {
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
pci_read_config_word(pdev, SMBBANFORCE, &amd756_ioport);
amd756_ioport &= 0xfffc;
} else { /* amd */
if (PCI_FUNC(pdev->devfn) != 3)
return -ENODEV;
pci_read_config_byte(pdev, SMBGCFG, &temp);
if ((temp & 128) == 0) {
dev_err(&pdev->dev,
"Error: SMBus controller I/O not enabled!\n");
return -ENODEV;
}
/* Determine the address of the SMBus areas */
/* Technically it is a dword but... */
pci_read_config_word(pdev, SMBBA, &amd756_ioport);
amd756_ioport &= 0xff00;
amd756_ioport += SMB_ADDR_OFFSET;
}
if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) {
dev_err(&pdev->dev, "SMB region 0x%x already in use!\n",
amd756_ioport);
return -ENODEV;
}
pci_read_config_byte(pdev, SMBREV, &temp);
dev_dbg(&pdev->dev, "SMBREV = 0x%X\n", temp);
dev_dbg(&pdev->dev, "AMD756_smba = 0x%X\n", amd756_ioport);
/* set up the sysfs linkage to our parent device */
amd756_smbus.dev.parent = &pdev->dev;
sprintf(amd756_smbus.name, "SMBus %s adapter at %04x",
chipname[id->driver_data], amd756_ioport);
error = i2c_add_adapter(&amd756_smbus);
if (error) {
dev_err(&pdev->dev,
"Adapter registration failed, module not inserted\n");
goto out_err;
}
return 0;
out_err:
release_region(amd756_ioport, SMB_IOSIZE);
return error;
}
static void __devexit amd756_remove(struct pci_dev *dev)
{
i2c_del_adapter(&amd756_smbus);
release_region(amd756_ioport, SMB_IOSIZE);
}
static struct pci_driver amd756_driver = {
.name = "amd756_smbus",
.id_table = amd756_ids,
.probe = amd756_probe,
.remove = __devexit_p(amd756_remove),
};
static int __init amd756_init(void)
{
return pci_register_driver(&amd756_driver);
}
static void __exit amd756_exit(void)
{
pci_unregister_driver(&amd756_driver);
}
MODULE_AUTHOR("Merlin Hughes <merlin@merlin.org>");
MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(amd756_smbus);
module_init(amd756_init)
module_exit(amd756_exit)
| gpl-2.0 |
sac23/Sacs_Stock_Kernel | drivers/gpu/ion/ion_cma_heap.c | 332 | 8144 | /*
* drivers/gpu/ion/ion_cma_heap.c
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>
#include <asm/cacheflush.h>
/* for ion_heap_ops structure */
#include "ion_priv.h"
#define ION_CMA_ALLOCATE_FAILED -1
struct ion_cma_buffer_info {
void *cpu_addr;
dma_addr_t handle;
struct sg_table *table;
bool is_cached;
};
static int cma_heap_has_outer_cache;
/*
* Create scatter-list for the already allocated DMA buffer.
* This function could be replaced by dma_common_get_sgtable
* as soon as it becomes available.
*/
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
int ret;
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (unlikely(ret))
return ret;
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
return 0;
}
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len, unsigned long align,
unsigned long flags)
{
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info;
dev_dbg(dev, "Request buffer allocation len %ld\n", len);
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info) {
dev_err(dev, "Can't allocate buffer info\n");
return ION_CMA_ALLOCATE_FAILED;
}
if (!ION_IS_CACHED(flags))
info->cpu_addr = dma_alloc_writecombine(dev, len,
&(info->handle), 0);
else
info->cpu_addr = dma_alloc_nonconsistent(dev, len,
&(info->handle), 0);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
goto err;
}
info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!info->table) {
dev_err(dev, "Fail to allocate sg table\n");
goto err;
}
info->is_cached = ION_IS_CACHED(flags);
ion_cma_get_sgtable(dev,
info->table, info->cpu_addr, info->handle, len);
/* keep this for memory release */
buffer->priv_virt = info;
dev_dbg(dev, "Allocate buffer %p\n", buffer);
return 0;
err:
kfree(info);
return ION_CMA_ALLOCATE_FAILED;
}
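/*
* A hedged sketch of a kernel-side client allocating from this heap.
* The ion_client_create()/ion_alloc() signatures vary across ION
* versions, and "idev" is a hypothetical struct ion_device pointer.
*/
#if 0
struct ion_client *client;
struct ion_handle *handle;
client = ion_client_create(idev, "example-client");
/* one page, page aligned, from the DMA (CMA) heap, uncached */
handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
ION_HEAP(ION_HEAP_TYPE_DMA), 0);
#endif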
static void ion_cma_free(struct ion_buffer *buffer)
{
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
dev_dbg(dev, "Release buffer %p\n", buffer);
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
/* release sg table */
kfree(info->table);
kfree(info);
}
/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
struct device *dev = heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
info->handle);
*addr = info->handle;
*len = buffer->size;
return 0;
}
struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
void ion_cma_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return;
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
if (info->is_cached)
return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
info->handle, buffer->size);
else
return dma_mmap_writecombine(dev, vma, info->cpu_addr,
info->handle, buffer->size);
}
static void *ion_cma_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->cpu_addr;
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return;
}
int ion_cma_map_iommu(struct ion_buffer *buffer,
struct ion_iommu_map *data,
unsigned int domain_num,
unsigned int partition_num,
unsigned long align,
unsigned long iova_length,
unsigned long flags)
{
int ret = 0;
struct iommu_domain *domain;
unsigned long extra;
unsigned long extra_iova_addr;
struct ion_cma_buffer_info *info = buffer->priv_virt;
struct sg_table *table = info->table;
int prot = IOMMU_WRITE | IOMMU_READ;
data->mapped_size = iova_length;
if (!msm_use_iommu()) {
data->iova_addr = info->handle;
return 0;
}
extra = iova_length - buffer->size;
ret = msm_allocate_iova_address(domain_num, partition_num,
data->mapped_size, align,
&data->iova_addr);
if (ret)
goto out;
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
ret = -EINVAL;
goto out1;
}
ret = iommu_map_range(domain, data->iova_addr, table->sgl,
buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
__func__, data->iova_addr, domain);
goto out1;
}
extra_iova_addr = data->iova_addr + buffer->size;
if (extra) {
ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
prot);
if (ret)
goto out2;
}
return ret;
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
return ret;
}
void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
unsigned int domain_num;
unsigned int partition_num;
struct iommu_domain *domain;
if (!msm_use_iommu())
return;
domain_num = iommu_map_domain(data);
partition_num = iommu_map_partition(data);
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
return;
}
iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
return;
}
int ion_cma_cache_ops(struct ion_heap *heap,
struct ion_buffer *buffer, void *vaddr,
unsigned int offset, unsigned int length,
unsigned int cmd)
{
void (*outer_cache_op)(phys_addr_t, phys_addr_t);
switch (cmd) {
case ION_IOC_CLEAN_CACHES:
dmac_clean_range(vaddr, vaddr + length);
outer_cache_op = outer_clean_range;
break;
case ION_IOC_INV_CACHES:
dmac_inv_range(vaddr, vaddr + length);
outer_cache_op = outer_inv_range;
break;
case ION_IOC_CLEAN_INV_CACHES:
dmac_flush_range(vaddr, vaddr + length);
outer_cache_op = outer_flush_range;
break;
default:
return -EINVAL;
}
if (cma_heap_has_outer_cache) {
struct ion_cma_buffer_info *info = buffer->priv_virt;
outer_cache_op(info->handle, info->handle + length);
}
return 0;
}
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
.map_dma = ion_cma_heap_map_dma,
.unmap_dma = ion_cma_heap_unmap_dma,
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
.map_iommu = ion_cma_map_iommu,
.unmap_iommu = ion_cma_unmap_iommu,
.cache_op = ion_cma_cache_ops,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
struct ion_heap *heap;
heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->ops = &ion_cma_ops;
/* set the device as the heap's private data; later it is
* used to link the heap with its reserved CMA memory */
heap->priv = data->priv;
heap->type = ION_HEAP_TYPE_DMA;
cma_heap_has_outer_cache = data->has_outer_cache;
return heap;
}
void ion_cma_heap_destroy(struct ion_heap *heap)
{
kfree(heap);
}
| gpl-2.0 |
lacvapps/linux | drivers/clk/mmp/clk-of-pxa910.c | 588 | 11993 | /*
* pxa910 clock framework source file
*
* Copyright (C) 2012 Marvell
* Chao Xie <xiechao.mail@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/marvell,pxa910.h>
#include "clk.h"
#include "reset.h"
#define APBC_RTC 0x28
#define APBC_TWSI0 0x2c
#define APBC_KPC 0x18
#define APBC_UART0 0x0
#define APBC_UART1 0x4
#define APBC_GPIO 0x8
#define APBC_PWM0 0xc
#define APBC_PWM1 0x10
#define APBC_PWM2 0x14
#define APBC_PWM3 0x18
#define APBC_SSP0 0x1c
#define APBC_SSP1 0x20
#define APBC_SSP2 0x4c
#define APBC_TIMER0 0x30
#define APBC_TIMER1 0x44
#define APBCP_TWSI1 0x28
#define APBCP_UART2 0x1c
#define APMU_SDH0 0x54
#define APMU_SDH1 0x58
#define APMU_USB 0x5c
#define APMU_DISP0 0x4c
#define APMU_CCIC0 0x50
#define APMU_DFC 0x60
#define MPMU_UART_PLL 0x14
struct pxa910_clk_unit {
struct mmp_clk_unit unit;
void __iomem *mpmu_base;
void __iomem *apmu_base;
void __iomem *apbc_base;
void __iomem *apbcp_base;
};
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
{PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
{PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
{PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
{PXA910_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
};
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
{PXA910_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
{PXA910_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
{PXA910_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
{PXA910_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
{PXA910_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
{PXA910_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
{PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
{PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
{PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
{PXA910_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0},
{PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
{PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
{PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
{PXA910_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
};
static struct mmp_clk_factor_masks uart_factor_masks = {
.factor = 2,
.num_mask = 0x1fff,
.den_mask = 0x1fff,
.num_shift = 16,
.den_shift = 0,
};
static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 8125, .den = 1536}, /* 14.7456 MHz */
};
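/*
* Worked math for the entry above, assuming the factor-clock formula
* rate = parent * den / (num * factor): with pll1_4 = 624 MHz / 4 =
* 156 MHz as the parent, 156000000 * 1536 / (8125 * 2) = 14745600 Hz,
* the classic 14.7456 MHz UART reference.
*/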
static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
{
struct clk *clk;
struct mmp_clk_unit *unit = &pxa_unit->unit;
mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
ARRAY_SIZE(fixed_rate_clks));
mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
ARRAY_SIZE(fixed_factor_clks));
clk = mmp_clk_register_factor("uart_pll", "pll1_4",
CLK_SET_RATE_PARENT,
pxa_unit->mpmu_base + MPMU_UART_PLL,
&uart_factor_masks, uart_factor_tbl,
ARRAY_SIZE(uart_factor_tbl), NULL);
mmp_clk_add(unit, PXA910_CLK_UART_PLL, clk);
}
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
static DEFINE_SPINLOCK(timer0_lock);
static DEFINE_SPINLOCK(timer1_lock);
static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96"};
static DEFINE_SPINLOCK(reset_lock);
static struct mmp_param_mux_clk apbc_mux_clks[] = {
{0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
{0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
{0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
{0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
{0, "timer0_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER0, 4, 3, 0, &timer0_lock},
{0, "timer1_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER1, 4, 3, 0, &timer1_lock},
};
static struct mmp_param_mux_clk apbcp_mux_clks[] = {
{0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
};
static struct mmp_param_gate_clk apbc_gate_clks[] = {
{PXA910_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
{PXA910_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
{PXA910_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
{PXA910_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
{PXA910_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
{PXA910_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
{PXA910_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
{PXA910_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
/* These gate clocks have mux parents. */
{PXA910_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
{PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
{PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
{PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
{PXA910_CLK_TIMER0, "timer0_clk", "timer0_mux", CLK_SET_RATE_PARENT, APBC_TIMER0, 0x3, 0x3, 0x0, 0, &timer0_lock},
{PXA910_CLK_TIMER1, "timer1_clk", "timer1_mux", CLK_SET_RATE_PARENT, APBC_TIMER1, 0x3, 0x3, 0x0, 0, &timer1_lock},
};
static struct mmp_param_gate_clk apbcp_gate_clks[] = {
{PXA910_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBCP_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
/* These gate clocks have mux parents. */
{PXA910_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
};
static void pxa910_apb_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
{
struct mmp_clk_unit *unit = &pxa_unit->unit;
mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
ARRAY_SIZE(apbc_mux_clks));
mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->apbcp_base,
ARRAY_SIZE(apbcp_mux_clks));
mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
ARRAY_SIZE(apbc_gate_clks));
mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->apbcp_base,
ARRAY_SIZE(apbcp_gate_clks));
}
static DEFINE_SPINLOCK(sdh0_lock);
static DEFINE_SPINLOCK(sdh1_lock);
static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
static DEFINE_SPINLOCK(usb_lock);
static DEFINE_SPINLOCK(disp0_lock);
static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
static DEFINE_SPINLOCK(ccic0_lock);
static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
static struct mmp_param_mux_clk apmu_mux_clks[] = {
{0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
{0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
{0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
{0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
{0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
};
static struct mmp_param_div_clk apmu_div_clks[] = {
{0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
};
static struct mmp_param_gate_clk apmu_gate_clks[] = {
{PXA910_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
{PXA910_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
{PXA910_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
/* These gate clocks have mux parents. */
{PXA910_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
{PXA910_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
{PXA910_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{PXA910_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
{PXA910_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
{PXA910_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
};
static void pxa910_axi_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
{
struct mmp_clk_unit *unit = &pxa_unit->unit;
mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
ARRAY_SIZE(apmu_mux_clks));
mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
ARRAY_SIZE(apmu_div_clks));
mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
ARRAY_SIZE(apmu_gate_clks));
}
static void pxa910_clk_reset_init(struct device_node *np,
struct pxa910_clk_unit *pxa_unit)
{
struct mmp_clk_reset_cell *cells;
int i, base, nr_resets_apbc, nr_resets_apbcp, nr_resets;
nr_resets_apbc = ARRAY_SIZE(apbc_gate_clks);
nr_resets_apbcp = ARRAY_SIZE(apbcp_gate_clks);
nr_resets = nr_resets_apbc + nr_resets_apbcp;
cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
if (!cells)
return;
base = 0;
for (i = 0; i < nr_resets_apbc; i++) {
cells[base + i].clk_id = apbc_gate_clks[i].id;
cells[base + i].reg =
pxa_unit->apbc_base + apbc_gate_clks[i].offset;
cells[base + i].flags = 0;
cells[base + i].lock = apbc_gate_clks[i].lock;
cells[base + i].bits = 0x4;
}
base = nr_resets_apbc;
for (i = 0; i < nr_resets_apbcp; i++) {
cells[base + i].clk_id = apbcp_gate_clks[i].id;
cells[base + i].reg =
pxa_unit->apbcp_base + apbcp_gate_clks[i].offset;
cells[base + i].flags = 0;
cells[base + i].lock = apbcp_gate_clks[i].lock;
cells[base + i].bits = 0x4;
}
mmp_clk_reset_register(np, cells, nr_resets);
}
static void __init pxa910_clk_init(struct device_node *np)
{
struct pxa910_clk_unit *pxa_unit;
pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
if (!pxa_unit)
return;
pxa_unit->mpmu_base = of_iomap(np, 0);
if (!pxa_unit->mpmu_base) {
pr_err("failed to map mpmu registers\n");
return;
}
pxa_unit->apmu_base = of_iomap(np, 1);
if (!pxa_unit->apmu_base) {
pr_err("failed to map apmu registers\n");
return;
}
pxa_unit->apbc_base = of_iomap(np, 2);
if (!pxa_unit->apbc_base) {
pr_err("failed to map apbc registers\n");
return;
}
pxa_unit->apbcp_base = of_iomap(np, 3);
if (!pxa_unit->apbcp_base) {
pr_err("failed to map apbcp registers\n");
return;
}
mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
pxa910_pll_init(pxa_unit);
pxa910_apb_periph_clk_init(pxa_unit);
pxa910_axi_periph_clk_init(pxa_unit);
pxa910_clk_reset_init(np, pxa_unit);
}
CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
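/*
* A hedged sketch of a matching device tree node; the unit addresses
* are illustrative, not taken from a real board file. The four reg
* entries map, in order, to the mpmu, apmu, apbc and apbcp bases
* ioremapped above.
*
* clock-controller@d4050000 {
* compatible = "marvell,pxa910-clock";
* reg = <0xd4050000 0x1000>,
* <0xd4282800 0x400>,
* <0xd4015000 0x1000>,
* <0xd403b000 0x1000>;
* #clock-cells = <1>;
* #reset-cells = <1>;
* };
*/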
| gpl-2.0 |
kenkit/htc-kernel-saga | drivers/media/dvb/frontends/au8522_dig.c | 1100 | 22427 | /*
Auvitek AU8522 QAM/8VSB demodulator driver
Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "dvb_frontend.h"
#include "au8522.h"
#include "au8522_priv.h"
static int debug;
/* Despite the name "hybrid_tuner", the framework works just as well for
hybrid demodulators... */
static LIST_HEAD(hybrid_tuner_instance_list);
static DEFINE_MUTEX(au8522_list_mutex);
#define dprintk(arg...)\
do { if (debug)\
printk(arg);\
} while (0)
/* 16 bit registers, 8 bit values */
int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
{
int ret;
u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
struct i2c_msg msg = { .addr = state->config->demod_address,
.flags = 0, .buf = buf, .len = 3 };
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
"ret == %i)\n", __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
u8 au8522_readreg(struct au8522_state *state, u16 reg)
{
int ret;
u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{ .addr = state->config->demod_address, .flags = 0,
.buf = b0, .len = 2 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD,
.buf = b1, .len = 1 } };
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
printk(KERN_ERR "%s: readreg error (ret == %i)\n",
__func__, ret);
return b1[0];
}
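/*
* Worked example of the addressing scheme above: the top bits of the
* 16-bit register number double as the direction flag, so reading
* register 0x4088 puts { 0x40, 0x88 } on the wire while writing
* register 0x80a3 sends { 0x80, 0xa3, data }; this is why the register
* tables below already carry 0x40xx/0x80xx prefixes.
*/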
static int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s(%d)\n", __func__, enable);
if (state->operational_mode == AU8522_ANALOG_MODE) {
/* We're being asked to manage the gate even though we're
not in digital mode. This can occur if we get switched
over to analog mode before the dvb_frontend kernel thread
has completely shutdown */
return 0;
}
if (enable)
return au8522_writereg(state, 0x106, 1);
else
return au8522_writereg(state, 0x106, 0);
}
struct mse2snr_tab {
u16 val;
u16 data;
};
/* VSB SNR lookup table */
static struct mse2snr_tab vsb_mse2snr_tab[] = {
{ 0, 270 },
{ 2, 250 },
{ 3, 240 },
{ 5, 230 },
{ 7, 220 },
{ 9, 210 },
{ 12, 200 },
{ 13, 195 },
{ 15, 190 },
{ 17, 185 },
{ 19, 180 },
{ 21, 175 },
{ 24, 170 },
{ 27, 165 },
{ 31, 160 },
{ 32, 158 },
{ 33, 156 },
{ 36, 152 },
{ 37, 150 },
{ 39, 148 },
{ 40, 146 },
{ 41, 144 },
{ 43, 142 },
{ 44, 140 },
{ 48, 135 },
{ 50, 130 },
{ 53, 125 },
{ 56, 120 },
{ 256, 115 },
};
/* QAM64 SNR lookup table */
static struct mse2snr_tab qam64_mse2snr_tab[] = {
{ 15, 0 },
{ 16, 290 },
{ 17, 288 },
{ 18, 286 },
{ 19, 284 },
{ 20, 282 },
{ 21, 281 },
{ 22, 279 },
{ 23, 277 },
{ 24, 275 },
{ 25, 273 },
{ 26, 271 },
{ 27, 269 },
{ 28, 268 },
{ 29, 266 },
{ 30, 264 },
{ 31, 262 },
{ 32, 260 },
{ 33, 259 },
{ 34, 258 },
{ 35, 256 },
{ 36, 255 },
{ 37, 254 },
{ 38, 252 },
{ 39, 251 },
{ 40, 250 },
{ 41, 249 },
{ 42, 248 },
{ 43, 246 },
{ 44, 245 },
{ 45, 244 },
{ 46, 242 },
{ 47, 241 },
{ 48, 240 },
{ 50, 239 },
{ 51, 238 },
{ 53, 237 },
{ 54, 236 },
{ 56, 235 },
{ 57, 234 },
{ 59, 233 },
{ 60, 232 },
{ 62, 231 },
{ 63, 230 },
{ 65, 229 },
{ 67, 228 },
{ 68, 227 },
{ 70, 226 },
{ 71, 225 },
{ 73, 224 },
{ 74, 223 },
{ 76, 222 },
{ 78, 221 },
{ 80, 220 },
{ 82, 219 },
{ 85, 218 },
{ 88, 217 },
{ 90, 216 },
{ 92, 215 },
{ 93, 214 },
{ 94, 212 },
{ 95, 211 },
{ 97, 210 },
{ 99, 209 },
{ 101, 208 },
{ 102, 207 },
{ 104, 206 },
{ 107, 205 },
{ 111, 204 },
{ 114, 203 },
{ 118, 202 },
{ 122, 201 },
{ 125, 200 },
{ 128, 199 },
{ 130, 198 },
{ 132, 197 },
{ 256, 190 },
};
/* QAM256 SNR lookup table */
static struct mse2snr_tab qam256_mse2snr_tab[] = {
{ 16, 0 },
{ 17, 400 },
{ 18, 398 },
{ 19, 396 },
{ 20, 394 },
{ 21, 392 },
{ 22, 390 },
{ 23, 388 },
{ 24, 386 },
{ 25, 384 },
{ 26, 382 },
{ 27, 380 },
{ 28, 379 },
{ 29, 378 },
{ 30, 377 },
{ 31, 376 },
{ 32, 375 },
{ 33, 374 },
{ 34, 373 },
{ 35, 372 },
{ 36, 371 },
{ 37, 370 },
{ 38, 362 },
{ 39, 354 },
{ 40, 346 },
{ 41, 338 },
{ 42, 330 },
{ 43, 328 },
{ 44, 326 },
{ 45, 324 },
{ 46, 322 },
{ 47, 320 },
{ 48, 319 },
{ 49, 318 },
{ 50, 317 },
{ 51, 316 },
{ 52, 315 },
{ 53, 314 },
{ 54, 313 },
{ 55, 312 },
{ 56, 311 },
{ 57, 310 },
{ 58, 308 },
{ 59, 306 },
{ 60, 304 },
{ 61, 302 },
{ 62, 300 },
{ 63, 298 },
{ 65, 295 },
{ 68, 294 },
{ 70, 293 },
{ 73, 292 },
{ 76, 291 },
{ 78, 290 },
{ 79, 289 },
{ 81, 288 },
{ 82, 287 },
{ 83, 286 },
{ 84, 285 },
{ 85, 284 },
{ 86, 283 },
{ 88, 282 },
{ 89, 281 },
{ 256, 280 },
};
static int au8522_mse2snr_lookup(struct mse2snr_tab *tab, int sz, int mse,
u16 *snr)
{
int i, ret = -EINVAL;
dprintk("%s()\n", __func__);
for (i = 0; i < sz; i++) {
if (mse < tab[i].val) {
*snr = tab[i].data;
ret = 0;
break;
}
}
dprintk("%s() snr=%d\n", __func__, *snr);
return ret;
}
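/*
* Worked example: for an 8-VSB MSE reading of 30, the first entry in
* vsb_mse2snr_tab with val > 30 is { 31, 160 }, so *snr becomes 160,
* i.e. 16.0 dB if the tables are read as tenths of a dB (an assumption;
* the scale is not documented here).
*/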
static int au8522_set_if(struct dvb_frontend *fe, enum au8522_if_freq if_freq)
{
struct au8522_state *state = fe->demodulator_priv;
u8 r0b5, r0b6, r0b7;
char *ifmhz;
switch (if_freq) {
case AU8522_IF_3_25MHZ:
ifmhz = "3.25";
r0b5 = 0x00;
r0b6 = 0x3d;
r0b7 = 0xa0;
break;
case AU8522_IF_4MHZ:
ifmhz = "4.00";
r0b5 = 0x00;
r0b6 = 0x4b;
r0b7 = 0xd9;
break;
case AU8522_IF_6MHZ:
ifmhz = "6.00";
r0b5 = 0xfb;
r0b6 = 0x8e;
r0b7 = 0x39;
break;
default:
dprintk("%s() IF Frequency not supported\n", __func__);
return -EINVAL;
}
dprintk("%s() %s MHz\n", __func__, ifmhz);
au8522_writereg(state, 0x80b5, r0b5);
au8522_writereg(state, 0x80b6, r0b6);
au8522_writereg(state, 0x80b7, r0b7);
return 0;
}
/* VSB Modulation table */
static struct {
u16 reg;
u16 data;
} VSB_mod_tab[] = {
{ 0x8090, 0x84 },
{ 0x4092, 0x11 },
{ 0x2005, 0x00 },
{ 0x8091, 0x80 },
{ 0x80a3, 0x0c },
{ 0x80a4, 0xe8 },
{ 0x8081, 0xc4 },
{ 0x80a5, 0x40 },
{ 0x80a7, 0x40 },
{ 0x80a6, 0x67 },
{ 0x8262, 0x20 },
{ 0x821c, 0x30 },
{ 0x80d8, 0x1a },
{ 0x8227, 0xa0 },
{ 0x8121, 0xff },
{ 0x80a8, 0xf0 },
{ 0x80a9, 0x05 },
{ 0x80aa, 0x77 },
{ 0x80ab, 0xf0 },
{ 0x80ac, 0x05 },
{ 0x80ad, 0x77 },
{ 0x80ae, 0x41 },
{ 0x80af, 0x66 },
{ 0x821b, 0xcc },
{ 0x821d, 0x80 },
{ 0x80a4, 0xe8 },
{ 0x8231, 0x13 },
};
/* QAM64 Modulation table */
static struct {
u16 reg;
u16 data;
} QAM64_mod_tab[] = {
{ 0x00a3, 0x09 },
{ 0x00a4, 0x00 },
{ 0x0081, 0xc4 },
{ 0x00a5, 0x40 },
{ 0x00aa, 0x77 },
{ 0x00ad, 0x77 },
{ 0x00a6, 0x67 },
{ 0x0262, 0x20 },
{ 0x021c, 0x30 },
{ 0x00b8, 0x3e },
{ 0x00b9, 0xf0 },
{ 0x00ba, 0x01 },
{ 0x00bb, 0x18 },
{ 0x00bc, 0x50 },
{ 0x00bd, 0x00 },
{ 0x00be, 0xea },
{ 0x00bf, 0xef },
{ 0x00c0, 0xfc },
{ 0x00c1, 0xbd },
{ 0x00c2, 0x1f },
{ 0x00c3, 0xfc },
{ 0x00c4, 0xdd },
{ 0x00c5, 0xaf },
{ 0x00c6, 0x00 },
{ 0x00c7, 0x38 },
{ 0x00c8, 0x30 },
{ 0x00c9, 0x05 },
{ 0x00ca, 0x4a },
{ 0x00cb, 0xd0 },
{ 0x00cc, 0x01 },
{ 0x00cd, 0xd9 },
{ 0x00ce, 0x6f },
{ 0x00cf, 0xf9 },
{ 0x00d0, 0x70 },
{ 0x00d1, 0xdf },
{ 0x00d2, 0xf7 },
{ 0x00d3, 0xc2 },
{ 0x00d4, 0xdf },
{ 0x00d5, 0x02 },
{ 0x00d6, 0x9a },
{ 0x00d7, 0xd0 },
{ 0x0250, 0x0d },
{ 0x0251, 0xcd },
{ 0x0252, 0xe0 },
{ 0x0253, 0x05 },
{ 0x0254, 0xa7 },
{ 0x0255, 0xff },
{ 0x0256, 0xed },
{ 0x0257, 0x5b },
{ 0x0258, 0xae },
{ 0x0259, 0xe6 },
{ 0x025a, 0x3d },
{ 0x025b, 0x0f },
{ 0x025c, 0x0d },
{ 0x025d, 0xea },
{ 0x025e, 0xf2 },
{ 0x025f, 0x51 },
{ 0x0260, 0xf5 },
{ 0x0261, 0x06 },
{ 0x021a, 0x00 },
{ 0x0546, 0x40 },
{ 0x0210, 0xc7 },
{ 0x0211, 0xaa },
{ 0x0212, 0xab },
{ 0x0213, 0x02 },
{ 0x0502, 0x00 },
{ 0x0121, 0x04 },
{ 0x0122, 0x04 },
{ 0x052e, 0x10 },
{ 0x00a4, 0xca },
{ 0x00a7, 0x40 },
{ 0x0526, 0x01 },
};
/* QAM256 Modulation table */
static struct {
u16 reg;
u16 data;
} QAM256_mod_tab[] = {
{ 0x80a3, 0x09 },
{ 0x80a4, 0x00 },
{ 0x8081, 0xc4 },
{ 0x80a5, 0x40 },
{ 0x80aa, 0x77 },
{ 0x80ad, 0x77 },
{ 0x80a6, 0x67 },
{ 0x8262, 0x20 },
{ 0x821c, 0x30 },
{ 0x80b8, 0x3e },
{ 0x80b9, 0xf0 },
{ 0x80ba, 0x01 },
{ 0x80bb, 0x18 },
{ 0x80bc, 0x50 },
{ 0x80bd, 0x00 },
{ 0x80be, 0xea },
{ 0x80bf, 0xef },
{ 0x80c0, 0xfc },
{ 0x80c1, 0xbd },
{ 0x80c2, 0x1f },
{ 0x80c3, 0xfc },
{ 0x80c4, 0xdd },
{ 0x80c5, 0xaf },
{ 0x80c6, 0x00 },
{ 0x80c7, 0x38 },
{ 0x80c8, 0x30 },
{ 0x80c9, 0x05 },
{ 0x80ca, 0x4a },
{ 0x80cb, 0xd0 },
{ 0x80cc, 0x01 },
{ 0x80cd, 0xd9 },
{ 0x80ce, 0x6f },
{ 0x80cf, 0xf9 },
{ 0x80d0, 0x70 },
{ 0x80d1, 0xdf },
{ 0x80d2, 0xf7 },
{ 0x80d3, 0xc2 },
{ 0x80d4, 0xdf },
{ 0x80d5, 0x02 },
{ 0x80d6, 0x9a },
{ 0x80d7, 0xd0 },
{ 0x8250, 0x0d },
{ 0x8251, 0xcd },
{ 0x8252, 0xe0 },
{ 0x8253, 0x05 },
{ 0x8254, 0xa7 },
{ 0x8255, 0xff },
{ 0x8256, 0xed },
{ 0x8257, 0x5b },
{ 0x8258, 0xae },
{ 0x8259, 0xe6 },
{ 0x825a, 0x3d },
{ 0x825b, 0x0f },
{ 0x825c, 0x0d },
{ 0x825d, 0xea },
{ 0x825e, 0xf2 },
{ 0x825f, 0x51 },
{ 0x8260, 0xf5 },
{ 0x8261, 0x06 },
{ 0x821a, 0x00 },
{ 0x8546, 0x40 },
{ 0x8210, 0x26 },
{ 0x8211, 0xf6 },
{ 0x8212, 0x84 },
{ 0x8213, 0x02 },
{ 0x8502, 0x01 },
{ 0x8121, 0x04 },
{ 0x8122, 0x04 },
{ 0x852e, 0x10 },
{ 0x80a4, 0xca },
{ 0x80a7, 0x40 },
{ 0x8526, 0x01 },
};
static int au8522_enable_modulation(struct dvb_frontend *fe,
fe_modulation_t m)
{
struct au8522_state *state = fe->demodulator_priv;
int i;
dprintk("%s(0x%08x)\n", __func__, m);
switch (m) {
case VSB_8:
dprintk("%s() VSB_8\n", __func__);
for (i = 0; i < ARRAY_SIZE(VSB_mod_tab); i++)
au8522_writereg(state,
VSB_mod_tab[i].reg,
VSB_mod_tab[i].data);
au8522_set_if(fe, state->config->vsb_if);
break;
case QAM_64:
dprintk("%s() QAM 64\n", __func__);
for (i = 0; i < ARRAY_SIZE(QAM64_mod_tab); i++)
au8522_writereg(state,
QAM64_mod_tab[i].reg,
QAM64_mod_tab[i].data);
au8522_set_if(fe, state->config->qam_if);
break;
case QAM_256:
dprintk("%s() QAM 256\n", __func__);
for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
au8522_writereg(state,
QAM256_mod_tab[i].reg,
QAM256_mod_tab[i].data);
au8522_set_if(fe, state->config->qam_if);
break;
default:
dprintk("%s() Invalid modulation\n", __func__);
return -EINVAL;
}
state->current_modulation = m;
return 0;
}
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
static int au8522_set_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *p)
{
struct au8522_state *state = fe->demodulator_priv;
int ret = -EINVAL;
dprintk("%s(frequency=%d)\n", __func__, p->frequency);
if ((state->current_frequency == p->frequency) &&
(state->current_modulation == p->u.vsb.modulation))
return 0;
au8522_enable_modulation(fe, p->u.vsb.modulation);
/* Allow the demod to settle */
msleep(100);
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = fe->ops.tuner_ops.set_params(fe, p);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
if (ret < 0)
return ret;
state->current_frequency = p->frequency;
return 0;
}
/* Reset the demod hardware and reset all of the configuration registers
to a default state. */
int au8522_init(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
state->operational_mode = AU8522_DIGITAL_MODE;
/* Clear out any state associated with the digital side of the
chip, so that when it gets powered back up it won't think
that it is already tuned */
state->current_frequency = 0;
au8522_writereg(state, 0xa4, 1 << 5);
au8522_i2c_gate_ctrl(fe, 1);
return 0;
}
static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
{
struct au8522_led_config *led_config = state->config->led_cfg;
u8 val;
/* bail out if we can't control an LED */
if (!led_config || !led_config->gpio_output ||
!led_config->gpio_output_enable || !led_config->gpio_output_disable)
return 0;
val = au8522_readreg(state, 0x4000 |
(led_config->gpio_output & ~0xc000));
if (onoff) {
/* enable GPIO output */
val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
val |= (led_config->gpio_output_enable & 0xff);
} else {
/* disable GPIO output */
val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
val |= (led_config->gpio_output_disable & 0xff);
}
return au8522_writereg(state, 0x8000 |
(led_config->gpio_output & ~0xc000), val);
}
/* led = 0 | off
* led = 1 | signal ok
* led = 2 | signal strong
* led < 0 | only light led if leds are currently off
*/
static int au8522_led_ctrl(struct au8522_state *state, int led)
{
struct au8522_led_config *led_config = state->config->led_cfg;
int i, ret = 0;
/* bail out if we can't control an LED */
if (!led_config || !led_config->gpio_leds ||
!led_config->num_led_states || !led_config->led_states)
return 0;
if (led < 0) {
/* if LED is already lit, then leave it as-is */
if (state->led_state)
return 0;
else
led *= -1;
}
/* toggle LED if changing state */
if (state->led_state != led) {
u8 val;
dprintk("%s: %d\n", __func__, led);
au8522_led_gpio_enable(state, 1);
val = au8522_readreg(state, 0x4000 |
(led_config->gpio_leds & ~0xc000));
/* start with all leds off */
for (i = 0; i < led_config->num_led_states; i++)
val &= ~led_config->led_states[i];
/* set selected LED state */
if (led < led_config->num_led_states)
val |= led_config->led_states[led];
else if (led_config->num_led_states)
val |=
led_config->led_states[led_config->num_led_states - 1];
ret = au8522_writereg(state, 0x8000 |
(led_config->gpio_leds & ~0xc000), val);
if (ret < 0)
return ret;
state->led_state = led;
if (led == 0)
au8522_led_gpio_enable(state, 0);
}
return 0;
}
int au8522_sleep(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
/* Only power down if the digital side is currently using the chip */
if (state->operational_mode == AU8522_ANALOG_MODE) {
/* We're not in one of the expected power modes, which means
that the DVB thread is probably telling us to go to sleep
even though the analog frontend has already started using
the chip. So ignore the request */
return 0;
}
/* turn off led */
au8522_led_ctrl(state, 0);
/* Power down the chip */
au8522_writereg(state, 0xa4, 1 << 5);
state->current_frequency = 0;
return 0;
}
static int au8522_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct au8522_state *state = fe->demodulator_priv;
u8 reg;
u32 tuner_status = 0;
*status = 0;
if (state->current_modulation == VSB_8) {
dprintk("%s() Checking VSB_8\n", __func__);
reg = au8522_readreg(state, 0x4088);
if ((reg & 0x03) == 0x03)
*status |= FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI;
} else {
dprintk("%s() Checking QAM\n", __func__);
reg = au8522_readreg(state, 0x4541);
if (reg & 0x80)
*status |= FE_HAS_VITERBI;
if (reg & 0x20)
*status |= FE_HAS_LOCK | FE_HAS_SYNC;
}
switch (state->config->status_mode) {
case AU8522_DEMODLOCKING:
dprintk("%s() DEMODLOCKING\n", __func__);
if (*status & FE_HAS_VITERBI)
*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
break;
case AU8522_TUNERLOCKING:
/* Get the tuner status */
dprintk("%s() TUNERLOCKING\n", __func__);
if (fe->ops.tuner_ops.get_status) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.get_status(fe, &tuner_status);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
if (tuner_status)
*status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
break;
}
state->fe_status = *status;
if (*status & FE_HAS_LOCK)
/* turn on LED, if it isn't on already */
au8522_led_ctrl(state, -1);
else
/* turn off LED */
au8522_led_ctrl(state, 0);
dprintk("%s() status 0x%08x\n", __func__, *status);
return 0;
}
static int au8522_led_status(struct au8522_state *state, const u16 *snr)
{
struct au8522_led_config *led_config = state->config->led_cfg;
int led;
u16 strong;
/* bail out if we can't control an LED */
if (!led_config)
return 0;
if (0 == (state->fe_status & FE_HAS_LOCK))
return au8522_led_ctrl(state, 0);
else if (state->current_modulation == QAM_256)
strong = led_config->qam256_strong;
else if (state->current_modulation == QAM_64)
strong = led_config->qam64_strong;
else /* (state->current_modulation == VSB_8) */
strong = led_config->vsb8_strong;
if (*snr >= strong)
led = 2;
else
led = 1;
if ((state->led_state) &&
(((strong < *snr) ? (*snr - strong) : (strong - *snr)) <= 10))
/* snr didn't change enough to bother
* changing the color of the led */
return 0;
return au8522_led_ctrl(state, led);
}
static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct au8522_state *state = fe->demodulator_priv;
int ret = -EINVAL;
dprintk("%s()\n", __func__);
if (state->current_modulation == QAM_256)
ret = au8522_mse2snr_lookup(qam256_mse2snr_tab,
ARRAY_SIZE(qam256_mse2snr_tab),
au8522_readreg(state, 0x4522),
snr);
else if (state->current_modulation == QAM_64)
ret = au8522_mse2snr_lookup(qam64_mse2snr_tab,
ARRAY_SIZE(qam64_mse2snr_tab),
au8522_readreg(state, 0x4522),
snr);
else /* VSB_8 */
ret = au8522_mse2snr_lookup(vsb_mse2snr_tab,
ARRAY_SIZE(vsb_mse2snr_tab),
au8522_readreg(state, 0x4311),
snr);
if (state->config->led_cfg)
au8522_led_status(state, snr);
return ret;
}
static int au8522_read_signal_strength(struct dvb_frontend *fe,
u16 *signal_strength)
{
return au8522_read_snr(fe, signal_strength);
}
static int au8522_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct au8522_state *state = fe->demodulator_priv;
if (state->current_modulation == VSB_8)
*ucblocks = au8522_readreg(state, 0x4087);
else
*ucblocks = au8522_readreg(state, 0x4543);
return 0;
}
static int au8522_read_ber(struct dvb_frontend *fe, u32 *ber)
{
return au8522_read_ucblocks(fe, ber);
}
static int au8522_get_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *p)
{
struct au8522_state *state = fe->demodulator_priv;
p->frequency = state->current_frequency;
p->u.vsb.modulation = state->current_modulation;
return 0;
}
static int au8522_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
}
static struct dvb_frontend_ops au8522_ops;
int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
u8 client_address)
{
int ret;
mutex_lock(&au8522_list_mutex);
ret = hybrid_tuner_request_state(struct au8522_state, (*state),
hybrid_tuner_instance_list,
i2c, client_address, "au8522");
mutex_unlock(&au8522_list_mutex);
return ret;
}
void au8522_release_state(struct au8522_state *state)
{
mutex_lock(&au8522_list_mutex);
if (state != NULL)
hybrid_tuner_release_state(state);
mutex_unlock(&au8522_list_mutex);
}
static void au8522_release(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
au8522_release_state(state);
}
struct dvb_frontend *au8522_attach(const struct au8522_config *config,
struct i2c_adapter *i2c)
{
struct au8522_state *state = NULL;
int instance;
/* allocate memory for the internal state */
instance = au8522_get_state(&state, i2c, config->demod_address);
switch (instance) {
case 0:
dprintk("%s state allocation failed\n", __func__);
break;
case 1:
/* new demod instance */
dprintk("%s using new instance\n", __func__);
break;
default:
/* existing demod instance */
dprintk("%s using existing instance\n", __func__);
break;
}
/* setup the state */
state->config = config;
state->i2c = i2c;
state->operational_mode = AU8522_DIGITAL_MODE;
/* create dvb_frontend */
memcpy(&state->frontend.ops, &au8522_ops,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
if (au8522_init(&state->frontend) != 0) {
printk(KERN_ERR "%s: Failed to initialize correctly\n",
__func__);
goto error;
}
/* Note: Leaving the I2C gate open here. */
au8522_i2c_gate_ctrl(&state->frontend, 1);
return &state->frontend;
error:
au8522_release_state(state);
return NULL;
}
EXPORT_SYMBOL(au8522_attach);
static struct dvb_frontend_ops au8522_ops = {
.info = {
.name = "Auvitek AU8522 QAM/8VSB Frontend",
.type = FE_ATSC,
.frequency_min = 54000000,
.frequency_max = 858000000,
.frequency_stepsize = 62500,
.caps = FE_CAN_QAM_64 | FE_CAN_QAM_256 | FE_CAN_8VSB
},
.init = au8522_init,
.sleep = au8522_sleep,
.i2c_gate_ctrl = au8522_i2c_gate_ctrl,
.set_frontend = au8522_set_frontend,
.get_frontend = au8522_get_frontend,
.get_tune_settings = au8522_get_tune_settings,
.read_status = au8522_read_status,
.read_ber = au8522_read_ber,
.read_signal_strength = au8522_read_signal_strength,
.read_snr = au8522_read_snr,
.read_ucblocks = au8522_read_ucblocks,
.release = au8522_release,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Hui-Zhi/gpu_cgroup_kernel | drivers/cpufreq/pcc-cpufreq.c | 1100 | 15625 | /*
* pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
*
* Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
* INFRINGEMENT. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <acpi/processor.h>
#define PCC_VERSION "1.10.00"
#define POLL_LOOPS 300
#define CMD_COMPLETE 0x1
#define CMD_GET_FREQ 0x0
#define CMD_SET_FREQ 0x1
#define BUF_SZ 4
struct pcc_register_resource {
u8 descriptor;
u16 length;
u8 space_id;
u8 bit_width;
u8 bit_offset;
u8 access_size;
u64 address;
} __attribute__ ((packed));
struct pcc_memory_resource {
u8 descriptor;
u16 length;
u8 space_id;
u8 resource_usage;
u8 type_specific;
u64 granularity;
u64 minimum;
u64 maximum;
u64 translation_offset;
u64 address_length;
} __attribute__ ((packed));
static struct cpufreq_driver pcc_cpufreq_driver;
struct pcc_header {
u32 signature;
u16 length;
u8 major;
u8 minor;
u32 features;
u16 command;
u16 status;
u32 latency;
u32 minimum_time;
u32 maximum_time;
u32 nominal;
u32 throttled_frequency;
u32 minimum_frequency;
};
static void __iomem *pcch_virt_addr;
static struct pcc_header __iomem *pcch_hdr;
static DEFINE_SPINLOCK(pcc_lock);
static struct acpi_generic_address doorbell;
static u64 doorbell_preserve;
static u64 doorbell_write;
static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
struct pcc_cpu {
u32 input_offset;
u32 output_offset;
};
static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
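/*
* Ring the PCC doorbell and wait for the platform to acknowledge:
* the command must already be in the PCCH command field. pcc_cmd()
* keeps the doorbell bits selected by doorbell_preserve, ORs in
* doorbell_write, then polls the status field for CMD_COMPLETE for
* at most POLL_LOOPS iterations.
*/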
static inline void pcc_cmd(void)
{
u64 doorbell_value;
int i;
acpi_read(&doorbell_value, &doorbell);
acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
&doorbell);
for (i = 0; i < POLL_LOOPS; i++) {
if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
break;
}
}
static inline void pcc_clear_mapping(void)
{
if (pcch_virt_addr)
iounmap(pcch_virt_addr);
pcch_virt_addr = NULL;
}
static unsigned int pcc_get_freq(unsigned int cpu)
{
struct pcc_cpu *pcc_cpu_data;
unsigned int curr_freq;
unsigned int freq_limit;
u16 status;
u32 input_buffer;
u32 output_buffer;
spin_lock(&pcc_lock);
pr_debug("get: get_freq for CPU %d\n", cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
input_buffer = 0x1;
iowrite32(input_buffer,
(pcch_virt_addr + pcc_cpu_data->input_offset));
iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
pcc_cmd();
output_buffer =
ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
/* Clear the input buffer - we are done with the current command */
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
if (status != CMD_COMPLETE) {
pr_debug("get: FAILED: for CPU %d, status is %d\n",
cpu, status);
goto cmd_incomplete;
}
iowrite16(0, &pcch_hdr->status);
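/*
* The low byte of the output buffer is, going by the arithmetic
* below, the current frequency as a percentage of nominal (which is
* in MHz): e.g. nominal = 2000 and a low byte of 75 decode to
* 2000 * 75 / 100 * 1000 = 1500000 kHz, i.e. 1.5 GHz.
*/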
curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
/ 100) * 1000);
pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is "
"0x%p, contains a value of: 0x%x. Speed is: %d MHz\n",
cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
output_buffer, curr_freq);
freq_limit = (output_buffer >> 8) & 0xff;
if (freq_limit != 0xff) {
pr_debug("get: frequency for cpu %d is being temporarily"
" capped at %d\n", cpu, curr_freq);
}
spin_unlock(&pcc_lock);
return curr_freq;
cmd_incomplete:
iowrite16(0, &pcch_hdr->status);
spin_unlock(&pcc_lock);
return 0;
}
static int pcc_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct pcc_cpu *pcc_cpu_data;
struct cpufreq_freqs freqs;
u16 status;
u32 input_buffer;
int cpu;
cpu = policy->cpu;
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
pr_debug("target: CPU %d should go to target freq: %d "
"(virtual) input_offset is 0x%p\n",
cpu, target_freq,
(pcch_virt_addr + pcc_cpu_data->input_offset));
freqs.old = policy->cur;
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
spin_lock(&pcc_lock);
input_buffer = 0x1 | (((target_freq * 100)
/ (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
iowrite32(input_buffer,
(pcch_virt_addr + pcc_cpu_data->input_offset));
iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
pcc_cmd();
/* Clear the input buffer - we are done with the current command */
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
cpu, status);
return -EINVAL;
}
pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
return 0;
}
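/*
* Evaluate the per-CPU PCCP method, which returns a package of two
* integers: the offsets of this CPU's input and output buffers
* within the shared PCCH memory region.
*/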
static int pcc_get_offset(int cpu)
{
acpi_status status;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *pccp, *offset;
struct pcc_cpu *pcc_cpu_data;
struct acpi_processor *pr;
int ret = 0;
pr = per_cpu(processors, cpu);
pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
if (!pr)
return -ENODEV;
status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
pccp = buffer.pointer;
if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
}
offset = &(pccp->package.elements[0]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto out_free;
}
pcc_cpu_data->input_offset = offset->integer.value;
offset = &(pccp->package.elements[1]);
if (!offset || offset->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto out_free;
}
pcc_cpu_data->output_offset = offset->integer.value;
memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data "
"input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
out_free:
kfree(buffer.pointer);
return ret;
}
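/*
* Negotiate control with the firmware through the ACPI _OSC method.
* It is evaluated twice with the PCC UUID: first with
* OSC_QUERY_ENABLE set, to ask whether the capability is supported,
* then with the query bit clear, to actually claim it. Both passes
* check the returned error bits and the "supported" word.
*/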
static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
{
acpi_status status;
struct acpi_object_list input;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object in_params[4];
union acpi_object *out_obj;
u32 capabilities[2];
u32 errors;
u32 supported;
int ret = 0;
input.count = 4;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = 1;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = 2;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 8;
in_params[3].buffer.pointer = (u8 *)&capabilities;
capabilities[0] = OSC_QUERY_ENABLE;
capabilities[1] = 0x1;
status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!output.length)
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) {
ret = -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1)) {
ret = -ENODEV;
goto out_free;
}
kfree(output.pointer);
capabilities[0] = 0x0;
capabilities[1] = 0x1;
status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!output.length)
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
if (errors) {
ret = -ENODEV;
goto out_free;
}
supported = *((u32 *)(out_obj->buffer.pointer + 4));
if (!(supported & 0x1)) {
ret = -ENODEV;
goto out_free;
}
out_free:
kfree(output.pointer);
return ret;
}
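/*
* Evaluate PCCH under \_SB and parse its result package:
* elements[0] buffer: shared memory region (pcc_memory_resource),
* elements[1] buffer: doorbell register (pcc_register_resource),
* elements[2] integer: doorbell preserve mask,
* elements[3] integer: doorbell write value.
*/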
static int __init pcc_cpufreq_probe(void)
{
acpi_status status;
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
struct pcc_memory_resource *mem_resource;
struct pcc_register_resource *reg_resource;
union acpi_object *out_obj, *member;
acpi_handle handle, osc_handle;
int ret = 0;
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!acpi_has_method(handle, "PCCH"))
return -ENODEV;
status = acpi_get_handle(handle, "_OSC", &osc_handle);
if (ACPI_SUCCESS(status)) {
ret = pcc_cpufreq_do_osc(&osc_handle);
if (ret)
pr_debug("probe: _OSC evaluation did not succeed\n");
/* Firmware's use of _OSC is optional */
ret = 0;
}
status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
if (ACPI_FAILURE(status))
return -ENODEV;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_PACKAGE) {
ret = -ENODEV;
goto out_free;
}
member = &out_obj->package.elements[0];
if (member->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto out_free;
}
mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
pr_debug("probe: mem_resource descriptor: 0x%x,"
" length: %d, space_id: %d, resource_usage: %d,"
" type_specific: %d, granularity: 0x%llx,"
" minimum: 0x%llx, maximum: 0x%llx,"
" translation_offset: 0x%llx, address_length: 0x%llx\n",
mem_resource->descriptor, mem_resource->length,
mem_resource->space_id, mem_resource->resource_usage,
mem_resource->type_specific, mem_resource->granularity,
mem_resource->minimum, mem_resource->maximum,
mem_resource->translation_offset,
mem_resource->address_length);
if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
ret = -ENODEV;
goto out_free;
}
pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
ret = -ENOMEM;
goto out_free;
}
pcch_hdr = pcch_virt_addr;
pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
pr_debug("probe: PCCH header is at physical address: 0x%llx,"
" signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
" supported features: 0x%x, command field: 0x%x,"
" status field: 0x%x, nominal latency: %d us\n",
mem_resource->minimum, ioread32(&pcch_hdr->signature),
ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
ioread32(&pcch_hdr->latency));
pr_debug("probe: min time between commands: %d us,"
" max time between commands: %d us,"
" nominal CPU frequency: %d MHz,"
" minimum CPU frequency: %d MHz,"
" minimum CPU frequency without throttling: %d MHz\n",
ioread32(&pcch_hdr->minimum_time),
ioread32(&pcch_hdr->maximum_time),
ioread32(&pcch_hdr->nominal),
ioread32(&pcch_hdr->throttled_frequency),
ioread32(&pcch_hdr->minimum_frequency));
member = &out_obj->package.elements[1];
if (member->type != ACPI_TYPE_BUFFER) {
ret = -ENODEV;
goto pcch_free;
}
reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
doorbell.space_id = reg_resource->space_id;
doorbell.bit_width = reg_resource->bit_width;
doorbell.bit_offset = reg_resource->bit_offset;
doorbell.access_width = 4; /* ACPI GAS encoding: 4 = QWORD access */
doorbell.address = reg_resource->address;
pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
"bit_offset is %d, access_width is %d, address is 0x%llx\n",
doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
doorbell.access_width, reg_resource->address);
member = &out_obj->package.elements[2];
if (member->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto pcch_free;
}
doorbell_preserve = member->integer.value;
member = &out_obj->package.elements[3];
if (member->type != ACPI_TYPE_INTEGER) {
ret = -ENODEV;
goto pcch_free;
}
doorbell_write = member->integer.value;
pr_debug("probe: doorbell_preserve: 0x%llx,"
" doorbell_write: 0x%llx\n",
doorbell_preserve, doorbell_write);
pcc_cpu_info = alloc_percpu(struct pcc_cpu);
if (!pcc_cpu_info) {
ret = -ENOMEM;
goto pcch_free;
}
printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
" limits: %d MHz, %d MHz\n", PCC_VERSION,
ioread32(&pcch_hdr->minimum_frequency),
ioread32(&pcch_hdr->nominal));
kfree(output.pointer);
return ret;
pcch_free:
pcc_clear_mapping();
out_free:
kfree(output.pointer);
return ret;
}
static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
unsigned int result = 0;
if (!pcch_virt_addr) {
result = -1;
goto out;
}
result = pcc_get_offset(cpu);
if (result) {
pr_debug("init: PCCP evaluation failed\n");
goto out;
}
policy->max = policy->cpuinfo.max_freq =
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
out:
return result;
}
static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
return 0;
}
static struct cpufreq_driver pcc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.get = pcc_get_freq,
.verify = pcc_cpufreq_verify,
.target = pcc_cpufreq_target,
.init = pcc_cpufreq_cpu_init,
.exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
};
static int __init pcc_cpufreq_init(void)
{
int ret;
if (acpi_disabled)
return 0;
ret = pcc_cpufreq_probe();
if (ret) {
pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n");
return ret;
}
ret = cpufreq_register_driver(&pcc_cpufreq_driver);
return ret;
}
static void __exit pcc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&pcc_cpufreq_driver);
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
}
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, },
{ACPI_PROCESSOR_DEVICE_HID, },
{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
MODULE_VERSION(PCC_VERSION);
MODULE_DESCRIPTION("Processor Clocking Control interface driver");
MODULE_LICENSE("GPL");
late_initcall(pcc_cpufreq_init);
module_exit(pcc_cpufreq_exit);
| gpl-2.0 |
MoKee/android_kernel_samsung_d710 | drivers/net/usb/kalmia.c | 2380 | 10952 | /*
* USB network interface driver for Samsung Kalmia based LTE USB modem like the
* Samsung GT-B3730 and GT-B3710.
*
* Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
*
* Sponsored by Quicklink Video Distribution Services Ltd.
*
* Based on the cdc_eem module.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
/*
* The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
* handled by the "option" module and an ethernet data port handled by this
* module.
*
* The stick must first be switched into modem mode by usb_modeswitch
* or a similar tool. This module then sends the modem two initialization
* packets; the reply to the second one carries the MAC address of the
* device. User space can then connect the modem using AT commands through
* the ACM port and use DHCP on the network interface exposed by this
* module. Network packets are sent to and from the modem in a proprietary
* format discovered by watching the behavior of the Windows driver for
* the modem.
*
* More information about the use of the modem is available in usb_modeswitch
* forum and the project page:
*
* http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
* https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
*/
/* #define DEBUG */
/* #define VERBOSE */
#define KALMIA_HEADER_LENGTH 6
#define KALMIA_ALIGN_SIZE 4
#define KALMIA_USB_TIMEOUT 10000
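/*
* On-the-wire framing, as inferred from the tx/rx fixup routines
* below (there is no public specification):
*
* offset 0: 0x57 0x44 magic marking a data frame
* offset 2: little-endian u16 length of the ethernet frame
* offset 4: copy of the ethernet frame's EtherType bytes
* offset 6: the ethernet frame itself, zero-padded so the total
* length is a multiple of KALMIA_ALIGN_SIZE; a 0x57 0x5a
* header marks the end of a USB transfer carrying several
* such frames.
*/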
/*-------------------------------------------------------------------------*/
static int
kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
u8 *buffer, u8 expected_len)
{
int act_len;
int status;
netdev_dbg(dev->net, "Sending init packet");
status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
"Error sending init packet. Status %i, length %i\n",
status, act_len);
return status;
}
else if (act_len != init_msg_len) {
netdev_err(dev->net,
"Did not send all of init packet. Bytes sent: %i",
act_len);
}
else {
netdev_dbg(dev->net, "Successfully sent init packet.");
}
status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0)
netdev_err(dev->net,
"Error receiving init result. Status %i, length %i\n",
status, act_len);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);
return status;
}
static int
kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
{
static const char init_msg_1[] =
{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
0x00, 0x00 };
static const char init_msg_2[] =
{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
0x00, 0x00 };
static const int buflen = 28;
char *usb_buf;
int status;
usb_buf = kmalloc(buflen, GFP_DMA | GFP_KERNEL);
if (!usb_buf)
return -ENOMEM;
memcpy(usb_buf, init_msg_1, 12);
status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
/ sizeof(init_msg_1[0]), usb_buf, 24);
if (status != 0)
goto out;
memcpy(usb_buf, init_msg_2, 12);
status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
/ sizeof(init_msg_2[0]), usb_buf, 28);
if (status != 0)
goto out;
memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
out:
/* don't leak the DMA buffer on the early-exit error paths */
kfree(usb_buf);
return status;
}
static int
kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
u8 ethernet_addr[ETH_ALEN];
/* Don't bind to AT command interface */
if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
return -EINVAL;
dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
dev->status = NULL;
dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
dev->hard_mtu = 1400;
dev->rx_urb_size = dev->hard_mtu * 10; /* found as optimal after testing */
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
if (status < 0) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
}
memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
return status;
}
static struct sk_buff *
kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
struct sk_buff *skb2 = NULL;
u16 content_len;
unsigned char *header_start;
unsigned char ether_type_1, ether_type_2;
u8 remainder, padlen = 0;
if (!skb_cloned(skb)) {
int headroom = skb_headroom(skb);
int tailroom = skb_tailroom(skb);
if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
>= KALMIA_HEADER_LENGTH))
goto done;
if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
+ KALMIA_ALIGN_SIZE)) {
skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
skb->data, skb->len);
skb_set_tail_pointer(skb, skb->len);
goto done;
}
}
skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
KALMIA_ALIGN_SIZE, flags);
if (!skb2)
return NULL;
dev_kfree_skb_any(skb);
skb = skb2;
done:
header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
ether_type_2);
/* According to empirical data for data packets */
header_start[0] = 0x57;
header_start[1] = 0x44;
content_len = skb->len - KALMIA_HEADER_LENGTH;
put_unaligned_le16(content_len, &header_start[2]);
header_start[4] = ether_type_1;
header_start[5] = ether_type_2;
/* Align to 4 bytes by padding with zeros */
remainder = skb->len % KALMIA_ALIGN_SIZE;
if (remainder > 0) {
padlen = KALMIA_ALIGN_SIZE - remainder;
memset(skb_put(skb, padlen), 0, padlen);
}
netdev_dbg(
dev->net,
"Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
content_len, padlen, header_start[0], header_start[1],
header_start[2], header_start[3], header_start[4],
header_start[5]);
return skb;
}
static int
kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
/*
* Our task here is to strip off framing, leaving skb with one
* data frame for the usbnet framework code to process.
*/
static const u8 HEADER_END_OF_USB_PACKET[] =
{ 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
static const u8 EXPECTED_UNKNOWN_HEADER_1[] =
{ 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
static const u8 EXPECTED_UNKNOWN_HEADER_2[] =
{ 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
int i = 0;
/* incomplete header? */
if (skb->len < KALMIA_HEADER_LENGTH)
return 0;
do {
struct sk_buff *skb2 = NULL;
u8 *header_start;
u16 usb_packet_length, ether_packet_length;
int is_last;
header_start = skb->data;
if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
header_start, EXPECTED_UNKNOWN_HEADER_2,
sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
netdev_dbg(
dev->net,
"Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
header_start[0], header_start[1],
header_start[2], header_start[3],
header_start[4], header_start[5],
skb->len - KALMIA_HEADER_LENGTH);
}
else {
netdev_err(
dev->net,
"Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
header_start[0], header_start[1],
header_start[2], header_start[3],
header_start[4], header_start[5],
skb->len - KALMIA_HEADER_LENGTH);
return 0;
}
}
else
netdev_dbg(
dev->net,
"Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
header_start[0], header_start[1], header_start[2],
header_start[3], header_start[4], header_start[5],
skb->len - KALMIA_HEADER_LENGTH);
/* subtract start header and end header */
usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
ether_packet_length = get_unaligned_le16(&header_start[2]);
skb_pull(skb, KALMIA_HEADER_LENGTH);
/* Some small packets miss the end marker */
if (usb_packet_length < ether_packet_length) {
ether_packet_length = usb_packet_length
+ KALMIA_HEADER_LENGTH;
is_last = true;
}
else {
netdev_dbg(dev->net, "Correct package length #%i", i
+ 1);
is_last = (memcmp(skb->data + ether_packet_length,
HEADER_END_OF_USB_PACKET,
sizeof(HEADER_END_OF_USB_PACKET)) == 0);
if (!is_last) {
header_start = skb->data + ether_packet_length;
netdev_dbg(
dev->net,
"End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
header_start[0], header_start[1],
header_start[2], header_start[3],
header_start[4], header_start[5],
skb->len - KALMIA_HEADER_LENGTH);
}
}
if (is_last) {
skb2 = skb;
}
else {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2))
return 0;
}
skb_trim(skb2, ether_packet_length);
if (is_last) {
return 1;
}
else {
usbnet_skb_return(dev, skb2);
skb_pull(skb, ether_packet_length);
}
i++;
}
while (skb->len);
return 1;
}
static const struct driver_info kalmia_info = {
.description = "Samsung Kalmia LTE USB dongle",
.flags = FLAG_WWAN,
.bind = kalmia_bind,
.rx_fixup = kalmia_rx_fixup,
.tx_fixup = kalmia_tx_fixup
};
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products[] = {
/* The unswitched USB ID, to get the module auto loaded: */
{ USB_DEVICE(0x04e8, 0x689a) },
/* The stick switched into modem mode (by e.g. usb_modeswitch): */
{ USB_DEVICE(0x04e8, 0x6889),
.driver_info = (unsigned long) &kalmia_info, },
{ /* EMPTY == end of list */} };
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver kalmia_driver = {
.name = "kalmia",
.id_table = products,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume
};
static int __init kalmia_init(void)
{
return usb_register(&kalmia_driver);
}
module_init(kalmia_init);
static void __exit kalmia_exit(void)
{
usb_deregister(&kalmia_driver);
}
module_exit(kalmia_exit);
MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bsorensen110/prevail2spr_custom-kernel | drivers/bcma/driver_chipcommon.c | 2380 | 2410 | /*
* Broadcom specific AMBA
* ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
* Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
value &= mask;
value |= bcma_cc_read32(cc, offset) & ~mask;
bcma_cc_write32(cc, offset, value);
return value;
}
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
if (cc->core->id.rev >= 35)
cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
if (cc->core->id.rev >= 20) {
bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
}
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
pr_err("Power control not implemented!\n");
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{
/* instant NMI */
bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
}
void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
bcma_cc_write32_masked(cc, BCMA_CC_IRQMASK, mask, value);
}
u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask)
{
return bcma_cc_read32(cc, BCMA_CC_IRQSTAT) & mask;
}
u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
{
return bcma_cc_read32(cc, BCMA_CC_GPIOIN) & mask;
}
u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
}
u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
}
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
}
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
}
| gpl-2.0 |
sakuramilk/linux-3.0.y | drivers/acpi/dock.c | 2636 | 28502 | /*
* dock.c - ACPI dock station driver
*
* Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/stddef.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#define PREFIX "ACPI: "
#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
ACPI_MODULE_NAME("dock");
MODULE_AUTHOR("Kristen Carlson Accardi");
MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
static int immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
"undock immediately when the undock button is pressed, 0 will cause"
" the driver to wait for userspace to write the undock sysfs file "
" before undocking");
static struct atomic_notifier_head dock_notifier_list;
static const struct acpi_device_id dock_device_ids[] = {
{"LNXDOCK", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, dock_device_ids);
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
u32 flags;
spinlock_t dd_lock;
struct mutex hp_lock;
struct list_head dependent_devices;
struct list_head hotplug_devices;
struct list_head sibling;
struct platform_device *dock_device;
};
static LIST_HEAD(dock_stations);
static int dock_station_count;
struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
struct acpi_dock_ops *ops;
void *context;
};
#define DOCK_DOCKING 0x00000001
#define DOCK_UNDOCKING 0x00000002
#define DOCK_IS_DOCK 0x00000010
#define DOCK_IS_ATA 0x00000020
#define DOCK_IS_BAT 0x00000040
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
/*****************************************************************************
* Dock Dependent device functions *
*****************************************************************************/
/**
* add_dock_dependent_device - associate a device with the dock station
* @ds: The dock station
* @handle: handle of the dependent device
*
* Add the dependent device to the dock's dependent device list.
*/
static int
add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->handle = handle;
INIT_LIST_HEAD(&dd->list);
INIT_LIST_HEAD(&dd->hotplug_list);
spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
spin_unlock(&ds->dd_lock);
return 0;
}
/**
* dock_add_hotplug_device - associate a hotplug handler with the dock station
* @ds: The dock station
* @dd: The dependent device struct
*
* Add the dependent device to the dock's hotplug device list
*/
static void
dock_add_hotplug_device(struct dock_station *ds,
struct dock_dependent_device *dd)
{
mutex_lock(&ds->hp_lock);
list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
mutex_unlock(&ds->hp_lock);
}
/**
* dock_del_hotplug_device - remove a hotplug handler from the dock station
* @ds: The dock station
* @dd: the dependent device struct
*
* Delete the dependent device from the dock's hotplug device list
*/
static void
dock_del_hotplug_device(struct dock_station *ds,
struct dock_dependent_device *dd)
{
mutex_lock(&ds->hp_lock);
list_del(&dd->hotplug_list);
mutex_unlock(&ds->hp_lock);
}
/**
* find_dock_dependent_device - get a device dependent on this dock
* @ds: the dock station
* @handle: the acpi_handle of the device we want
*
* iterate over the dependent device list for this dock. If the
* dependent device matches the handle, return.
*/
static struct dock_dependent_device *
find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
spin_lock(&ds->dd_lock);
list_for_each_entry(dd, &ds->dependent_devices, list) {
if (handle == dd->handle) {
spin_unlock(&ds->dd_lock);
return dd;
}
}
spin_unlock(&ds->dd_lock);
return NULL;
}
/*****************************************************************************
* Dock functions *
*****************************************************************************/
/**
* is_dock - see if a device is a dock station
* @handle: acpi handle of the device
*
* If an acpi object has a _DCK method, then it is by definition a dock
* station, so return true.
*/
static int is_dock(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(handle, "_DCK", &tmp);
if (ACPI_FAILURE(status))
return 0;
return 1;
}
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status))
return 0;
return 1;
}
static int is_ata(acpi_handle handle)
{
acpi_handle tmp;
if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
(ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
return 1;
return 0;
}
static int is_battery(acpi_handle handle)
{
struct acpi_device_info *info;
int ret = 1;
if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
return 0;
if (!(info->valid & ACPI_VALID_HID))
ret = 0;
else
ret = !strcmp("PNP0C0A", info->hardware_id.string);
kfree(info);
return ret;
}
static int is_ejectable_bay(acpi_handle handle)
{
acpi_handle phandle;
if (!is_ejectable(handle))
return 0;
if (is_battery(handle) || is_ata(handle))
return 1;
if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
return 1;
return 0;
}
/**
* is_dock_device - see if a device is on a dock station
* @handle: acpi handle of the device
*
* If this device is either the dock station itself,
* or is a device dependent on the dock station, then it
* is a dock device
*/
int is_dock_device(acpi_handle handle)
{
struct dock_station *dock_station;
if (!dock_station_count)
return 0;
if (is_dock(handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
if (find_dock_dependent_device(dock_station, handle))
return 1;
return 0;
}
EXPORT_SYMBOL_GPL(is_dock_device);
/**
* dock_present - see if the dock station is present.
* @ds: the dock station
*
* execute the _STA method. note that present does not
* imply that we are docked.
*/
static int dock_present(struct dock_station *ds)
{
unsigned long long sta;
acpi_status status;
if (ds) {
status = acpi_evaluate_integer(ds->handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
return 1;
}
return 0;
}
/**
* dock_create_acpi_device - add new devices to acpi
* @handle - handle of the device to add
*
* This function will create a new acpi_device for the given
* handle if one does not exist already. This should cause
* acpi to scan for drivers for the given devices, and call
* the matching driver's add routine.
*
* Returns a pointer to the acpi_device corresponding to the handle.
*/
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
struct acpi_device *parent_device;
acpi_handle parent;
int ret;
if (acpi_bus_get_device(handle, &device)) {
/*
* no device created for this object,
* so we should create one.
*/
acpi_get_parent(handle, &parent);
if (acpi_bus_get_device(parent, &parent_device))
parent_device = NULL;
ret = acpi_bus_add(&device, parent_device, handle,
ACPI_BUS_TYPE_DEVICE);
if (ret) {
pr_debug("error adding bus, %x\n", -ret);
return NULL;
}
}
return device;
}
/**
* dock_remove_acpi_device - remove the acpi_device struct from acpi
* @handle - the handle of the device to remove
*
* Tell acpi to remove the acpi_device. This should cause any loaded
* driver to have its remove routine called.
*/
static void dock_remove_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
int ret;
if (!acpi_bus_get_device(handle, &device)) {
ret = acpi_bus_trim(device, 1);
if (ret)
pr_debug("error removing bus, %x\n", -ret);
}
}
/**
* hotplug_dock_devices - insert or remove devices on the dock station
* @ds: the dock station
* @event: either bus check or eject request
*
* Some devices on the dock station need to have drivers called
* to perform hotplug operations after a dock event has occurred.
* Traverse the list of dock devices that have registered a
* hotplug handler, and call the handler.
*/
static void hotplug_dock_devices(struct dock_station *ds, u32 event)
{
struct dock_dependent_device *dd;
mutex_lock(&ds->hp_lock);
/*
* First call driver specific hotplug functions
*/
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->handler)
dd->ops->handler(dd->handle, event, dd->context);
/*
* Now make sure that an acpi_device is created for each
* dependent device, or removed if this is an eject request.
* This will cause acpi_drivers to be stopped/started if they
* exist
*/
list_for_each_entry(dd, &ds->dependent_devices, list) {
if (event == ACPI_NOTIFY_EJECT_REQUEST)
dock_remove_acpi_device(dd->handle);
else
dock_create_acpi_device(dd->handle);
}
mutex_unlock(&ds->hp_lock);
}
static void dock_event(struct dock_station *ds, u32 event, int num)
{
struct device *dev = &ds->dock_device->dev;
char event_string[13];
char *envp[] = { event_string, NULL };
struct dock_dependent_device *dd;
if (num == UNDOCK_EVENT)
sprintf(event_string, "EVENT=undock");
else
sprintf(event_string, "EVENT=dock");
/*
* Indicate that the status of the dock station has
* changed.
*/
if (num == DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->uevent)
dd->ops->uevent(dd->handle, event, dd->context);
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
* eject_dock - respond to a dock eject request
* @ds: the dock station
*
* This is called after _DCK is called, to execute the dock station's
* _EJ0 method.
*/
static void eject_dock(struct dock_station *ds)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
acpi_handle tmp;
/* all dock devices should have _EJ0, but check anyway */
status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
if (ACPI_FAILURE(status)) {
pr_debug("No _EJ0 support for dock device\n");
return;
}
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = 1;
status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status))
pr_debug("Failed to evaluate _EJ0!\n");
}
/**
* handle_dock - handle a dock event
* @ds: the dock station
* @dock: to dock, or undock - that is the question
*
* Execute the _DCK method in response to an acpi event
*/
static void handle_dock(struct dock_station *ds, int dock)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(ds->handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_INFO PREFIX "%s - %s\n",
(char *)name_buffer.pointer, dock ? "docking" : "undocking");
/* _DCK method has one argument */
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = dock;
status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
ACPI_EXCEPTION((AE_INFO, status, "%s - failed to execute"
" _DCK\n", (char *)name_buffer.pointer));
kfree(buffer.pointer);
kfree(name_buffer.pointer);
}
static inline void dock(struct dock_station *ds)
{
handle_dock(ds, 1);
}
static inline void undock(struct dock_station *ds)
{
handle_dock(ds, 0);
}
static inline void begin_dock(struct dock_station *ds)
{
ds->flags |= DOCK_DOCKING;
}
static inline void complete_dock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_DOCKING);
ds->last_dock_time = jiffies;
}
static inline void begin_undock(struct dock_station *ds)
{
ds->flags |= DOCK_UNDOCKING;
}
static inline void complete_undock(struct dock_station *ds)
{
ds->flags &= ~(DOCK_UNDOCKING);
}
static void dock_lock(struct dock_station *ds, int lock)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = !!lock;
status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
if (lock)
printk(KERN_WARNING PREFIX "Locking device failed\n");
else
printk(KERN_WARNING PREFIX "Unlocking device failed\n");
}
}
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
*
* Sometimes while docking, false dock events can be sent to the driver
* because connections aren't made cleanly or for some other reason.
* Ignore these if we are in the middle of doing something.
*/
static int dock_in_progress(struct dock_station *ds)
{
if ((ds->flags & DOCK_DOCKING) ||
time_before(jiffies, (ds->last_dock_time + HZ)))
return 1;
return 0;
}
/**
* register_dock_notifier - add yourself to the dock notifier list
* @nb: the callers notifier block
*
* If a driver wishes to be notified about dock events, they can
* use this function to put a notifier block on the dock notifier list.
* this notifier call chain will be called after a dock event, but
* before hotplugging any new devices.
*/
int register_dock_notifier(struct notifier_block *nb)
{
if (!dock_station_count)
return -ENODEV;
return atomic_notifier_chain_register(&dock_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_dock_notifier);
/**
* unregister_dock_notifier - remove yourself from the dock notifier list
* @nb: the callers notifier block
*/
void unregister_dock_notifier(struct notifier_block *nb)
{
if (!dock_station_count)
return;
atomic_notifier_chain_unregister(&dock_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_dock_notifier);
/**
* register_hotplug_dock_device - register a hotplug function
* @handle: the handle of the device
* @ops: handlers to call after docking
* @context: device specific data
*
* If a driver would like to perform a hotplug operation after a dock
* event, they can register an acpi_notify_handler to be called by
* the dock driver after _DCK is executed.
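*
* A minimal, purely illustrative caller (all names hypothetical):
*
*	static struct acpi_dock_ops my_dock_ops = {
*		.handler = my_dock_handler,	/* run after _DCK */
*		.uevent = my_dock_uevent,	/* optional */
*	};
*	register_hotplug_dock_device(handle, &my_dock_ops, my_data);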
*/
int
register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
struct dock_station *dock_station;
int ret = -EINVAL;
if (!dock_station_count)
return -ENODEV;
/*
* make sure this handle is for a device dependent on the dock,
* this would include the dock station itself
*/
list_for_each_entry(dock_station, &dock_stations, sibling) {
/*
* An ATA bay can be in a dock and itself can be ejected
* separately, so there are two 'dock stations' which need the
* ops
*/
dd = find_dock_dependent_device(dock_station, handle);
if (dd) {
dd->ops = ops;
dd->context = context;
dock_add_hotplug_device(dock_station, dd);
ret = 0;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
/**
* unregister_hotplug_dock_device - remove yourself from the hotplug list
* @handle: the acpi handle of the device
*/
void unregister_hotplug_dock_device(acpi_handle handle)
{
struct dock_dependent_device *dd;
struct dock_station *dock_station;
if (!dock_station_count)
return;
list_for_each_entry(dock_station, &dock_stations, sibling) {
dd = find_dock_dependent_device(dock_station, handle);
if (dd)
dock_del_hotplug_device(dock_station, dd);
}
}
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
/**
* handle_eject_request - handle an undock request checking for error conditions
*
* Check to make sure the dock device is still present, then undock and
* hotremove all the devices that may need removing.
*/
static int handle_eject_request(struct dock_station *ds, u32 event)
{
if (dock_in_progress(ds))
return -EBUSY;
/*
* Here we need to generate the undock event prior to actually
* doing the undock, so that the device struct still exists.
* Also, send the event even if the device is not present anymore.
*/
dock_event(ds, event, UNDOCK_EVENT);
hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
undock(ds);
dock_lock(ds, 0);
eject_dock(ds);
if (dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to undock!\n");
return -EBUSY;
}
complete_undock(ds);
return 0;
}
/**
* dock_notify - act upon an acpi dock notification
* @handle: the dock station handle
* @event: the acpi event
* @data: our driver data struct
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
* and then hotplug any devices that may need hotplugging.
*/
static void dock_notify(acpi_handle handle, u32 event, void *data)
{
struct dock_station *ds = data;
struct acpi_device *tmp;
int surprise_removal = 0;
/*
* According to acpi spec 3.0a, if a DEVICE_CHECK notification
* is sent and _DCK is present, it is assumed to mean an undock
* request.
*/
if ((ds->flags & DOCK_IS_DOCK) && event == ACPI_NOTIFY_DEVICE_CHECK)
event = ACPI_NOTIFY_EJECT_REQUEST;
/*
* dock station: BUS_CHECK - docked or surprise removal
* DEVICE_CHECK - undocked
* other device: BUS_CHECK/DEVICE_CHECK - added or surprise removal
*
* To simplify event handling, dock dependent device handler always
* get ACPI_NOTIFY_BUS_CHECK/ACPI_NOTIFY_DEVICE_CHECK for add and
* ACPI_NOTIFY_EJECT_REQUEST for removal
*/
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
&tmp)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
printk(KERN_ERR PREFIX "Unable to dock!\n");
complete_dock(ds);
break;
}
atomic_notifier_call_chain(&dock_notifier_list,
event, NULL);
hotplug_dock_devices(ds, event);
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
dock_lock(ds, 1);
acpi_update_all_gpes();
break;
}
if (dock_present(ds) || dock_in_progress(ds))
break;
/* This is a surprise removal */
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
/* fall through */
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
|| surprise_removal)
handle_eject_request(ds, event);
else
dock_event(ds, event, UNDOCK_EVENT);
break;
default:
printk(KERN_ERR PREFIX "Unknown dock event %d\n", event);
}
}
struct dock_data {
acpi_handle handle;
unsigned long event;
struct dock_station *ds;
};
static void acpi_dock_deferred_cb(void *context)
{
struct dock_data *data = context;
dock_notify(data->handle, data->event, data->ds);
kfree(data);
}
static int acpi_dock_notifier_call(struct notifier_block *this,
unsigned long event, void *data)
{
struct dock_station *dock_station;
acpi_handle handle = data;
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
struct dock_data *dd;
dd = kmalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return 0;
dd->handle = handle;
dd->event = event;
dd->ds = dock_station;
acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
return 0;
}
}
return 0;
}
static struct notifier_block dock_acpi_notifier = {
.notifier_call = acpi_dock_notifier_call,
};
/**
* find_dock_devices - find devices on the dock station
* @handle: the handle of the device we are examining
* @lvl: unused
* @context: the dock station private data
* @rv: unused
*
* This function is called by acpi_walk_namespace. It will
* check to see if an object has an _EJD method. If it does, then it
* will see if it is dependent on the dock station.
*/
static acpi_status
find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
acpi_handle tmp, parent;
struct dock_station *ds = context;
status = acpi_bus_get_ejd(handle, &tmp);
if (ACPI_FAILURE(status)) {
/* try the parent device as well */
status = acpi_get_parent(handle, &parent);
if (ACPI_FAILURE(status))
goto fdd_out;
/* see if parent is dependent on dock */
status = acpi_bus_get_ejd(parent, &tmp);
if (ACPI_FAILURE(status))
goto fdd_out;
}
if (tmp == ds->handle)
add_dock_dependent_device(ds, handle);
fdd_out:
return AE_OK;
}
/*
* show_docked - read method for "docked" file in sysfs
*/
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *tmp;
struct dock_station *dock_station = dev->platform_data;
if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
return snprintf(buf, PAGE_SIZE, "1\n");
return snprintf(buf, PAGE_SIZE, "0\n");
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
/*
* show_flags - read method for flags file in sysfs
*/
static ssize_t show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
/*
* write_undock - write method for "undock" file in sysfs
*/
static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
if (!count)
return -EINVAL;
begin_undock(dock_station);
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
return ret ? ret : count;
}
static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
return 0;
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static ssize_t show_dock_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
if (dock_station->flags & DOCK_IS_DOCK)
type = "dock_station";
else if (dock_station->flags & DOCK_IS_ATA)
type = "ata_bay";
else if (dock_station->flags & DOCK_IS_BAT)
type = "battery_bay";
else
type = "unknown";
return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,
&dev_attr_flags.attr,
&dev_attr_undock.attr,
&dev_attr_uid.attr,
&dev_attr_type.attr,
NULL
};
static struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
/**
* dock_add - add a new dock station
* @handle: the dock station handle
*
* Allocate and initialize a new dock station device. Find all devices
* that are on the dock station, and register for dock event notifications.
*/
static int __init dock_add(acpi_handle handle)
{
int ret, id;
struct dock_station ds, *dock_station;
struct platform_device *dd;
id = dock_station_count;
memset(&ds, 0, sizeof(ds));
dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);
dock_station = dd->dev.platform_data;
dock_station->handle = handle;
dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
mutex_init(&dock_station->hp_lock);
spin_lock_init(&dock_station->dd_lock);
INIT_LIST_HEAD(&dock_station->sibling);
INIT_LIST_HEAD(&dock_station->hotplug_devices);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
dev_set_uevent_suppress(&dd->dev, 0);
if (is_dock(handle))
dock_station->flags |= DOCK_IS_DOCK;
if (is_ata(handle))
dock_station->flags |= DOCK_IS_ATA;
if (is_battery(handle))
dock_station->flags |= DOCK_IS_BAT;
ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
goto err_unregister;
/* Find dependent devices */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_dock_devices, NULL,
dock_station, NULL);
/* add the dock station as a device dependent on itself */
ret = add_dock_dependent_device(dock_station, handle);
if (ret)
goto err_rmgroup;
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
return 0;
err_rmgroup:
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
return ret;
}
/**
* dock_remove - free up resources related to the dock station
*/
static int dock_remove(struct dock_station *ds)
{
struct dock_dependent_device *dd, *tmp;
struct platform_device *dock_device = ds->dock_device;
if (!dock_station_count)
return 0;
/* remove dependent devices */
list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
kfree(dd);
list_del(&ds->sibling);
/* cleanup sysfs */
sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
platform_device_unregister(dock_device);
return 0;
}
/**
* find_dock - look for a dock station
* @handle: acpi handle of a device
* @lvl: unused
* @context: counter of dock stations found
* @rv: unused
*
* This is called by acpi_walk_namespace to look for dock stations.
*/
static __init acpi_status
find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
if (is_dock(handle))
dock_add(handle);
return AE_OK;
}
static __init acpi_status
find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
/* If bay is a dock, it's already handled */
if (is_ejectable_bay(handle) && !is_dock(handle))
dock_add(handle);
return AE_OK;
}
static int __init dock_init(void)
{
if (acpi_disabled)
return 0;
/* look for a dock station */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_dock, NULL, NULL, NULL);
/* look for bay */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_bay, NULL, NULL, NULL);
if (!dock_station_count) {
printk(KERN_INFO PREFIX "No dock devices found.\n");
return 0;
}
register_acpi_bus_notifier(&dock_acpi_notifier);
printk(KERN_INFO PREFIX "%s: %d docks/bays found\n",
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
return 0;
}
static void __exit dock_exit(void)
{
struct dock_station *tmp, *dock_station;
unregister_acpi_bus_notifier(&dock_acpi_notifier);
list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
dock_remove(dock_station);
}
/*
* Must run before the drivers of any devices residing in a dock;
* otherwise we can't know which devices belong to a dock
*/
subsys_initcall(dock_init);
module_exit(dock_exit);
| gpl-2.0 |
MROM/android_kernel_bn_encore | drivers/net/tokenring/ibmtr.c | 2636 | 62817 | /* ibmtr.c: A shared-memory IBM Token Ring 16/4 driver for linux
*
* Written 1993 by Mark Swanson and Peter De Schrijver.
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* This device driver should work with any IBM Token Ring card that does
* not use DMA.
*
* I used Donald Becker's (becker@scyld.com) device driver work
* as a base for most of my initial work.
*
* Changes by Peter De Schrijver
* (Peter.Deschrijver@linux.cc.kuleuven.ac.be) :
*
* + changed name to ibmtr.c in anticipation of other tr boards.
* + changed reset code and adapter open code.
* + added SAP open code.
* + a first attempt to write interrupt, transmit and receive routines.
*
* Changes by David W. Morris (dwm@shell.portal.com) :
* 941003 dwm: - Restructure tok_probe for multiple adapters, devices.
* + Add comments, misc reorg for clarity.
* + Flatten interrupt handler levels.
*
* Changes by Farzad Farid (farzy@zen.via.ecp.fr)
* and Pascal Andre (andre@chimay.via.ecp.fr) (March 9 1995) :
* + multi ring support clean up.
* + RFC1042 compliance enhanced.
*
* Changes by Pascal Andre (andre@chimay.via.ecp.fr) (September 7 1995) :
* + bug correction in tr_tx
* + removed redundant information display
* + some code reworking
*
* Changes by Michel Lespinasse (walken@via.ecp.fr),
* Yann Doussot (doussot@via.ecp.fr) and Pascal Andre (andre@via.ecp.fr)
* (February 18, 1996) :
* + modified shared memory and mmio access port the driver to
* alpha platform (structure access -> readb/writeb)
*
* Changes by Steve Kipisz (bungy@ibm.net or kipisz@vnet.ibm.com)
* (January 18 1996):
* + swapped WWOR and WWCR in ibmtr.h
* + moved some init code from tok_probe into trdev_init. The
* PCMCIA code can call trdev_init to complete initializing
* the driver.
* + added -DPCMCIA to support PCMCIA
* + detecting PCMCIA Card Removal in interrupt handler. If
* ISRP is FF, then a PCMCIA card has been removed
* 10/2000 Burt needed a new method to avoid crashing the OS
*
* Changes by Paul Norton (pnorton@cts.com) :
* + restructured the READ.LOG logic to prevent the transmit SRB
* from being rudely overwritten before the transmit cycle is
* complete. (August 15 1996)
* + completed multiple adapter support. (November 20 1996)
* + implemented csum_partial_copy in tr_rx and increased receive
* buffer size and count. Minor fixes. (March 15, 1997)
*
* Changes by Christopher Turcksin <wabbit@rtfc.demon.co.uk>
* + Now compiles ok as a module again.
*
* Changes by Paul Norton (pnorton@ieee.org) :
* + moved the header manipulation code in tr_tx and tr_rx to
* net/802/tr.c. (July 12 1997)
* + add retry and timeout on open if cable disconnected. (May 5 1998)
* + lifted 2000 byte mtu limit. now depends on shared-RAM size.
* (May 25 1998)
* + can't allocate 2k recv buff at 8k shared-RAM. (20 October 1998)
*
* Changes by Joel Sloan (jjs@c-me.com) :
* + disable verbose debug messages by default - to enable verbose
* debugging, edit the IBMTR_DEBUG_MESSAGES define below
*
* Changes by Mike Phillips <phillim@amtrak.com> :
* + Added extra #ifdef's to work with new PCMCIA Token Ring Code.
* The PCMCIA code now just sets up the card so it can be recognized
* by ibmtr_probe. Also checks allocated memory vs. on-board memory
* for correct figure to use.
*
* Changes by Tim Hockin (thockin@isunix.it.ilstu.edu) :
* + added spinlocks for SMP sanity (10 March 1999)
*
* Changes by Jochen Friedrich to enable RFC1469 Option 2 multicasting
* i.e. using functional address C0 00 00 04 00 00 to transmit and
* receive multicast packets.
*
* Changes by Mike Sullivan (based on an original sram patch by Dave Grothe)
* to support windowing into on-adapter shared ram.
* i.e. Use LANAID to setup a PnP configuration with 16K RAM. Paging
* will shift this 16K window over the entire available shared RAM.
*
* Changes by Peter De Schrijver (p2@mind.be) :
* + fixed a problem with PCMCIA card removal
*
* Change by Mike Sullivan et al.:
* + added turbo card support. No need to use lanaid to configure
* the adapter into isa compatibility mode.
*
* Changes by Burt Silverman to allow the computer to behave nicely when
* a cable is pulled or not in place, or a PCMCIA card is removed hot.
*/
/* change the define of IBMTR_DEBUG_MESSAGES to a nonzero value
in the event that chatty debug messages are desired - jjs 12/30/98 */
#define IBMTR_DEBUG_MESSAGES 0
#include <linux/module.h>
#include <linux/sched.h>
#ifdef PCMCIA /* required for ibmtr_cs.c to build */
#undef MODULE /* yes, really */
#undef ENABLE_PAGING
#else
#define ENABLE_PAGING 1
#endif
/* changes the output format of driver initialization */
#define TR_VERBOSE 0
/* some Win95 stacks send many non-UI frames; this allows removing the warning */
#define TR_FILTERNONUI 1
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/trdevice.h>
#include <linux/ibmtr.h>
#include <net/checksum.h>
#include <asm/io.h>
#define DPRINTK(format, args...) printk("%s: " format, dev->name , ## args)
#define DPRINTD(format, args...) DummyCall("%s: " format, dev->name , ## args)
/* version and credits */
#ifndef PCMCIA
static char version[] __devinitdata =
"\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
" v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
" v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
" v2.2.1 02/08/00 Mike Sullivan <sullivam@us.ibm.com>\n"
" v2.2.2 07/27/00 Burt Silverman <burts@us.ibm.com>\n"
" v2.4.0 03/01/01 Mike Sullivan <sullivan@us.ibm.com>\n";
#endif
/* this allows displaying full adapter information */
static char *channel_def[] __devinitdata = { "ISA", "MCA", "ISA P&P" };
static char pcchannelid[] __devinitdata = {
0x05, 0x00, 0x04, 0x09,
0x04, 0x03, 0x04, 0x0f,
0x03, 0x06, 0x03, 0x01,
0x03, 0x01, 0x03, 0x00,
0x03, 0x09, 0x03, 0x09,
0x03, 0x00, 0x02, 0x00
};
static char mcchannelid[] __devinitdata = {
0x04, 0x0d, 0x04, 0x01,
0x05, 0x02, 0x05, 0x03,
0x03, 0x06, 0x03, 0x03,
0x05, 0x08, 0x03, 0x04,
0x03, 0x05, 0x03, 0x01,
0x03, 0x08, 0x02, 0x00
};
static char __devinit *adapter_def(char type)
{
switch (type) {
case 0xF: return "PC Adapter | PC Adapter II | Adapter/A";
case 0xE: return "16/4 Adapter | 16/4 Adapter/A (long)";
case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
case 0xC: return "Auto 16/4 Adapter";
default: return "adapter (unknown type)";
}
}
#define TRC_INIT 0x01 /* Trace initialization & PROBEs */
#define TRC_INITV 0x02 /* verbose init trace points */
static unsigned char ibmtr_debug_trace = 0;
static int ibmtr_probe1(struct net_device *dev, int ioaddr);
static unsigned char get_sram_size(struct tok_info *adapt_info);
static int trdev_init(struct net_device *dev);
static int tok_open(struct net_device *dev);
static int tok_init_card(struct net_device *dev);
static void tok_open_adapter(unsigned long dev_addr);
static void open_sap(unsigned char type, struct net_device *dev);
static void tok_set_multicast_list(struct net_device *dev);
static netdev_tx_t tok_send_packet(struct sk_buff *skb,
struct net_device *dev);
static int tok_close(struct net_device *dev);
static irqreturn_t tok_interrupt(int irq, void *dev_id);
static void initial_tok_int(struct net_device *dev);
static void tr_tx(struct net_device *dev);
static void tr_rx(struct net_device *dev);
static void ibmtr_reset_timer(struct timer_list*tmr,struct net_device *dev);
static void tok_rerun(unsigned long dev_addr);
static void ibmtr_readlog(struct net_device *dev);
static int ibmtr_change_mtu(struct net_device *dev, int mtu);
static void find_turbo_adapters(int *iolist);
static int ibmtr_portlist[IBMTR_MAX_ADAPTERS+1] __devinitdata = {
0xa20, 0xa24, 0, 0, 0
};
static int __devinitdata turbo_io[IBMTR_MAX_ADAPTERS] = {0};
static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
static int __devinitdata turbo_searched = 0;
#ifndef PCMCIA
static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
#endif
static void __devinit PrtChanID(char *pcid, short stride)
{
short i, j;
for (i = 0, j = 0; i < 24; i++, j += stride)
printk("%1x", ((int) pcid[j]) & 0x0f);
printk("\n");
}
static void __devinit HWPrtChanID(void __iomem *pcid, short stride)
{
short i, j;
for (i = 0, j = 0; i < 24; i++, j += stride)
printk("%1x", ((int) readb(pcid + j)) & 0x0f);
printk("\n");
}
/* We have to ioremap every checked address, because isa_readb is
* going away.
*/
static void __devinit find_turbo_adapters(int *iolist)
{
int ram_addr;
int index=0;
void __iomem *chanid;
int found_turbo=0;
unsigned char *tchanid, ctemp;
int i, j;
unsigned long jif;
void __iomem *ram_mapped ;
if (turbo_searched == 1) return;
turbo_searched=1;
for (ram_addr=0xC0000; ram_addr < 0xE0000; ram_addr+=0x2000) {
__u32 intf_tbl=0;
found_turbo=1;
ram_mapped = ioremap((u32)ram_addr,0x1fff) ;
if (ram_mapped==NULL)
continue ;
chanid=(CHANNEL_ID + ram_mapped);
tchanid=pcchannelid;
ctemp=readb(chanid) & 0x0f;
if (ctemp != *tchanid) {
iounmap(ram_mapped);
continue;
}
for (i=2,j=1; i<=46; i=i+2,j++) {
if ((readb(chanid+i) & 0x0f) != tchanid[j]){
found_turbo=0;
break;
}
}
if (!found_turbo) {
iounmap(ram_mapped);
continue;
}
writeb(0x90, ram_mapped+0x1E01);
for(i=2; i<0x0f; i++) {
writeb(0x00, ram_mapped+0x1E01+i);
}
writeb(0x00, ram_mapped+0x1E01);
for(jif=jiffies+TR_BUSY_INTERVAL; time_before_eq(jiffies,jif););
intf_tbl=ntohs(readw(ram_mapped+ACA_OFFSET+ACA_RW+WRBR_EVEN));
if (intf_tbl) {
#if IBMTR_DEBUG_MESSAGES
printk("ibmtr::find_turbo_adapters, Turbo found at "
"ram_addr %x\n",ram_addr);
printk("ibmtr::find_turbo_adapters, interface_table ");
for(i=0; i<6; i++) {
printk("%x:",readb(ram_addr+intf_tbl+i));
}
printk("\n");
#endif
turbo_io[index]=ntohs(readw(ram_mapped+intf_tbl+4));
turbo_irq[index]=readb(ram_mapped+intf_tbl+3);
outb(0, turbo_io[index] + ADAPTRESET);
for(jif=jiffies+TR_RST_TIME;time_before_eq(jiffies,jif););
outb(0, turbo_io[index] + ADAPTRESETREL);
index++;
iounmap(ram_mapped);
continue;
}
#if IBMTR_DEBUG_MESSAGES
printk("ibmtr::find_turbo_adapters, ibmtr card found at"
" %x but not a Turbo model\n",ram_addr);
#endif
iounmap(ram_mapped) ;
} /* for */
for(i=0; i<IBMTR_MAX_ADAPTERS; i++) {
if(!turbo_io[i]) break;
for (j=0; j<IBMTR_MAX_ADAPTERS; j++) {
if ( iolist[j] && iolist[j] != turbo_io[i]) continue;
iolist[j]=turbo_io[i];
break;
}
}
}
static void ibmtr_cleanup_card(struct net_device *dev)
{
if (dev->base_addr) {
outb(0,dev->base_addr+ADAPTRESET);
schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
outb(0,dev->base_addr+ADAPTRESETREL);
}
#ifndef PCMCIA
free_irq(dev->irq, dev);
release_region(dev->base_addr, IBMTR_IO_EXTENT);
{
struct tok_info *ti = netdev_priv(dev);
iounmap(ti->mmio);
iounmap(ti->sram_virt);
}
#endif
}
/****************************************************************************
* ibmtr_probe(): Routine specified in the network device structure
* to probe for an IBM Token Ring Adapter. Routine outline:
* I. Interrogate hardware to determine if an adapter exists
* and what the speeds and feeds are
* II. Setup data structures to control execution based upon
* adapter characteristics.
*
* We expect ibmtr_probe to be called once for each device entry
* which references it.
****************************************************************************/
static int __devinit ibmtr_probe(struct net_device *dev)
{
int i;
int base_addr = dev->base_addr;
if (base_addr && base_addr <= 0x1ff) /* Don't probe at all. */
return -ENXIO;
if (base_addr > 0x1ff) { /* Check a single specified location. */
if (!ibmtr_probe1(dev, base_addr)) return 0;
return -ENODEV;
}
find_turbo_adapters(ibmtr_portlist);
for (i = 0; ibmtr_portlist[i]; i++) {
int ioaddr = ibmtr_portlist[i];
if (!ibmtr_probe1(dev, ioaddr)) return 0;
}
return -ENODEV;
}
int __devinit ibmtr_probe_card(struct net_device *dev)
{
int err = ibmtr_probe(dev);
if (!err) {
err = register_netdev(dev);
if (err)
ibmtr_cleanup_card(dev);
}
return err;
}
/*****************************************************************************/
static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
{
unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0;
void __iomem * t_mmio = NULL;
struct tok_info *ti = netdev_priv(dev);
void __iomem *cd_chanid;
unsigned char *tchanid, ctemp;
#ifndef PCMCIA
unsigned char t_irq=0;
unsigned long timeout;
static int version_printed;
#endif
/* Query the adapter PIO base port which will return
* indication of where MMIO was placed. We also have a
* coded interrupt number.
*/
segment = inb(PIOaddr);
if (segment < 0x40 || segment > 0xe0) {
/* Out of range values so we'll assume non-existent IO device
* but this is not necessarily a problem, esp if a turbo
* adapter is being used. */
#if IBMTR_DEBUG_MESSAGES
DPRINTK("ibmtr_probe1(): unhappy that inb(0x%X) == 0x%X, "
"Hardware Problem?\n",PIOaddr,segment);
#endif
return -ENODEV;
}
/*
* Compute the linear base address of the MMIO area
* as LINUX doesn't care about segments
*/
t_mmio = ioremap(((__u32) (segment & 0xfc) << 11) + 0x80000,2048);
if (!t_mmio) {
DPRINTK("Cannot remap mmiobase memory area") ;
return -ENODEV ;
}
intr = segment & 0x03; /* low bits are the coded interrupt # */
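/*
* Worked example (illustrative values): if inb(PIOaddr) returns 0xd0,
* the MMIO base computed above is ((0xd0 & 0xfc) << 11) + 0x80000
* = 0x68000 + 0x80000 = 0xe8000, and intr = 0xd0 & 0x03 = 0 selects
* the coded interrupt number decoded later in this function.
*/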
if (ibmtr_debug_trace & TRC_INIT)
DPRINTK("PIOaddr: %4hx seg/intr: %2x mmio base: %p intr: %d\n"
, PIOaddr, (int) segment, t_mmio, (int) intr);
/*
* Now we compare the expected 'channelid' strings with what is
* actually there, to learn whether this is an ISA or MCA TR card
*/
#ifdef PCMCIA
iounmap(t_mmio);
t_mmio = ti->mmio; /*BMS to get virtual address */
irq = ti->irq; /*BMS to display the irq! */
#endif
cd_chanid = (CHANNEL_ID + t_mmio); /* for efficiency */
tchanid = pcchannelid;
cardpresent = TR_ISA; /* try ISA */
/* Suboptimize knowing first byte different */
ctemp = readb(cd_chanid) & 0x0f;
if (ctemp != *tchanid) { /* NOT ISA card, try MCA */
tchanid = mcchannelid;
cardpresent = TR_MCA;
if (ctemp != *tchanid) /* Neither ISA nor MCA */
cardpresent = NOTOK;
}
if (cardpresent != NOTOK) {
/* Know presumed type, try rest of ID */
for (i = 2, j = 1; i <= 46; i = i + 2, j++) {
if( (readb(cd_chanid+i)&0x0f) == tchanid[j]) continue;
/* match failed, not TR card */
cardpresent = NOTOK;
break;
}
}
/*
* If we have an ISA board check for the ISA P&P version,
* as it has different IRQ settings
*/
if (cardpresent == TR_ISA && (readb(AIPFID + t_mmio) == 0x0e))
cardpresent = TR_ISAPNP;
if (cardpresent == NOTOK) { /* "channel_id" did not match, report */
if (!(ibmtr_debug_trace & TRC_INIT)) {
#ifndef PCMCIA
iounmap(t_mmio);
#endif
return -ENODEV;
}
DPRINTK( "Channel ID string not found for PIOaddr: %4hx\n",
PIOaddr);
DPRINTK("Expected for ISA: ");
PrtChanID(pcchannelid, 1);
DPRINTK(" found: ");
/* BMS Note that this can be misleading, when hardware is flaky, because you
are reading it a second time here. So with my flaky hardware, I'll see
myself in this block, with the HW ID matching the ISA ID exactly! */
HWPrtChanID(cd_chanid, 2);
DPRINTK("Expected for MCA: ");
PrtChanID(mcchannelid, 1);
}
/* Now, setup some of the pl0 buffers for this driver.. */
/* If called from PCMCIA, it is already set up, so no need to
waste the memory, just use the existing structure */
#ifndef PCMCIA
ti->mmio = t_mmio;
for (i = 0; i < IBMTR_MAX_ADAPTERS; i++) {
if (turbo_io[i] != PIOaddr)
continue;
#if IBMTR_DEBUG_MESSAGES
printk("ibmtr::tr_probe1, setting PIOaddr %x to Turbo\n",
PIOaddr);
#endif
ti->turbo = 1;
t_irq = turbo_irq[i];
}
#endif /* !PCMCIA */
ti->readlog_pending = 0;
init_waitqueue_head(&ti->wait_for_reset);
/* if PCMCIA, the card can be recognized as either TR_ISA or TR_ISAPNP
* depending which card is inserted. */
#ifndef PCMCIA
switch (cardpresent) {
case TR_ISA:
if (intr == 0) irq = 9; /* irq2 really is irq9 */
if (intr == 1) irq = 3;
if (intr == 2) irq = 6;
if (intr == 3) irq = 7;
ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
break;
case TR_MCA:
if (intr == 0) irq = 9;
if (intr == 1) irq = 3;
if (intr == 2) irq = 10;
if (intr == 3) irq = 11;
ti->global_int_enable = 0;
ti->adapter_int_enable = 0;
ti->sram_phys=(__u32)(inb(PIOaddr+ADAPTRESETREL) & 0xfe) << 12;
break;
case TR_ISAPNP:
if (!t_irq) {
if (intr == 0) irq = 9;
if (intr == 1) irq = 3;
if (intr == 2) irq = 10;
if (intr == 3) irq = 11;
} else
irq=t_irq;
timeout = jiffies + TR_SPIN_INTERVAL;
while (!readb(ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN)){
if (!time_after(jiffies, timeout)) continue;
DPRINTK( "Hardware timeout during initialization.\n");
iounmap(t_mmio);
return -ENODEV;
}
ti->sram_phys =
((__u32)readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_EVEN)<<12);
ti->adapter_int_enable = PIOaddr + ADAPTINTREL;
break;
} /*end switch (cardpresent) */
#endif /*not PCMCIA */
if (ibmtr_debug_trace & TRC_INIT) { /* just report int */
DPRINTK("irq=%d", irq);
printk(", sram_phys=0x%x", ti->sram_phys);
if(ibmtr_debug_trace&TRC_INITV){ /* full chat in verbose only */
DPRINTK(", ti->mmio=%p", ti->mmio);
printk(", segment=%02X", segment);
}
printk(".\n");
}
/* Get hw address of token ring card */
j = 0;
for (i = 0; i < 0x18; i = i + 2) {
/* technical reference states to do this */
temp = readb(ti->mmio + AIP + i) & 0x0f;
ti->hw_address[j] = temp;
if (j & 1)
dev->dev_addr[(j / 2)] =
ti->hw_address[j]+ (ti->hw_address[j - 1] << 4);
++j;
}
/* get Adapter type: 'F' = Adapter/A, 'E' = 16/4 Adapter II,... */
ti->adapter_type = readb(ti->mmio + AIPADAPTYPE);
/* get Data Rate: F=4Mb, E=16Mb, D=4Mb & 16Mb ?? */
ti->data_rate = readb(ti->mmio + AIPDATARATE);
/* Get Early Token Release support?: F=no, E=4Mb, D=16Mb, C=4&16Mb */
ti->token_release = readb(ti->mmio + AIPEARLYTOKEN);
/* How much shared RAM is on adapter ? */
if (ti->turbo) {
ti->avail_shared_ram=127;
} else {
ti->avail_shared_ram = get_sram_size(ti);/*in 512 byte units */
}
/* We need to set or do a bunch of work here based on previous results*/
/* Support paging? What sizes?: F=no, E=16k, D=32k, C=16 & 32k */
ti->shared_ram_paging = readb(ti->mmio + AIPSHRAMPAGE);
/* Available DHB 4Mb size: F=2048, E=4096, D=4464 */
switch (readb(ti->mmio + AIP4MBDHB)) {
case 0xe: ti->dhb_size4mb = 4096; break;
case 0xd: ti->dhb_size4mb = 4464; break;
default: ti->dhb_size4mb = 2048; break;
}
/* Available DHB 16Mb size: F=2048, E=4096, D=8192, C=16384, B=17960 */
switch (readb(ti->mmio + AIP16MBDHB)) {
case 0xe: ti->dhb_size16mb = 4096; break;
case 0xd: ti->dhb_size16mb = 8192; break;
case 0xc: ti->dhb_size16mb = 16384; break;
case 0xb: ti->dhb_size16mb = 17960; break;
default: ti->dhb_size16mb = 2048; break;
}
/* We must figure out how much shared memory space this adapter
* will occupy so that if there are two adapters we can fit both
* in. Given a choice, we will limit this adapter to 32K. The
* maximum space we will use for two adapters is 64K so if the
* adapter we are working on demands 64K (it also doesn't support
* paging), then only one adapter can be supported.
*/
/*
* determine how much of total RAM is mapped into PC space
*/
ti->mapped_ram_size= /* sixteen to one hundred twenty-eight 512-byte blocks */
1<< ((readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03) + 4);
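/*
* Illustrative decode of the statement above: RRR_ODD bits 3,2 give
* (rrr >> 2 & 0x03) in 0..3, so mapped_ram_size is 1 << (4..7) =
* 16, 32, 64 or 128 blocks of 512 bytes, i.e. an 8K, 16K, 32K or 64K
* window of shared RAM visible in PC address space.
*/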
ti->page_mask = 0;
if (ti->turbo) ti->page_mask=0xf0;
else if (ti->shared_ram_paging == 0xf); /* No paging in adapter */
else {
#ifdef ENABLE_PAGING
unsigned char pg_size = 0;
/* BMS: page size: PCMCIA, use configuration register;
ISAPNP, use LANAIDC config tool from www.ibm.com */
switch (ti->shared_ram_paging) {
case 0xf:
break;
case 0xe:
ti->page_mask = (ti->mapped_ram_size == 32) ? 0xc0 : 0;
pg_size = 32; /* 16KB page size */
break;
case 0xd:
ti->page_mask = (ti->mapped_ram_size == 64) ? 0x80 : 0;
pg_size = 64; /* 32KB page size */
break;
case 0xc:
switch (ti->mapped_ram_size) {
case 32:
ti->page_mask = 0xc0;
pg_size = 32;
break;
case 64:
ti->page_mask = 0x80;
pg_size = 64;
break;
}
break;
default:
DPRINTK("Unknown shared ram paging info %01X\n",
ti->shared_ram_paging);
iounmap(t_mmio);
return -ENODEV;
break;
} /*end switch shared_ram_paging */
if (ibmtr_debug_trace & TRC_INIT)
DPRINTK("Shared RAM paging code: %02X, "
"mapped RAM size: %dK, shared RAM size: %dK, "
"page mask: %02X\n:",
ti->shared_ram_paging, ti->mapped_ram_size / 2,
ti->avail_shared_ram / 2, ti->page_mask);
#endif /*ENABLE_PAGING */
}
#ifndef PCMCIA
/* finish figuring the shared RAM address */
if (cardpresent == TR_ISA) {
static const __u32 ram_bndry_mask[] = {
0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
};
__u32 new_base, rrr_32, chk_base, rbm;
rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
rbm = ram_bndry_mask[rrr_32];
new_base = (ibmtr_mem_base + (~rbm)) & rbm;/* up to boundary */
chk_base = new_base + (ti->mapped_ram_size << 9);
if (chk_base > (ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE)) {
DPRINTK("Shared RAM for this adapter (%05x) exceeds "
"driver limit (%05x), adapter not started.\n",
chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
iounmap(t_mmio);
return -ENODEV;
} else { /* seems cool, record what we have figured out */
ti->sram_base = new_base >> 12;
ibmtr_mem_base = chk_base;
}
}
else ti->sram_base = ti->sram_phys >> 12;
/* The PCMCIA has already got the interrupt line and the io port,
so no chance of anybody else getting it - MLP */
if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
irq);
iounmap(t_mmio);
return -ENODEV;
}
/*?? Now, allocate some of the PIO PORTs for this driver.. */
/* record PIOaddr range as busy */
if (!request_region(PIOaddr, IBMTR_IO_EXTENT, "ibmtr")) {
DPRINTK("Could not grab PIO range. Halting driver.\n");
free_irq(dev->irq, dev);
iounmap(t_mmio);
return -EBUSY;
}
if (!version_printed++) {
printk(version);
}
#endif /* !PCMCIA */
DPRINTK("%s %s found\n",
channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
irq, PIOaddr, ti->mapped_ram_size / 2);
DPRINTK("Hardware address : %pM\n", dev->dev_addr);
if (ti->page_mask)
DPRINTK("Shared RAM paging enabled. "
"Page size: %uK Shared Ram size %dK\n",
((ti->page_mask^0xff)+1) >>2, ti->avail_shared_ram / 2);
else
DPRINTK("Shared RAM paging disabled. ti->page_mask %x\n",
ti->page_mask);
/* Calculate the maximum DHB we can use */
/* two cases where avail_shared_ram doesn't equal mapped_ram_size:
1. avail_shared_ram is 127 but mapped_ram_size is 128 (typical)
2. user has configured adapter for less than avail_shared_ram
but is not using paging (she should use paging, I believe)
*/
if (!ti->page_mask) {
ti->avail_shared_ram=
min(ti->mapped_ram_size,ti->avail_shared_ram);
}
switch (ti->avail_shared_ram) {
case 16: /* 8KB shared RAM */
ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)2048);
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=2;
ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)2048);
ti->rbuf_len16 = 1032;
ti->rbuf_cnt16=2;
break;
case 32: /* 16KB shared RAM */
ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=4;
ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)4096);
ti->rbuf_len16 = 1032; /*1024 usable */
ti->rbuf_cnt16=4;
break;
case 64: /* 32KB shared RAM */
ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=6;
ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)10240);
ti->rbuf_len16 = 1032;
ti->rbuf_cnt16=6;
break;
case 127: /* 63.5KB shared RAM */
ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=6;
ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)16384);
ti->rbuf_len16 = 1032;
ti->rbuf_cnt16=16;
break;
case 128: /* 64KB shared RAM */
ti->dhb_size4mb = min(ti->dhb_size4mb, (unsigned short)4464);
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=6;
ti->dhb_size16mb = min(ti->dhb_size16mb, (unsigned short)17960);
ti->rbuf_len16 = 1032;
ti->rbuf_cnt16=16;
break;
default:
ti->dhb_size4mb = 2048;
ti->rbuf_len4 = 1032;
ti->rbuf_cnt4=2;
ti->dhb_size16mb = 2048;
ti->rbuf_len16 = 1032;
ti->rbuf_cnt16=2;
break;
}
/* this formula is not smart enough for the paging case
ti->rbuf_cnt<x> = (ti->avail_shared_ram * BLOCKSZ - ADAPT_PRIVATE -
ARBLENGTH - SSBLENGTH - DLC_MAX_SAP * SAPLENGTH -
DLC_MAX_STA * STALENGTH - ti->dhb_size<x>mb * NUM_DHB -
SRBLENGTH - ASBLENGTH) / ti->rbuf_len<x>;
*/
ti->maxmtu16 = (ti->rbuf_len16 - 8) * ti->rbuf_cnt16 - TR_HLEN;
ti->maxmtu4 = (ti->rbuf_len4 - 8) * ti->rbuf_cnt4 - TR_HLEN;
/*BMS assuming 18 bytes of Routing Information (usually works) */
DPRINTK("Maximum Receive Internet Protocol MTU 16Mbps: %d, 4Mbps: %d\n",
ti->maxmtu16, ti->maxmtu4);
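/*
* Illustrative arithmetic for the computation above, taking the 64K
* shared-RAM case (rbuf_len16 = 1032, rbuf_cnt16 = 16): maxmtu16 =
* (1032 - 8) * 16 - TR_HLEN, roughly 16K of payload; the "- 8" is the
* per-buffer chaining overhead (1024 bytes of each 1032-byte receive
* buffer are usable, per the table above).
*/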
dev->base_addr = PIOaddr; /* set the value for device */
dev->mem_start = ti->sram_base << 12;
dev->mem_end = dev->mem_start + (ti->mapped_ram_size << 9) - 1;
trdev_init(dev);
return 0; /* Return 0 to indicate we have found a Token Ring card. */
} /*ibmtr_probe1() */
/*****************************************************************************/
/* query the adapter for the size of shared RAM */
/* the function returns the RAM size in units of 512 bytes */
static unsigned char __devinit get_sram_size(struct tok_info *adapt_info)
{
unsigned char avail_sram_code;
static unsigned char size_code[] = { 0, 16, 32, 64, 127, 128 };
/* Adapter gives
'F' -- use RRR bits 3,2
'E' -- 8kb 'D' -- 16kb
'C' -- 32kb 'A' -- 64KB
'B' - 64KB less 512 bytes at top
(WARNING ... must zero top bytes in INIT) */
avail_sram_code = 0xf - readb(adapt_info->mmio + AIPAVAILSHRAM);
if (avail_sram_code) return size_code[avail_sram_code];
else /* for code 'F', must compute size from RRR(3,2) bits */
return 1 <<
((readb(adapt_info->mmio+ACA_OFFSET+ACA_RW+RRR_ODD)>>2&3)+4);
}
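/*
* Worked example (illustrative): an AIPAVAILSHRAM byte of 0xd ('D')
* gives avail_sram_code = 0xf - 0xd = 2, so size_code[2] = 32 blocks
* of 512 bytes = 16KB, matching the table in the comment above. Code
* 'F' yields 0 and falls back to the RRR bits 3,2 window size instead.
*/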
/*****************************************************************************/
static const struct net_device_ops trdev_netdev_ops = {
.ndo_open = tok_open,
.ndo_stop = tok_close,
.ndo_start_xmit = tok_send_packet,
.ndo_set_multicast_list = tok_set_multicast_list,
.ndo_change_mtu = ibmtr_change_mtu,
};
static int __devinit trdev_init(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
SET_PAGE(ti->srb_page);
ti->open_failure = NO ;
dev->netdev_ops = &trdev_netdev_ops;
return 0;
}
/*****************************************************************************/
static int tok_init_card(struct net_device *dev)
{
struct tok_info *ti;
short PIOaddr;
unsigned long i;
PIOaddr = dev->base_addr;
ti = netdev_priv(dev);
/* Special processing for first interrupt after reset */
ti->do_tok_int = FIRST_INT;
/* Reset adapter */
writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
outb(0, PIOaddr + ADAPTRESET);
schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
outb(0, PIOaddr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
if (ti->page_mask)
writeb(SRPR_ENABLE_PAGING,ti->mmio+ACA_OFFSET+ACA_RW+SRPR_EVEN);
#endif
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
i = sleep_on_timeout(&ti->wait_for_reset, 4 * HZ);
return i? 0 : -EAGAIN;
}
/*****************************************************************************/
static int tok_open(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
int i;
/*the case we were left in a failure state during a previous open */
if (ti->open_failure == YES) {
DPRINTK("Last time you were disconnected, how about now?\n");
printk("You can't insert with an ICS connector half-cocked.\n");
}
ti->open_status = CLOSED; /* CLOSED or OPEN */
ti->sap_status = CLOSED; /* CLOSED or OPEN */
ti->open_failure = NO; /* NO or YES */
ti->open_mode = MANUAL; /* MANUAL or AUTOMATIC */
ti->sram_phys &= ~1; /* to reverse what we do in tok_close */
/* init the spinlock */
spin_lock_init(&ti->lock);
init_timer(&ti->tr_timer);
i = tok_init_card(dev);
if (i) return i;
while (1){
tok_open_adapter((unsigned long) dev);
i= interruptible_sleep_on_timeout(&ti->wait_for_reset, 25 * HZ);
/* sig catch: estimate opening adapter takes more than .5 sec*/
if (i>(245*HZ)/10) break; /* fancier than if (i==25*HZ) */
if (i==0) break;
if (ti->open_status == OPEN && ti->sap_status==OPEN) {
netif_start_queue(dev);
DPRINTK("Adapter is up and running\n");
return 0;
}
i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
/* wait 30 seconds */
if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
}
outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
DPRINTK("TERMINATED via signal\n"); /*BMS useful */
return -EAGAIN;
}
/*****************************************************************************/
#define COMMAND_OFST 0
#define OPEN_OPTIONS_OFST 8
#define NUM_RCV_BUF_OFST 24
#define RCV_BUF_LEN_OFST 26
#define DHB_LENGTH_OFST 28
#define NUM_DHB_OFST 30
#define DLC_MAX_SAP_OFST 32
#define DLC_MAX_STA_OFST 33
static void tok_open_adapter(unsigned long dev_addr)
{
struct net_device *dev = (struct net_device *) dev_addr;
struct tok_info *ti;
int i;
ti = netdev_priv(dev);
SET_PAGE(ti->init_srb_page);
writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD);
for (i = 0; i < sizeof(struct dir_open_adapter); i++)
writeb(0, ti->init_srb + i);
writeb(DIR_OPEN_ADAPTER, ti->init_srb + COMMAND_OFST);
writew(htons(OPEN_PASS_BCON_MAC), ti->init_srb + OPEN_OPTIONS_OFST);
if (ti->ring_speed == 16) {
writew(htons(ti->dhb_size16mb), ti->init_srb + DHB_LENGTH_OFST);
writew(htons(ti->rbuf_cnt16), ti->init_srb + NUM_RCV_BUF_OFST);
writew(htons(ti->rbuf_len16), ti->init_srb + RCV_BUF_LEN_OFST);
} else {
writew(htons(ti->dhb_size4mb), ti->init_srb + DHB_LENGTH_OFST);
writew(htons(ti->rbuf_cnt4), ti->init_srb + NUM_RCV_BUF_OFST);
writew(htons(ti->rbuf_len4), ti->init_srb + RCV_BUF_LEN_OFST);
}
writeb(NUM_DHB, /* always 2 */ ti->init_srb + NUM_DHB_OFST);
writeb(DLC_MAX_SAP, ti->init_srb + DLC_MAX_SAP_OFST);
writeb(DLC_MAX_STA, ti->init_srb + DLC_MAX_STA_OFST);
ti->srb = ti->init_srb; /* We use this one in the interrupt handler */
ti->srb_page = ti->init_srb_page;
DPRINTK("Opening adapter: Xmit bfrs: %d X %d, Rcv bfrs: %d X %d\n",
readb(ti->init_srb + NUM_DHB_OFST),
ntohs(readw(ti->init_srb + DHB_LENGTH_OFST)),
ntohs(readw(ti->init_srb + NUM_RCV_BUF_OFST)),
ntohs(readw(ti->init_srb + RCV_BUF_LEN_OFST)));
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
/*****************************************************************************/
static void open_sap(unsigned char type, struct net_device *dev)
{
int i;
struct tok_info *ti = netdev_priv(dev);
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct dlc_open_sap); i++)
writeb(0, ti->srb + i);
#define MAX_I_FIELD_OFST 14
#define SAP_VALUE_OFST 16
#define SAP_OPTIONS_OFST 17
#define STATION_COUNT_OFST 18
writeb(DLC_OPEN_SAP, ti->srb + COMMAND_OFST);
writew(htons(MAX_I_FIELD), ti->srb + MAX_I_FIELD_OFST);
writeb(SAP_OPEN_IND_SAP | SAP_OPEN_PRIORITY, ti->srb+ SAP_OPTIONS_OFST);
writeb(SAP_OPEN_STATION_CNT, ti->srb + STATION_COUNT_OFST);
writeb(type, ti->srb + SAP_VALUE_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
}
/*****************************************************************************/
static void tok_set_multicast_list(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
struct netdev_hw_addr *ha;
unsigned char address[4];
int i;
/*BMS the next line is CRUCIAL or you may be sad when you */
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
netdev_for_each_mc_addr(ha, dev) {
address[0] |= ha->addr[2];
address[1] |= ha->addr[3];
address[2] |= ha->addr[4];
address[3] |= ha->addr[5];
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
writeb(0, ti->srb + i);
#define FUNCT_ADDRESS_OFST 6
writeb(DIR_SET_FUNC_ADDR, ti->srb + COMMAND_OFST);
for (i = 0; i < 4; i++)
writeb(address[i], ti->srb + FUNCT_ADDRESS_OFST + i);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
#if TR_VERBOSE
DPRINTK("Setting functional address: ");
for (i=0;i<4;i++) printk("%02X ", address[i]);
printk("\n");
#endif
}
/*****************************************************************************/
#define STATION_ID_OFST 4
static netdev_tx_t tok_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct tok_info *ti;
unsigned long flags;
ti = netdev_priv(dev);
netif_stop_queue(dev);
/* lock against other CPUs */
spin_lock_irqsave(&(ti->lock), flags);
/* Save skb; we'll need it when the adapter asks for the data */
ti->current_skb = skb;
SET_PAGE(ti->srb_page);
writeb(XMIT_UI_FRAME, ti->srb + COMMAND_OFST);
writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
spin_unlock_irqrestore(&(ti->lock), flags);
return NETDEV_TX_OK;
}
/*****************************************************************************/
static int tok_close(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
/* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */
/* unloading the module from memory, and then if a timer pops, ouch */
del_timer_sync(&ti->tr_timer);
outb(0, dev->base_addr + ADAPTRESET);
ti->sram_phys |= 1;
ti->open_status = CLOSED;
netif_stop_queue(dev);
DPRINTK("Adapter is closed.\n");
return 0;
}
/*****************************************************************************/
#define RETCODE_OFST 2
#define OPEN_ERROR_CODE_OFST 6
#define ASB_ADDRESS_OFST 8
#define SRB_ADDRESS_OFST 10
#define ARB_ADDRESS_OFST 12
#define SSB_ADDRESS_OFST 14
static char *printphase[]= {"Lobe media test","Physical insertion",
"Address verification","Roll call poll","Request Parameters"};
static char *printerror[]={"Function failure","Signal loss","Reserved",
"Frequency error","Timeout","Ring failure","Ring beaconing",
"Duplicate node address",
"Parameter request-retry count exceeded","Remove received",
"IMPL force received","Duplicate modifier",
"No monitor detected","Monitor contention failed for RPL"};
static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page)
{
if (ti->page_mask) {
*page = (index >> 8) & ti->page_mask;
index &= ~(ti->page_mask << 8);
}
return ti->sram_virt + index;
}
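/*
* Worked example (illustrative, assuming 16KB paging so page_mask is
* 0xc0): for index 0x5a00, *page = (0x5a00 >> 8) & 0xc0 = 0x40 and the
* index is masked to 0x5a00 & ~0xc000 = 0x1a00; the caller then does
* SET_PAGE(0x40) and accesses sram_virt + 0x1a00.
*/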
static void dir_open_adapter (struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
unsigned char ret_code;
__u16 err;
ti->srb = map_address(ti,
ntohs(readw(ti->init_srb + SRB_ADDRESS_OFST)),
&ti->srb_page);
ti->ssb = map_address(ti,
ntohs(readw(ti->init_srb + SSB_ADDRESS_OFST)),
&ti->ssb_page);
ti->arb = map_address(ti,
ntohs(readw(ti->init_srb + ARB_ADDRESS_OFST)),
&ti->arb_page);
ti->asb = map_address(ti,
ntohs(readw(ti->init_srb + ASB_ADDRESS_OFST)),
&ti->asb_page);
ti->current_skb = NULL;
ret_code = readb(ti->init_srb + RETCODE_OFST);
err = ntohs(readw(ti->init_srb + OPEN_ERROR_CODE_OFST));
if (!ret_code) {
ti->open_status = OPEN; /* TR adapter is now available */
if (ti->open_mode == AUTOMATIC) {
DPRINTK("Adapter reopened.\n");
}
writeb(~SRB_RESP_INT, ti->mmio+ACA_OFFSET+ACA_RESET+ISRP_ODD);
open_sap(EXTENDED_SAP, dev);
return;
}
ti->open_failure = YES;
if (ret_code == 7){
if (err == 0x24) {
if (!ti->auto_speedsave) {
DPRINTK("Open failed: Adapter speed must match "
"ring speed if Automatic Ring Speed Save is "
"disabled.\n");
ti->open_action = FAIL;
}else
DPRINTK("Retrying open to adjust to "
"ring speed, ");
} else if (err == 0x2d) {
DPRINTK("Physical Insertion: No Monitor Detected, ");
printk("retrying after %ds delay...\n",
TR_RETRY_INTERVAL/HZ);
} else if (err == 0x11) {
DPRINTK("Lobe Media Function Failure (0x11), ");
printk(" retrying after %ds delay...\n",
TR_RETRY_INTERVAL/HZ);
} else {
char **prphase = printphase;
char **prerror = printerror;
int pnr = err / 16 - 1;
int enr = err % 16 - 1;
DPRINTK("TR Adapter misc open failure, error code = ");
if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) ||
enr < 0 ||
enr >= ARRAY_SIZE(printerror))
printk("0x%x, invalid Phase/Error.", err);
else
printk("0x%x, Phase: %s, Error: %s\n", err,
prphase[pnr], prerror[enr]);
printk(" retrying after %ds delay...\n",
TR_RETRY_INTERVAL/HZ);
}
} else DPRINTK("open failed: ret_code = %02X..., ", ret_code);
if (ti->open_action != FAIL) {
if (ti->open_mode==AUTOMATIC){
ti->open_action = REOPEN;
ibmtr_reset_timer(&(ti->tr_timer), dev);
return;
}
wake_up(&ti->wait_for_reset);
return;
}
DPRINTK("FAILURE, CAPUT\n");
}
/******************************************************************************/
static irqreturn_t tok_interrupt(int irq, void *dev_id)
{
unsigned char status;
/* unsigned char status_even ; */
struct tok_info *ti;
struct net_device *dev;
#ifdef ENABLE_PAGING
unsigned char save_srpr;
#endif
dev = dev_id;
#if TR_VERBOSE
DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq);
#endif
ti = netdev_priv(dev);
if (ti->sram_phys & 1)
return IRQ_NONE; /* PCMCIA card extraction flag */
spin_lock(&(ti->lock));
#ifdef ENABLE_PAGING
save_srpr = readb(ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
/* Disable interrupts till processing is finished */
writeb((~INT_ENABLE), ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
/* Reset interrupt for ISA boards */
if (ti->adapter_int_enable)
outb(0, ti->adapter_int_enable);
else /* used for PCMCIA cards */
outb(0, ti->global_int_enable);
if (ti->do_tok_int == FIRST_INT){
initial_tok_int(dev);
#ifdef ENABLE_PAGING
writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
spin_unlock(&(ti->lock));
return IRQ_HANDLED;
}
/* Begin interrupt handler HERE inline to avoid the extra
levels of logic and call depth for the original solution. */
status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_ODD);
/*BMSstatus_even = readb (ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN) */
/*BMSdebugprintk("tok_interrupt: ISRP_ODD = 0x%x ISRP_EVEN = 0x%x\n", */
/*BMS status,status_even); */
if (status & ADAP_CHK_INT) {
int i;
void __iomem *check_reason;
__u8 check_reason_page = 0;
check_reason = map_address(ti,
ntohs(readw(ti->mmio+ ACA_OFFSET+ACA_RW + WWCR_EVEN)),
&check_reason_page);
SET_PAGE(check_reason_page);
DPRINTK("Adapter check interrupt\n");
DPRINTK("8 reason bytes follow: ");
for (i = 0; i < 8; i++, check_reason++)
printk("%02X ", (int) readb(check_reason));
printk("\n");
writeb(~ADAP_CHK_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
status = readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRA_EVEN);
DPRINTK("ISRA_EVEN == 0x02%x\n",status);
ti->open_status = CLOSED;
ti->sap_status = CLOSED;
ti->open_mode = AUTOMATIC;
netif_carrier_off(dev);
netif_stop_queue(dev);
ti->open_action = RESTART;
outb(0, dev->base_addr + ADAPTRESET);
ibmtr_reset_timer(&(ti->tr_timer), dev);/*BMS try to reopen*/
spin_unlock(&(ti->lock));
return IRQ_HANDLED;
}
if (readb(ti->mmio + ACA_OFFSET + ACA_RW + ISRP_EVEN)
& (TCR_INT | ERR_INT | ACCESS_INT)) {
DPRINTK("adapter error: ISRP_EVEN : %02x\n",
(int)readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRP_EVEN));
writeb(~(TCR_INT | ERR_INT | ACCESS_INT),
ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
status= readb(ti->mmio+ ACA_OFFSET + ACA_RW + ISRA_EVEN);/*BMS*/
DPRINTK("ISRA_EVEN == 0x02%x\n",status);/*BMS*/
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
#ifdef ENABLE_PAGING
writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
spin_unlock(&(ti->lock));
return IRQ_HANDLED;
}
if (status & SRB_RESP_INT) { /* SRB response */
SET_PAGE(ti->srb_page);
#if TR_VERBOSE
DPRINTK("SRB resp: cmd=%02X rsp=%02X\n",
readb(ti->srb), readb(ti->srb + RETCODE_OFST));
#endif
switch (readb(ti->srb)) { /* SRB command check */
case XMIT_DIR_FRAME:{
unsigned char xmit_ret_code;
xmit_ret_code = readb(ti->srb + RETCODE_OFST);
if (xmit_ret_code == 0xff) break;
DPRINTK("error on xmit_dir_frame request: %02X\n",
xmit_ret_code);
if (ti->current_skb) {
dev_kfree_skb_irq(ti->current_skb);
ti->current_skb = NULL;
}
/*dev->tbusy = 0;*/
netif_wake_queue(dev);
if (ti->readlog_pending)
ibmtr_readlog(dev);
break;
}
case XMIT_UI_FRAME:{
unsigned char xmit_ret_code;
xmit_ret_code = readb(ti->srb + RETCODE_OFST);
if (xmit_ret_code == 0xff) break;
DPRINTK("error on xmit_ui_frame request: %02X\n",
xmit_ret_code);
if (ti->current_skb) {
dev_kfree_skb_irq(ti->current_skb);
ti->current_skb = NULL;
}
netif_wake_queue(dev);
if (ti->readlog_pending)
ibmtr_readlog(dev);
break;
}
case DIR_OPEN_ADAPTER:
dir_open_adapter(dev);
break;
case DLC_OPEN_SAP:
if (readb(ti->srb + RETCODE_OFST)) {
DPRINTK("open_sap failed: ret_code = %02X, "
"retrying\n",
(int) readb(ti->srb + RETCODE_OFST));
ti->open_action = REOPEN;
ibmtr_reset_timer(&(ti->tr_timer), dev);
break;
}
ti->exsap_station_id = readw(ti->srb + STATION_ID_OFST);
ti->sap_status = OPEN;/* TR adapter is now available */
if (ti->open_mode==MANUAL){
wake_up(&ti->wait_for_reset);
break;
}
netif_wake_queue(dev);
netif_carrier_on(dev);
break;
case DIR_INTERRUPT:
case DIR_MOD_OPEN_PARAMS:
case DIR_SET_GRP_ADDR:
case DIR_SET_FUNC_ADDR:
case DLC_CLOSE_SAP:
if (readb(ti->srb + RETCODE_OFST))
DPRINTK("error on %02X: %02X\n",
(int) readb(ti->srb + COMMAND_OFST),
(int) readb(ti->srb + RETCODE_OFST));
break;
case DIR_READ_LOG:
if (readb(ti->srb + RETCODE_OFST)){
DPRINTK("error on dir_read_log: %02X\n",
(int) readb(ti->srb + RETCODE_OFST));
netif_wake_queue(dev);
break;
}
#if IBMTR_DEBUG_MESSAGES
#define LINE_ERRORS_OFST 0
#define INTERNAL_ERRORS_OFST 1
#define BURST_ERRORS_OFST 2
#define AC_ERRORS_OFST 3
#define ABORT_DELIMITERS_OFST 4
#define LOST_FRAMES_OFST 6
#define RECV_CONGEST_COUNT_OFST 7
#define FRAME_COPIED_ERRORS_OFST 8
#define FREQUENCY_ERRORS_OFST 9
#define TOKEN_ERRORS_OFST 10
DPRINTK("Line errors %02X, Internal errors %02X, "
"Burst errors %02X\n" "A/C errors %02X, "
"Abort delimiters %02X, Lost frames %02X\n"
"Receive congestion count %02X, "
"Frame copied errors %02X\nFrequency errors %02X, "
"Token errors %02X\n",
(int) readb(ti->srb + LINE_ERRORS_OFST),
(int) readb(ti->srb + INTERNAL_ERRORS_OFST),
(int) readb(ti->srb + BURST_ERRORS_OFST),
(int) readb(ti->srb + AC_ERRORS_OFST),
(int) readb(ti->srb + ABORT_DELIMITERS_OFST),
(int) readb(ti->srb + LOST_FRAMES_OFST),
(int) readb(ti->srb + RECV_CONGEST_COUNT_OFST),
(int) readb(ti->srb + FRAME_COPIED_ERRORS_OFST),
(int) readb(ti->srb + FREQUENCY_ERRORS_OFST),
(int) readb(ti->srb + TOKEN_ERRORS_OFST));
#endif
netif_wake_queue(dev);
break;
default:
DPRINTK("Unknown command %02X encountered\n",
(int) readb(ti->srb));
} /* end switch SRB command check */
writeb(~SRB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
} /* if SRB response */
if (status & ASB_FREE_INT) { /* ASB response */
SET_PAGE(ti->asb_page);
#if TR_VERBOSE
DPRINTK("ASB resp: cmd=%02X\n", readb(ti->asb));
#endif
switch (readb(ti->asb)) { /* ASB command check */
case REC_DATA:
case XMIT_UI_FRAME:
case XMIT_DIR_FRAME:
break;
default:
DPRINTK("unknown command in asb %02X\n",
(int) readb(ti->asb));
} /* switch ASB command check */
if (readb(ti->asb + 2) != 0xff) /* checks ret_code */
DPRINTK("ASB error %02X in cmd %02X\n",
(int) readb(ti->asb + 2), (int) readb(ti->asb));
writeb(~ASB_FREE_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
} /* if ASB response */
#define STATUS_OFST 6
#define NETW_STATUS_OFST 6
if (status & ARB_CMD_INT) { /* ARB response */
SET_PAGE(ti->arb_page);
#if TR_VERBOSE
DPRINTK("ARB resp: cmd=%02X\n", readb(ti->arb));
#endif
switch (readb(ti->arb)) { /* ARB command check */
case DLC_STATUS:
DPRINTK("DLC_STATUS new status: %02X on station %02X\n",
ntohs(readw(ti->arb + STATUS_OFST)),
ntohs(readw(ti->arb+ STATION_ID_OFST)));
break;
case REC_DATA:
tr_rx(dev);
break;
case RING_STAT_CHANGE:{
unsigned short ring_status;
ring_status= ntohs(readw(ti->arb + NETW_STATUS_OFST));
if (ibmtr_debug_trace & TRC_INIT)
DPRINTK("Ring Status Change...(0x%x)\n",
ring_status);
if(ring_status& (REMOVE_RECV|AUTO_REMOVAL|LOBE_FAULT)){
netif_stop_queue(dev);
netif_carrier_off(dev);
DPRINTK("Remove received, or Auto-removal error"
", or Lobe fault\n");
DPRINTK("We'll try to reopen the closed adapter"
" after a %d second delay.\n",
TR_RETRY_INTERVAL/HZ);
/*I was confused: I saw the TR reopening but */
/*forgot:with an RJ45 in an RJ45/ICS adapter */
/*but adapter not in the ring, the TR will */
/* open, and then soon close and come here. */
ti->open_mode = AUTOMATIC;
ti->open_status = CLOSED; /*12/2000 BMS*/
ti->open_action = REOPEN;
ibmtr_reset_timer(&(ti->tr_timer), dev);
} else if (ring_status & LOG_OVERFLOW) {
if(netif_queue_stopped(dev))
ti->readlog_pending = 1;
else
ibmtr_readlog(dev);
}
break;
}
case XMIT_DATA_REQ:
tr_tx(dev);
break;
default:
DPRINTK("Unknown command %02X in arb\n",
(int) readb(ti->arb));
break;
} /* switch ARB command check */
writeb(~ARB_CMD_INT, ti->mmio+ ACA_OFFSET+ACA_RESET + ISRP_ODD);
writeb(ARB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
} /* if ARB response */
if (status & SSB_RESP_INT) { /* SSB response */
unsigned char retcode;
SET_PAGE(ti->ssb_page);
#if TR_VERBOSE
DPRINTK("SSB resp: cmd=%02X rsp=%02X\n",
readb(ti->ssb), readb(ti->ssb + 2));
#endif
switch (readb(ti->ssb)) { /* SSB command check */
case XMIT_DIR_FRAME:
case XMIT_UI_FRAME:
retcode = readb(ti->ssb + 2);
if (retcode && (retcode != 0x22))/* checks ret_code */
DPRINTK("xmit ret_code: %02X xmit error code: "
"%02X\n",
(int)retcode, (int)readb(ti->ssb + 6));
else
dev->stats.tx_packets++;
break;
case XMIT_XID_CMD:
DPRINTK("xmit xid ret_code: %02X\n",
(int) readb(ti->ssb + 2));
break;
default:
DPRINTK("Unknown command %02X in ssb\n",
(int) readb(ti->ssb));
} /* SSB command check */
writeb(~SSB_RESP_INT, ti->mmio+ ACA_OFFSET+ACA_RESET+ ISRP_ODD);
writeb(SSB_FREE, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
} /* if SSB response */
#ifdef ENABLE_PAGING
writeb(save_srpr, ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
spin_unlock(&(ti->lock));
return IRQ_HANDLED;
} /*tok_interrupt */
/*****************************************************************************/
#define INIT_STATUS_OFST 1
#define INIT_STATUS_2_OFST 2
#define ENCODED_ADDRESS_OFST 8
static void initial_tok_int(struct net_device *dev)
{
__u32 encoded_addr, hw_encoded_addr;
struct tok_info *ti;
unsigned char init_status; /*BMS 12/2000*/
ti = netdev_priv(dev);
ti->do_tok_int = NOT_FIRST;
/* we assign the shared-ram address for ISA devices */
writeb(ti->sram_base, ti->mmio + ACA_OFFSET + ACA_RW + RRR_EVEN);
#ifndef PCMCIA
ti->sram_virt = ioremap(((__u32)ti->sram_base << 12), ti->avail_shared_ram);
#endif
ti->init_srb = map_address(ti,
ntohs(readw(ti->mmio + ACA_OFFSET + WRBR_EVEN)),
&ti->init_srb_page);
if (ti->page_mask && ti->avail_shared_ram == 127) {
void __iomem *last_512;
__u8 last_512_page=0;
int i;
last_512 = map_address(ti, 0xfe00, &last_512_page);
/* initialize high section of ram (if necessary) */
SET_PAGE(last_512_page);
for (i = 0; i < 512; i++)
writeb(0, last_512 + i);
}
SET_PAGE(ti->init_srb_page);
#if TR_VERBOSE
{
int i;
DPRINTK("ti->init_srb_page=0x%x\n", ti->init_srb_page);
DPRINTK("init_srb(%p):", ti->init_srb );
for (i = 0; i < 20; i++)
printk("%02X ", (int) readb(ti->init_srb + i));
printk("\n");
}
#endif
hw_encoded_addr = readw(ti->init_srb + ENCODED_ADDRESS_OFST);
encoded_addr = ntohs(hw_encoded_addr);
init_status= /*BMS 12/2000 check for shallow mode possibility (Turbo)*/
readb(ti->init_srb+offsetof(struct srb_init_response,init_status));
/*printk("Initial interrupt: init_status= 0x%02x\n",init_status);*/
ti->ring_speed = init_status & 0x01 ? 16 : 4;
DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n",
ti->ring_speed, (unsigned int)dev->mem_start);
ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0;
if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset);
else tok_open_adapter((unsigned long)dev);
} /*initial_tok_int() */
/*****************************************************************************/
#define CMD_CORRELATE_OFST 1
#define DHB_ADDRESS_OFST 6
#define FRAME_LENGTH_OFST 6
#define HEADER_LENGTH_OFST 8
#define RSAP_VALUE_OFST 9
static void tr_tx(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data;
unsigned int hdr_len;
__u32 dhb=0,dhb_base;
void __iomem *dhbuf = NULL;
unsigned char xmit_command;
int i,dhb_len=0x4000,src_len,src_offset;
struct trllc *llc;
struct srb_xmit xsrb;
__u8 dhb_page = 0;
__u8 llc_ssap;
SET_PAGE(ti->asb_page);
if (readb(ti->asb+RETCODE_OFST) != 0xFF) DPRINTK("ASB not free !!!\n");
/* The adapter, in raising the transmit interrupt, is telling us it is
ready for data and is providing a shared memory address for us to fill.
Here we compute the effective address where we will place the data.
*/
SET_PAGE(ti->arb_page);
dhb=dhb_base=ntohs(readw(ti->arb + DHB_ADDRESS_OFST));
if (ti->page_mask) {
dhb_page = (dhb_base >> 8) & ti->page_mask;
dhb=dhb_base & ~(ti->page_mask << 8);
}
dhbuf = ti->sram_virt + dhb;
/* Figure out the size of the 802.5 header */
if (!(trhdr->saddr[0] & 0x80)) /* RIF present? */
hdr_len = sizeof(struct trh_hdr) - TR_MAXRIFLEN;
else
hdr_len = ((ntohs(trhdr->rcf) & TR_RCF_LEN_MASK) >> 8)
+ sizeof(struct trh_hdr) - TR_MAXRIFLEN;
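/*
* Illustrative: the top bit of the source address flags a RIF. Without
* one, hdr_len is just the fixed MAC header, sizeof(struct trh_hdr) -
* TR_MAXRIFLEN; with one, the rcf length field adds back exactly as
* many routing bytes as the frame actually carries.
*/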
llc = (struct trllc *) (ti->current_skb->data + hdr_len);
llc_ssap = llc->ssap;
SET_PAGE(ti->srb_page);
memcpy_fromio(&xsrb, ti->srb, sizeof(xsrb));
SET_PAGE(ti->asb_page);
xmit_command = xsrb.command;
writeb(xmit_command, ti->asb + COMMAND_OFST);
writew(xsrb.station_id, ti->asb + STATION_ID_OFST);
writeb(llc_ssap, ti->asb + RSAP_VALUE_OFST);
writeb(xsrb.cmd_corr, ti->asb + CMD_CORRELATE_OFST);
writeb(0, ti->asb + RETCODE_OFST);
if ((xmit_command == XMIT_XID_CMD) || (xmit_command == XMIT_TEST_CMD)) {
writew(htons(0x11), ti->asb + FRAME_LENGTH_OFST);
writeb(0x0e, ti->asb + HEADER_LENGTH_OFST);
SET_PAGE(dhb_page);
writeb(AC, dhbuf);
writeb(LLC_FRAME, dhbuf + 1);
for (i = 0; i < TR_ALEN; i++)
writeb((int) 0x0FF, dhbuf + i + 2);
for (i = 0; i < TR_ALEN; i++)
writeb(0, dhbuf + i + TR_ALEN + 2);
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
/*
* the token ring packet is copied from sk_buff to the adapter
* buffer identified in the command data received with the interrupt.
*/
writeb(hdr_len, ti->asb + HEADER_LENGTH_OFST);
writew(htons(ti->current_skb->len), ti->asb + FRAME_LENGTH_OFST);
src_len=ti->current_skb->len;
src_offset=0;
dhb=dhb_base;
while(1) {
if (ti->page_mask) {
dhb_page=(dhb >> 8) & ti->page_mask;
dhb=dhb & ~(ti->page_mask << 8);
dhb_len=0x4000-dhb; /* remaining size of this page */
}
dhbuf = ti->sram_virt + dhb;
SET_PAGE(dhb_page);
if (src_len > dhb_len) {
memcpy_toio(dhbuf,&ti->current_skb->data[src_offset],
dhb_len);
src_len -= dhb_len;
src_offset += dhb_len;
dhb_base+=dhb_len;
dhb=dhb_base;
continue;
}
memcpy_toio(dhbuf, &ti->current_skb->data[src_offset], src_len);
break;
}
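/*
* Illustrative note on the loop above: with paging enabled, dhb_len is
* recomputed as 0x4000 - dhb each pass, i.e. the bytes left before the
* next 16KB window boundary; a frame larger than that is copied in
* slices, advancing dhb_base so SET_PAGE selects the next window.
*/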
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
dev->stats.tx_bytes += ti->current_skb->len;
dev_kfree_skb_irq(ti->current_skb);
ti->current_skb = NULL;
netif_wake_queue(dev);
if (ti->readlog_pending)
ibmtr_readlog(dev);
} /*tr_tx */
/*****************************************************************************/
#define RECEIVE_BUFFER_OFST 6
#define LAN_HDR_LENGTH_OFST 8
#define DLC_HDR_LENGTH_OFST 9
#define DSAP_OFST 0
#define SSAP_OFST 1
#define LLC_OFST 2
#define PROTID_OFST 3
#define ETHERTYPE_OFST 6
static void tr_rx(struct net_device *dev)
{
struct tok_info *ti = netdev_priv(dev);
__u32 rbuffer;
void __iomem *rbuf, *rbufdata, *llc;
__u8 rbuffer_page = 0;
unsigned char *data;
unsigned int rbuffer_len, lan_hdr_len, hdr_len, ip_len, length;
unsigned char dlc_hdr_len;
struct sk_buff *skb;
unsigned int skb_size = 0;
int IPv4_p = 0;
unsigned int chksum = 0;
struct iphdr *iph;
struct arb_rec_req rarb;
SET_PAGE(ti->arb_page);
memcpy_fromio(&rarb, ti->arb, sizeof(rarb));
rbuffer = ntohs(rarb.rec_buf_addr) ;
rbuf = map_address(ti, rbuffer, &rbuffer_page);
SET_PAGE(ti->asb_page);
if (readb(ti->asb + RETCODE_OFST) !=0xFF) DPRINTK("ASB not free !!!\n");
writeb(REC_DATA, ti->asb + COMMAND_OFST);
writew(rarb.station_id, ti->asb + STATION_ID_OFST);
writew(rarb.rec_buf_addr, ti->asb + RECEIVE_BUFFER_OFST);
lan_hdr_len = rarb.lan_hdr_len;
if (lan_hdr_len > sizeof(struct trh_hdr)) {
DPRINTK("Linux cannot handle greater than 18 bytes RIF\n");
return;
} /*BMS I added this above just to be very safe */
dlc_hdr_len = readb(ti->arb + DLC_HDR_LENGTH_OFST);
hdr_len = lan_hdr_len + sizeof(struct trllc) + sizeof(struct iphdr);
SET_PAGE(rbuffer_page);
llc = rbuf + offsetof(struct rec_buf, data) + lan_hdr_len;
#if TR_VERBOSE
DPRINTK("offsetof data: %02X lan_hdr_len: %02X\n",
(__u32) offsetof(struct rec_buf, data), (unsigned int) lan_hdr_len);
DPRINTK("llc: %08X rec_buf_addr: %04X dev->mem_start: %lX\n",
llc, ntohs(rarb.rec_buf_addr), dev->mem_start);
DPRINTK("dsap: %02X, ssap: %02X, llc: %02X, protid: %02X%02X%02X, "
"ethertype: %04X\n",
(int) readb(llc + DSAP_OFST), (int) readb(llc + SSAP_OFST),
(int) readb(llc + LLC_OFST), (int) readb(llc + PROTID_OFST),
(int) readb(llc+PROTID_OFST+1),(int)readb(llc+PROTID_OFST + 2),
(int) ntohs(readw(llc + ETHERTYPE_OFST)));
#endif
if (readb(llc + offsetof(struct trllc, llc)) != UI_CMD) {
SET_PAGE(ti->asb_page);
writeb(DATA_LOST, ti->asb + RETCODE_OFST);
dev->stats.rx_dropped++;
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
length = ntohs(rarb.frame_len);
if (readb(llc + DSAP_OFST) == EXTENDED_SAP &&
readb(llc + SSAP_OFST) == EXTENDED_SAP &&
length >= hdr_len) IPv4_p = 1;
#if TR_VERBOSE
#define SADDR_OFST 8
#define DADDR_OFST 2
if (!IPv4_p) {
void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
u8 saddr[6];
u8 daddr[6];
int i;
for (i = 0 ; i < 6 ; i++)
saddr[i] = readb(trhhdr + SADDR_OFST + i);
for (i = 0 ; i < 6 ; i++)
daddr[i] = readb(trhhdr + DADDR_OFST + i);
DPRINTK("Probably non-IP frame received.\n");
DPRINTK("ssap: %02X dsap: %02X "
"saddr: %pM daddr: %pM\n",
readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
saddr, daddr);
}
#endif
/*BMS handle the case she comes in with few hops but leaves with many */
skb_size=length-lan_hdr_len+sizeof(struct trh_hdr)+sizeof(struct trllc);
if (!(skb = dev_alloc_skb(skb_size))) {
DPRINTK("out of memory. frame dropped.\n");
dev->stats.rx_dropped++;
SET_PAGE(ti->asb_page);
writeb(DATA_LOST, ti->asb + offsetof(struct asb_rec, ret_code));
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
return;
}
/*BMS again, if she comes in with few but leaves with many */
skb_reserve(skb, sizeof(struct trh_hdr) - lan_hdr_len);
skb_put(skb, length);
data = skb->data;
rbuffer_len = ntohs(readw(rbuf + offsetof(struct rec_buf, buf_len)));
rbufdata = rbuf + offsetof(struct rec_buf, data);
if (IPv4_p) {
/* Copy the headers without checksumming */
memcpy_fromio(data, rbufdata, hdr_len);
/* Watch for padded packets and bogons */
iph= (struct iphdr *)(data+ lan_hdr_len + sizeof(struct trllc));
ip_len = ntohs(iph->tot_len) - sizeof(struct iphdr);
length -= hdr_len;
if ((ip_len <= length) && (ip_len > 7))
length = ip_len;
data += hdr_len;
rbuffer_len -= hdr_len;
rbufdata += hdr_len;
}
/* Copy the payload... */
#define BUFFER_POINTER_OFST 2
#define BUFFER_LENGTH_OFST 6
for (;;) {
if (ibmtr_debug_trace&TRC_INITV && length < rbuffer_len)
DPRINTK("CURIOUS, length=%d < rbuffer_len=%d\n",
length,rbuffer_len);
if (IPv4_p)
chksum=csum_partial_copy_nocheck((void*)rbufdata,
data,length<rbuffer_len?length:rbuffer_len,chksum);
else
memcpy_fromio(data, rbufdata, rbuffer_len);
rbuffer = ntohs(readw(rbuf+BUFFER_POINTER_OFST)) ;
if (!rbuffer)
break;
rbuffer -= 2;
length -= rbuffer_len;
data += rbuffer_len;
rbuf = map_address(ti, rbuffer, &rbuffer_page);
SET_PAGE(rbuffer_page);
rbuffer_len = ntohs(readw(rbuf + BUFFER_LENGTH_OFST));
rbufdata = rbuf + offsetof(struct rec_buf, data);
}
SET_PAGE(ti->asb_page);
writeb(0, ti->asb + offsetof(struct asb_rec, ret_code));
writeb(RESP_IN_ASB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
skb->protocol = tr_type_trans(skb, dev);
if (IPv4_p) {
skb->csum = chksum;
skb->ip_summed = CHECKSUM_COMPLETE;
}
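/*
* Illustrative note: for IPv4 frames the copy loop above accumulated a
* checksum via csum_partial_copy_nocheck, so the skb goes up with
* CHECKSUM_COMPLETE and skb->csum already covering the copied payload,
* sparing the stack another pass over the data.
*/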
netif_rx(skb);
} /*tr_rx */
/*****************************************************************************/
static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
{
tmr->expires = jiffies + TR_RETRY_INTERVAL;
tmr->data = (unsigned long) dev;
tmr->function = tok_rerun;
init_timer(tmr);
add_timer(tmr);
}
/*****************************************************************************/
static void tok_rerun(unsigned long dev_addr)
{
struct net_device *dev = (struct net_device *)dev_addr;
struct tok_info *ti = netdev_priv(dev);
if ( ti->open_action == RESTART){
ti->do_tok_int = FIRST_INT;
outb(0, dev->base_addr + ADAPTRESETREL);
#ifdef ENABLE_PAGING
if (ti->page_mask)
writeb(SRPR_ENABLE_PAGING,
ti->mmio + ACA_OFFSET + ACA_RW + SRPR_EVEN);
#endif
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
} else
tok_open_adapter(dev_addr);
}
/*****************************************************************************/
static void ibmtr_readlog(struct net_device *dev)
{
struct tok_info *ti;
ti = netdev_priv(dev);
ti->readlog_pending = 0;
SET_PAGE(ti->srb_page);
writeb(DIR_READ_LOG, ti->srb);
writeb(INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_SET + ISRP_EVEN);
writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
netif_stop_queue(dev);
}
/*****************************************************************************/
static int ibmtr_change_mtu(struct net_device *dev, int mtu)
{
struct tok_info *ti = netdev_priv(dev);
if (ti->ring_speed == 16 && mtu > ti->maxmtu16)
return -EINVAL;
if (ti->ring_speed == 4 && mtu > ti->maxmtu4)
return -EINVAL;
dev->mtu = mtu;
return 0;
}
/*****************************************************************************/
#ifdef MODULE
/* 3COM 3C619C supports 8 interrupts, 32 I/O ports */
static struct net_device *dev_ibmtr[IBMTR_MAX_ADAPTERS];
static int io[IBMTR_MAX_ADAPTERS] = { 0xa20, 0xa24 };
static int irq[IBMTR_MAX_ADAPTERS];
static int mem[IBMTR_MAX_ADAPTERS];
MODULE_LICENSE("GPL");
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
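/*
 * Example usage (hypothetical invocation; the io= values below are the
 * driver defaults):
 *
 *	modprobe ibmtr io=0xa20,0xa24
 *
 * Note that ibmtr_init() clears irq[] and mem[] before probing, so those
 * two parameters are effectively probed rather than taken from the user.
 */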
static int __init ibmtr_init(void)
{
int i;
int count=0;
find_turbo_adapters(io);
for (i = 0; i < IBMTR_MAX_ADAPTERS && io[i]; i++) {
struct net_device *dev;
irq[i] = 0;
mem[i] = 0;
dev = alloc_trdev(sizeof(struct tok_info));
if (dev == NULL) {
if (i == 0)
return -ENOMEM;
break;
}
dev->base_addr = io[i];
dev->irq = irq[i];
dev->mem_start = mem[i];
if (ibmtr_probe_card(dev)) {
free_netdev(dev);
continue;
}
dev_ibmtr[i] = dev;
count++;
}
if (count)
return 0;
printk("ibmtr: register_netdev() returned non-zero.\n");
return -EIO;
}
module_init(ibmtr_init);
static void __exit ibmtr_cleanup(void)
{
int i;
for (i = 0; i < IBMTR_MAX_ADAPTERS; i++){
if (!dev_ibmtr[i])
continue;
unregister_netdev(dev_ibmtr[i]);
ibmtr_cleanup_card(dev_ibmtr[i]);
free_netdev(dev_ibmtr[i]);
}
}
module_exit(ibmtr_cleanup);
#endif
| gpl-2.0 |
jeboo/kernel_JB_ZSLS6_i9100 | drivers/infiniband/hw/mthca/mthca_eq.c | 2636 | 25634 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"
enum {
MTHCA_NUM_ASYNC_EQE = 0x80,
MTHCA_NUM_CMD_EQE = 0x80,
MTHCA_NUM_SPARE_EQE = 0x80,
MTHCA_EQ_ENTRY_SIZE = 0x20
};
/*
* Must be packed because start is 64 bits but only aligned to 32 bits.
*/
struct mthca_eq_context {
__be32 flags;
__be64 start;
__be32 logsize_usrpage;
__be32 tavor_pd; /* reserved for Arbel */
u8 reserved1[3];
u8 intr;
__be32 arbel_pd; /* lost_count for Tavor */
__be32 lkey;
u32 reserved2[2];
__be32 consumer_index;
__be32 producer_index;
u32 reserved3[4];
} __attribute__((packed));
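/*
 * Layout sketch (assuming the usual ABI rules): without the packed
 * attribute the compiler would insert padding between 'flags' (offset 0,
 * 4 bytes) and the 64-bit 'start' to align the latter to 8 bytes,
 * shifting every following field by 4 and breaking the layout the HCA
 * expects. With the attribute, 'start' stays at offset 4 as required.
 */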
#define MTHCA_EQ_STATUS_OK ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MTHCA_EQ_OWNER_SW ( 0 << 24)
#define MTHCA_EQ_OWNER_HW ( 1 << 24)
#define MTHCA_EQ_FLAG_TR ( 1 << 18)
#define MTHCA_EQ_FLAG_OI ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED ( 1 << 8)
#define MTHCA_EQ_STATE_FIRED ( 2 << 8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8)
#define MTHCA_EQ_STATE_ARBEL ( 8 << 8)
enum {
MTHCA_EVENT_TYPE_COMP = 0x00,
MTHCA_EVENT_TYPE_PATH_MIG = 0x01,
MTHCA_EVENT_TYPE_COMM_EST = 0x02,
MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03,
MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14,
MTHCA_EVENT_TYPE_CQ_ERROR = 0x04,
MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08,
MTHCA_EVENT_TYPE_PORT_CHANGE = 0x09,
MTHCA_EVENT_TYPE_EQ_OVERFLOW = 0x0f,
MTHCA_EVENT_TYPE_ECC_DETECT = 0x0e,
MTHCA_EVENT_TYPE_CMD = 0x0a
};
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG) | \
(1ULL << MTHCA_EVENT_TYPE_COMM_EST) | \
(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED) | \
(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE) | \
(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ (3 << 24)
#define MTHCA_EQ_DB_SET_CI (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
struct mthca_eqe {
u8 reserved1;
u8 type;
u8 reserved2;
u8 subtype;
union {
u32 raw[6];
struct {
__be32 cqn;
} __attribute__((packed)) comp;
struct {
u16 reserved1;
__be16 token;
u32 reserved2;
u8 reserved3[3];
u8 status;
__be64 out_param;
} __attribute__((packed)) cmd;
struct {
__be32 qpn;
} __attribute__((packed)) qp;
struct {
__be32 srqn;
} __attribute__((packed)) srq;
struct {
__be32 cqn;
u32 reserved1;
u8 reserved2[3];
u8 syndrome;
} __attribute__((packed)) cq_err;
struct {
u32 reserved1[2];
__be32 port;
} __attribute__((packed)) port_change;
} event;
u8 reserved3[3];
u8 owner;
} __attribute__((packed));
#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
static inline u64 async_mask(struct mthca_dev *dev)
{
return dev->mthca_flags & MTHCA_FLAG_SRQ ?
MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
MTHCA_ASYNC_EVENT_MASK;
}
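/*
 * For illustration: MAP_EQ takes a 64-bit mask with one bit per event
 * type, so e.g. MTHCA_EVENT_TYPE_PORT_CHANGE (0x09) contributes
 * 1ULL << 0x09 = 0x200. When the HCA supports SRQs, the three SRQ event
 * bits (types 0x12-0x14) are ORed in as well, as computed above.
 */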
static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
/*
* This barrier makes sure that all updates to ownership bits
* done by set_eqe_hw() hit memory before the consumer index
* is updated. set_eq_ci() allows the HCA to possibly write
* more EQ entries, and we want to avoid the exceedingly
* unlikely possibility of the HCA writing an entry and then
* having set_eqe_hw() overwrite the owner field.
*/
wmb();
mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
dev->kar + MTHCA_EQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
/* See comment in tavor_set_eq_ci() above. */
wmb();
__raw_writel((__force u32) cpu_to_be32(ci),
dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
}
static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
if (mthca_is_memfree(dev))
arbel_set_eq_ci(dev, eq, ci);
else
tavor_set_eq_ci(dev, eq, ci);
}
static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
dev->kar + MTHCA_EQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
if (!mthca_is_memfree(dev)) {
mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
dev->kar + MTHCA_EQ_DOORBELL,
MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
}
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
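/*
 * Worked example (assuming 4 KB pages): with MTHCA_EQ_ENTRY_SIZE = 32
 * and eq->nent = 256, entry 300 wraps to slot 300 & 255 = 44, giving
 * off = 44 * 32 = 1408, i.e. byte 1408 of page 0; entry 130 gives
 * off = 4160, i.e. byte 64 of page 1. The power-of-two queue size makes
 * the wrap a simple mask.
 */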
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
struct mthca_eqe *eqe;
eqe = get_eqe(eq, eq->cons_index);
return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}
static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}
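/*
 * Ownership handshake, roughly: mthca_create_eq() initializes every EQE
 * with the owner bit set (hardware owns it); the HCA writes an event and
 * clears the bit, which is what next_eqe_sw() tests for; once the event
 * is processed, set_eqe_hw() hands the slot back to the hardware and the
 * consumer index advances.
 */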
static void port_change(struct mthca_dev *dev, int port, int active)
{
struct ib_event record;
mthca_dbg(dev, "Port change to %s for port %d\n",
active ? "active" : "down", port);
record.device = &dev->ib_dev;
record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
record.element.port_num = port;
ib_dispatch_event(&record);
}
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
struct mthca_eqe *eqe;
int disarm_cqn;
int eqes_found = 0;
int set_ci = 0;
while ((eqe = next_eqe_sw(eq))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
rmb();
switch (eqe->type) {
case MTHCA_EVENT_TYPE_COMP:
disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
disarm_cq(dev, eq->eqn, disarm_cqn);
mthca_cq_completion(dev, disarm_cqn);
break;
case MTHCA_EVENT_TYPE_PATH_MIG:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_PATH_MIG);
break;
case MTHCA_EVENT_TYPE_COMM_EST:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_COMM_EST);
break;
case MTHCA_EVENT_TYPE_SQ_DRAINED:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_SQ_DRAINED);
break;
case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_LAST_WQE_REACHED);
break;
case MTHCA_EVENT_TYPE_SRQ_LIMIT:
mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
IB_EVENT_SRQ_LIMIT_REACHED);
break;
case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_FATAL);
break;
case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_PATH_MIG_ERR);
break;
case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_REQ_ERR);
break;
case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
IB_EVENT_QP_ACCESS_ERR);
break;
case MTHCA_EVENT_TYPE_CMD:
mthca_cmd_event(dev,
be16_to_cpu(eqe->event.cmd.token),
eqe->event.cmd.status,
be64_to_cpu(eqe->event.cmd.out_param));
break;
case MTHCA_EVENT_TYPE_PORT_CHANGE:
port_change(dev,
(be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
eqe->subtype == 0x4);
break;
case MTHCA_EVENT_TYPE_CQ_ERROR:
mthca_warn(dev, "CQ %s on CQN %06x\n",
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
IB_EVENT_CQ_ERR);
break;
case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
case MTHCA_EVENT_TYPE_ECC_DETECT:
default:
mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
eqe->type, eqe->subtype, eq->eqn);
break;
}
set_eqe_hw(eqe);
++eq->cons_index;
eqes_found = 1;
++set_ci;
/*
* The HCA will think the queue has overflowed if we
* don't tell it we've been processing events. We
* create our EQs with MTHCA_NUM_SPARE_EQE extra
* entries, so we must update our consumer index at
* least that often.
*/
if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
/*
* Conditional on hca_type is OK here because
* this is a rare case, not the fast path.
*/
set_eq_ci(dev, eq, eq->cons_index);
set_ci = 0;
}
}
/*
* Rely on caller to set consumer index so that we don't have
* to test hca_type in our interrupt handling fast path.
*/
return eqes_found;
}
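/*
 * For illustration: MTHCA_NUM_SPARE_EQE is 0x80, and the EQs are created
 * with that many extra entries (see mthca_init_eq_table() below), so
 * updating the consumer index at least every 128 events is enough to
 * keep the HCA from ever seeing the queue as full.
 */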
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
struct mthca_dev *dev = dev_ptr;
u32 ecr;
int i;
if (dev->eq_table.clr_mask)
writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
if (!ecr)
return IRQ_NONE;
writel(ecr, dev->eq_regs.tavor.ecr_base +
MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (ecr & dev->eq_table.eq[i].eqn_mask) {
if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
dev->eq_table.eq[i].cons_index);
tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
}
return IRQ_HANDLED;
}
static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
struct mthca_eq *eq = eq_ptr;
struct mthca_dev *dev = eq->dev;
mthca_eq_int(dev, eq);
tavor_set_eq_ci(dev, eq, eq->cons_index);
tavor_eq_req_not(dev, eq->eqn);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
struct mthca_dev *dev = dev_ptr;
int work = 0;
int i;
if (dev->eq_table.clr_mask)
writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
work = 1;
arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
dev->eq_table.eq[i].cons_index);
}
arbel_eq_req_not(dev, dev->eq_table.arm_mask);
return IRQ_RETVAL(work);
}
static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
struct mthca_eq *eq = eq_ptr;
struct mthca_dev *dev = eq->dev;
mthca_eq_int(dev, eq);
arbel_set_eq_ci(dev, eq, eq->cons_index);
arbel_eq_req_not(dev, eq->eqn_mask);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
static int mthca_create_eq(struct mthca_dev *dev,
int nent,
u8 intr,
struct mthca_eq *eq)
{
int npages;
u64 *dma_list = NULL;
dma_addr_t t;
struct mthca_mailbox *mailbox;
struct mthca_eq_context *eq_context;
int err = -ENOMEM;
int i;
u8 status;
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
if (!eq->page_list)
goto err_out;
for (i = 0; i < npages; ++i)
eq->page_list[i].buf = NULL;
dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
if (!dma_list)
goto err_out_free;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
goto err_out_free;
eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
PAGE_SIZE, &t, GFP_KERNEL);
if (!eq->page_list[i].buf)
goto err_out_free_pages;
dma_list[i] = t;
dma_unmap_addr_set(&eq->page_list[i], mapping, t);
clear_page(eq->page_list[i].buf);
}
for (i = 0; i < eq->nent; ++i)
set_eqe_hw(get_eqe(eq, i));
eq->eqn = mthca_alloc(&dev->eq_table.alloc);
if (eq->eqn == -1)
goto err_out_free_pages;
err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
dma_list, PAGE_SHIFT, npages,
0, npages * PAGE_SIZE,
MTHCA_MPT_FLAG_LOCAL_WRITE |
MTHCA_MPT_FLAG_LOCAL_READ,
&eq->mr);
if (err)
goto err_out_free_eq;
memset(eq_context, 0, sizeof *eq_context);
eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
MTHCA_EQ_OWNER_HW |
MTHCA_EQ_STATE_ARMED |
MTHCA_EQ_FLAG_TR);
if (mthca_is_memfree(dev))
eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
if (mthca_is_memfree(dev)) {
eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
} else {
eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
eq_context->tavor_pd = cpu_to_be32(dev->driver_pd.pd_num);
}
eq_context->intr = intr;
eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
if (err) {
mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
goto err_out_free_mr;
}
if (status) {
mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
status);
err = -EINVAL;
goto err_out_free_mr;
}
kfree(dma_list);
mthca_free_mailbox(dev, mailbox);
eq->eqn_mask = swab32(1 << eq->eqn);
eq->cons_index = 0;
dev->eq_table.arm_mask |= eq->eqn_mask;
mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
eq->eqn, eq->nent);
return err;
err_out_free_mr:
mthca_free_mr(dev, &eq->mr);
err_out_free_eq:
mthca_free(&dev->eq_table.alloc, eq->eqn);
err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
dma_unmap_addr(&eq->page_list[i],
mapping));
mthca_free_mailbox(dev, mailbox);
err_out_free:
kfree(eq->page_list);
kfree(dma_list);
err_out:
return err;
}
static void mthca_free_eq(struct mthca_dev *dev,
struct mthca_eq *eq)
{
struct mthca_mailbox *mailbox;
int err;
u8 status;
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
PAGE_SIZE;
int i;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return;
err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
if (err)
mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
if (status)
mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
dev->eq_table.arm_mask &= ~eq->eqn_mask;
if (0) {
mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
if ((i + 1) % 4 == 0)
printk("\n");
}
}
mthca_free_mr(dev, &eq->mr);
for (i = 0; i < npages; ++i)
pci_free_consistent(dev->pdev, PAGE_SIZE,
eq->page_list[i].buf,
dma_unmap_addr(&eq->page_list[i], mapping));
kfree(eq->page_list);
mthca_free_mailbox(dev, mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
{
int i;
if (dev->eq_table.have_irq)
free_irq(dev->pdev->irq, dev);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (dev->eq_table.eq[i].have_irq) {
free_irq(dev->eq_table.eq[i].msi_x_vector,
dev->eq_table.eq + i);
dev->eq_table.eq[i].have_irq = 0;
}
}
static int mthca_map_reg(struct mthca_dev *dev,
unsigned long offset, unsigned long size,
void __iomem **map)
{
phys_addr_t base = pci_resource_start(dev->pdev, 0);
*map = ioremap(base + offset, size);
if (!*map)
return -ENOMEM;
return 0;
}
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
/*
* We assume that the EQ arm and EQ set CI registers
* fall within the first BAR. We can't trust the
* values firmware gives us, since those addresses are
* valid on the HCA's side of the PCI bus but not
* necessarily the host side.
*/
if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
&dev->clr_base)) {
mthca_err(dev, "Couldn't map interrupt clear register, "
"aborting.\n");
return -ENOMEM;
}
/*
* Add 4 because we limit ourselves to EQs 0 ... 31,
* so we only need the low word of the register.
*/
if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_arm_base) + 4, 4,
&dev->eq_regs.arbel.eq_arm)) {
mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
iounmap(dev->clr_base);
return -ENOMEM;
}
if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_set_ci_base,
MTHCA_EQ_SET_CI_SIZE,
&dev->eq_regs.arbel.eq_set_ci_base)) {
mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
iounmap(dev->eq_regs.arbel.eq_arm);
iounmap(dev->clr_base);
return -ENOMEM;
}
} else {
if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
&dev->clr_base)) {
mthca_err(dev, "Couldn't map interrupt clear register, "
"aborting.\n");
return -ENOMEM;
}
if (mthca_map_reg(dev, MTHCA_ECR_BASE,
MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
&dev->eq_regs.tavor.ecr_base)) {
mthca_err(dev, "Couldn't map ecr register, "
"aborting.\n");
iounmap(dev->clr_base);
return -ENOMEM;
}
}
return 0;
}
static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
if (mthca_is_memfree(dev)) {
iounmap(dev->eq_regs.arbel.eq_set_ci_base);
iounmap(dev->eq_regs.arbel.eq_arm);
iounmap(dev->clr_base);
} else {
iounmap(dev->eq_regs.tavor.ecr_base);
iounmap(dev->clr_base);
}
}
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
int ret;
u8 status;
/*
* We assume that mapping one page is enough for the whole EQ
* context table. This is fine with all current HCAs, because
* we only use 32 EQs and each EQ uses 32 bytes of context
* memory, or 1 KB total.
*/
dev->eq_table.icm_virt = icm_virt;
dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
if (!dev->eq_table.icm_page)
return -ENOMEM;
dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
__free_page(dev->eq_table.icm_page);
return -ENOMEM;
}
ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
if (!ret && status)
ret = -EINVAL;
if (ret) {
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
return ret;
}
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
u8 status;
mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
}
int mthca_init_eq_table(struct mthca_dev *dev)
{
int err;
u8 status;
u8 intr;
int i;
err = mthca_alloc_init(&dev->eq_table.alloc,
dev->limits.num_eqs,
dev->limits.num_eqs - 1,
dev->limits.reserved_eqs);
if (err)
return err;
err = mthca_map_eq_regs(dev);
if (err)
goto err_out_free;
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
dev->eq_table.clr_mask = 0;
} else {
dev->eq_table.clr_mask =
swab32(1 << (dev->eq_table.inta_pin & 31));
dev->eq_table.clr_int = dev->clr_base +
(dev->eq_table.inta_pin < 32 ? 4 : 0);
}
dev->eq_table.arm_mask = 0;
intr = dev->eq_table.inta_pin;
err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
&dev->eq_table.eq[MTHCA_EQ_COMP]);
if (err)
goto err_out_unmap;
err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
&dev->eq_table.eq[MTHCA_EQ_ASYNC]);
if (err)
goto err_out_comp;
err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
&dev->eq_table.eq[MTHCA_EQ_CMD]);
if (err)
goto err_out_async;
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
static const char *eq_name[] = {
[MTHCA_EQ_COMP] = DRV_NAME "-comp",
[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
[MTHCA_EQ_CMD] = DRV_NAME "-cmd"
};
for (i = 0; i < MTHCA_NUM_EQ; ++i) {
snprintf(dev->eq_table.eq[i].irq_name,
IB_DEVICE_NAME_MAX,
"%s@pci:%s", eq_name[i],
pci_name(dev->pdev));
err = request_irq(dev->eq_table.eq[i].msi_x_vector,
mthca_is_memfree(dev) ?
mthca_arbel_msi_x_interrupt :
mthca_tavor_msi_x_interrupt,
0, dev->eq_table.eq[i].irq_name,
dev->eq_table.eq + i);
if (err)
goto err_out_cmd;
dev->eq_table.eq[i].have_irq = 1;
}
} else {
snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
DRV_NAME "@pci:%s", pci_name(dev->pdev));
err = request_irq(dev->pdev->irq,
mthca_is_memfree(dev) ?
mthca_arbel_interrupt :
mthca_tavor_interrupt,
IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
if (err)
goto err_out_cmd;
dev->eq_table.have_irq = 1;
}
err = mthca_MAP_EQ(dev, async_mask(dev),
0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
if (err)
mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
if (status)
mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
if (err)
mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
if (status)
mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
else
tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
return 0;
err_out_cmd:
mthca_free_irqs(dev);
mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);
err_out_async:
mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
err_out_comp:
mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);
err_out_unmap:
mthca_unmap_eq_regs(dev);
err_out_free:
mthca_alloc_cleanup(&dev->eq_table.alloc);
return err;
}
void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
u8 status;
int i;
mthca_free_irqs(dev);
mthca_MAP_EQ(dev, async_mask(dev),
1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
mthca_free_eq(dev, &dev->eq_table.eq[i]);
mthca_unmap_eq_regs(dev);
mthca_alloc_cleanup(&dev->eq_table.alloc);
}
| gpl-2.0 |
CyanogenMod/android_kernel_oneplus_msm8974 | drivers/input/mouse/psmouse-base.c | 4172 | 45763 | /*
* PS/2 mouse driver
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 2003-2004 Dmitry Torokhov
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define psmouse_fmt(fmt) fmt
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/init.h>
#include <linux/libps2.h>
#include <linux/mutex.h>
#include "psmouse.h"
#include "synaptics.h"
#include "logips2pp.h"
#include "alps.h"
#include "hgpk.h"
#include "lifebook.h"
#include "trackpoint.h"
#include "touchkit_ps2.h"
#include "elantech.h"
#include "sentelic.h"
#define DRIVER_DESC "PS/2 mouse driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);
static struct kernel_param_ops param_ops_proto_abbrev = {
.set = psmouse_set_maxproto,
.get = psmouse_get_maxproto,
};
#define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int)
module_param_named(proto, psmouse_max_proto, proto_abbrev, 0644);
MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). Useful for KVM switches.");
static unsigned int psmouse_resolution = 200;
module_param_named(resolution, psmouse_resolution, uint, 0644);
MODULE_PARM_DESC(resolution, "Resolution, in dpi.");
static unsigned int psmouse_rate = 100;
module_param_named(rate, psmouse_rate, uint, 0644);
MODULE_PARM_DESC(rate, "Report rate, in reports per second.");
static bool psmouse_smartscroll = true;
module_param_named(smartscroll, psmouse_smartscroll, bool, 0644);
MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled.");
static unsigned int psmouse_resetafter = 5;
module_param_named(resetafter, psmouse_resetafter, uint, 0644);
MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
static unsigned int psmouse_resync_time;
module_param_named(resync_time, psmouse_resync_time, uint, 0644);
MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never).");
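/*
 * Example usage (hypothetical values): the parameters above can be set
 * at load time, e.g.
 *
 *	modprobe psmouse proto=imps rate=80 resolution=400
 *
 * or on the kernel command line as psmouse.proto=imps and so on.
 */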
PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
NULL,
psmouse_attr_show_protocol, psmouse_attr_set_protocol);
PSMOUSE_DEFINE_ATTR(rate, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, rate),
psmouse_show_int_attr, psmouse_attr_set_rate);
PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, resolution),
psmouse_show_int_attr, psmouse_attr_set_resolution);
PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, resetafter),
psmouse_show_int_attr, psmouse_set_int_attr);
PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
(void *) offsetof(struct psmouse, resync_time),
psmouse_show_int_attr, psmouse_set_int_attr);
static struct attribute *psmouse_attributes[] = {
&psmouse_attr_protocol.dattr.attr,
&psmouse_attr_rate.dattr.attr,
&psmouse_attr_resolution.dattr.attr,
&psmouse_attr_resetafter.dattr.attr,
&psmouse_attr_resync_time.dattr.attr,
NULL
};
static struct attribute_group psmouse_attribute_group = {
.attrs = psmouse_attributes,
};
/*
* psmouse_mutex protects all operations changing the state of the mouse
* (connecting, disconnecting, changing rate or resolution via
* sysfs). We could use a per-device semaphore, but since there is
* rarely more than one PS/2 mouse connected and since the semaphore
* is only taken in "slow" paths, it is not worth it.
*/
static DEFINE_MUTEX(psmouse_mutex);
static struct workqueue_struct *kpsmoused_wq;
struct psmouse_protocol {
enum psmouse_type type;
bool maxproto;
bool ignore_parity; /* Protocol should ignore parity errors from KBC */
const char *name;
const char *alias;
int (*detect)(struct psmouse *, bool);
int (*init)(struct psmouse *);
};
/*
* psmouse_process_byte() analyzes the PS/2 data stream and reports
* relevant events to the input module once a full packet has arrived.
*/
psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
unsigned char *packet = psmouse->packet;
if (psmouse->pktcnt < psmouse->pktsize)
return PSMOUSE_GOOD_DATA;
/*
* Full packet accumulated, process it
*/
/*
* Scroll wheel on IntelliMice, scroll buttons on NetMice
*/
if (psmouse->type == PSMOUSE_IMPS || psmouse->type == PSMOUSE_GENPS)
input_report_rel(dev, REL_WHEEL, -(signed char) packet[3]);
/*
* Scroll wheel and buttons on IntelliMouse Explorer
*/
if (psmouse->type == PSMOUSE_IMEX) {
switch (packet[3] & 0xC0) {
case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
break;
case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
break;
case 0x00:
case 0xC0:
input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
break;
}
}
/*
* Extra buttons on Genius NewNet 3D
*/
if (psmouse->type == PSMOUSE_GENPS) {
input_report_key(dev, BTN_SIDE, (packet[0] >> 6) & 1);
input_report_key(dev, BTN_EXTRA, (packet[0] >> 7) & 1);
}
/*
* Extra button on ThinkingMouse
*/
if (psmouse->type == PSMOUSE_THINKPS) {
input_report_key(dev, BTN_EXTRA, (packet[0] >> 3) & 1);
/* Without this bit of weirdness moving up gives wildly high Y changes. */
packet[1] |= (packet[0] & 0x40) << 1;
}
/*
* Cortron PS2 Trackball reports SIDE button on the 4th bit of the first
* byte.
*/
if (psmouse->type == PSMOUSE_CORTRON) {
input_report_key(dev, BTN_SIDE, (packet[0] >> 3) & 1);
packet[0] |= 0x08;
}
/*
* Generic PS/2 Mouse
*/
input_report_key(dev, BTN_LEFT, packet[0] & 1);
input_report_key(dev, BTN_MIDDLE, (packet[0] >> 2) & 1);
input_report_key(dev, BTN_RIGHT, (packet[0] >> 1) & 1);
input_report_rel(dev, REL_X, packet[1] ? (int) packet[1] - (int) ((packet[0] << 4) & 0x100) : 0);
input_report_rel(dev, REL_Y, packet[2] ? (int) ((packet[0] << 3) & 0x100) - (int) packet[2] : 0);
input_sync(dev);
return PSMOUSE_FULL_PACKET;
}
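/*
 * Worked decode of a bare 3-byte packet (illustrative values): for
 * packet = { 0x18, 0xfe, 0x00 }, bit 4 of packet[0] is the X sign, so
 * REL_X = 0xfe - 0x100 = -2; packet[2] is zero so REL_Y is 0. Note that
 * the Y expression above is inverted because PS/2 reports Y growing
 * upwards while the input layer's REL_Y grows downwards.
 */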
void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work,
unsigned long delay)
{
queue_delayed_work(kpsmoused_wq, work, delay);
}
/*
* __psmouse_set_state() sets new psmouse state and resets all flags.
*/
static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
{
psmouse->state = new_state;
psmouse->pktcnt = psmouse->out_of_sync_cnt = 0;
psmouse->ps2dev.flags = 0;
psmouse->last = jiffies;
}
/*
* psmouse_set_state() sets new psmouse state and resets all flags and
* counters while holding serio lock so fighting with interrupt handler
* is not a concern.
*/
void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
{
serio_pause_rx(psmouse->ps2dev.serio);
__psmouse_set_state(psmouse, new_state);
serio_continue_rx(psmouse->ps2dev.serio);
}
/*
* psmouse_handle_byte() processes one byte of the input data stream
* by calling corresponding protocol handler.
*/
static int psmouse_handle_byte(struct psmouse *psmouse)
{
psmouse_ret_t rc = psmouse->protocol_handler(psmouse);
switch (rc) {
case PSMOUSE_BAD_DATA:
if (psmouse->state == PSMOUSE_ACTIVATED) {
psmouse_warn(psmouse,
"%s at %s lost sync at byte %d\n",
psmouse->name, psmouse->phys,
psmouse->pktcnt);
if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
psmouse_notice(psmouse,
"issuing reconnect request\n");
serio_reconnect(psmouse->ps2dev.serio);
return -1;
}
}
psmouse->pktcnt = 0;
break;
case PSMOUSE_FULL_PACKET:
psmouse->pktcnt = 0;
if (psmouse->out_of_sync_cnt) {
psmouse->out_of_sync_cnt = 0;
psmouse_notice(psmouse,
"%s at %s - driver resynced.\n",
psmouse->name, psmouse->phys);
}
break;
case PSMOUSE_GOOD_DATA:
break;
}
return 0;
}
/*
* psmouse_interrupt() handles incoming characters, either passing them
* for normal processing or gathering them as a command response.
*/
static irqreturn_t psmouse_interrupt(struct serio *serio,
unsigned char data, unsigned int flags)
{
struct psmouse *psmouse = serio_get_drvdata(serio);
if (psmouse->state == PSMOUSE_IGNORE)
goto out;
if (unlikely((flags & SERIO_TIMEOUT) ||
((flags & SERIO_PARITY) && !psmouse->ignore_parity))) {
if (psmouse->state == PSMOUSE_ACTIVATED)
psmouse_warn(psmouse,
"bad data from KBC -%s%s\n",
flags & SERIO_TIMEOUT ? " timeout" : "",
flags & SERIO_PARITY ? " bad parity" : "");
ps2_cmd_aborted(&psmouse->ps2dev);
goto out;
}
if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_ACK))
if (ps2_handle_ack(&psmouse->ps2dev, data))
goto out;
if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_CMD))
if (ps2_handle_response(&psmouse->ps2dev, data))
goto out;
if (psmouse->state <= PSMOUSE_RESYNCING)
goto out;
if (psmouse->state == PSMOUSE_ACTIVATED &&
psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
psmouse_info(psmouse, "%s at %s lost synchronization, throwing %d bytes away.\n",
psmouse->name, psmouse->phys, psmouse->pktcnt);
psmouse->badbyte = psmouse->packet[0];
__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
psmouse_queue_work(psmouse, &psmouse->resync_work, 0);
goto out;
}
psmouse->packet[psmouse->pktcnt++] = data;
/*
* Check if this is a new device announcement (0xAA 0x00)
*/
if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
if (psmouse->pktcnt == 1) {
psmouse->last = jiffies;
goto out;
}
if (psmouse->packet[1] == PSMOUSE_RET_ID ||
(psmouse->type == PSMOUSE_HGPK &&
psmouse->packet[1] == PSMOUSE_RET_BAT)) {
__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
serio_reconnect(serio);
goto out;
}
/*
* Not a new device, try processing first byte normally
*/
psmouse->pktcnt = 1;
if (psmouse_handle_byte(psmouse))
goto out;
psmouse->packet[psmouse->pktcnt++] = data;
}
/*
* See if we need to force a resync because the mouse was idle for too long
*/
if (psmouse->state == PSMOUSE_ACTIVATED &&
psmouse->pktcnt == 1 && psmouse->resync_time &&
time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) {
psmouse->badbyte = psmouse->packet[0];
__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
psmouse_queue_work(psmouse, &psmouse->resync_work, 0);
goto out;
}
psmouse->last = jiffies;
psmouse_handle_byte(psmouse);
out:
return IRQ_HANDLED;
}
/*
* psmouse_sliced_command() sends an extended PS/2 command to the mouse
* using sliced syntax, understood by advanced devices, such as Logitech
* or Synaptics touchpads. The command is encoded as:
* 0xE6 0xE8 rr 0xE8 ss 0xE8 tt 0xE8 uu where (rr*64)+(ss*16)+(tt*4)+uu
* is the command.
*/
int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command)
{
int i;
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
return -1;
for (i = 6; i >= 0; i -= 2) {
unsigned char d = (command >> i) & 3;
if (ps2_command(&psmouse->ps2dev, &d, PSMOUSE_CMD_SETRES))
return -1;
}
return 0;
}
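/*
 * Worked example (illustrative command byte): for command = 0x66 the
 * loop above sends (0x66 >> 6) & 3 = 1, (0x66 >> 4) & 3 = 2,
 * (0x66 >> 2) & 3 = 1 and 0x66 & 3 = 2 via SETRES, and indeed
 * 1*64 + 2*16 + 1*4 + 2 = 0x66 reconstructs the command.
 */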
/*
* psmouse_reset() resets the mouse into power-on state.
*/
int psmouse_reset(struct psmouse *psmouse)
{
unsigned char param[2];
if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_RESET_BAT))
return -1;
if (param[0] != PSMOUSE_RET_BAT && param[1] != PSMOUSE_RET_ID)
return -1;
return 0;
}
/*
* Here we set the mouse resolution.
*/
void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
{
static const unsigned char params[] = { 0, 1, 2, 2, 3 };
unsigned char p;
if (resolution == 0 || resolution > 200)
resolution = 200;
p = params[resolution / 50];
ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
psmouse->resolution = 25 << p;
}
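/*
 * Worked example: resolution = 100 indexes params[100 / 50] = 2, the
 * PS/2 code for 100 dpi, and 25 << 2 = 100 is recorded. An unsupported
 * request such as 150 also maps to code 2, i.e. it is rounded down to
 * the nearest supported value.
 */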
/*
* Here we set the mouse report rate.
*/
static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
{
static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
unsigned char r;
int i = 0;
while (rates[i] > rate) i++;
r = rates[i];
ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
psmouse->rate = r;
}
/*
* psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
*/
static int psmouse_poll(struct psmouse *psmouse)
{
return ps2_command(&psmouse->ps2dev, psmouse->packet,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
}
/*
* Genius NetMouse magic init.
*/
static int genius_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[4];
param[0] = 3;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO);
if (param[0] != 0x00 || param[1] != 0x33 || param[2] != 0x55)
return -1;
if (set_properties) {
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
__set_bit(BTN_EXTRA, psmouse->dev->keybit);
__set_bit(BTN_SIDE, psmouse->dev->keybit);
__set_bit(REL_WHEEL, psmouse->dev->relbit);
psmouse->vendor = "Genius";
psmouse->name = "Mouse";
psmouse->pktsize = 4;
}
return 0;
}
/*
* IntelliMouse magic init.
*/
static int intellimouse_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[2];
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 100;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 80;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);
if (param[0] != 3)
return -1;
if (set_properties) {
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
__set_bit(REL_WHEEL, psmouse->dev->relbit);
if (!psmouse->vendor)
psmouse->vendor = "Generic";
if (!psmouse->name)
psmouse->name = "Wheel Mouse";
psmouse->pktsize = 4;
}
return 0;
}
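/*
 * The sequence above is the classic "knock": a mouse that understands
 * the IntelliMouse extension answers the rate sequence 200, 100, 80
 * with device ID 3 on the next GETID, while a plain PS/2 mouse keeps
 * reporting 0. im_explorer_detect() below plays the same trick with
 * 200, 200, 80 to get ID 4.
 */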
/*
* Try IntelliMouse/Explorer magic init.
*/
static int im_explorer_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[2];
intellimouse_detect(psmouse, 0);
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 80;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);
if (param[0] != 4)
return -1;
/* Magic to enable horizontal scrolling on IntelliMouse 4.0 */
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 80;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 40;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
if (set_properties) {
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
__set_bit(REL_WHEEL, psmouse->dev->relbit);
__set_bit(REL_HWHEEL, psmouse->dev->relbit);
__set_bit(BTN_SIDE, psmouse->dev->keybit);
__set_bit(BTN_EXTRA, psmouse->dev->keybit);
if (!psmouse->vendor)
psmouse->vendor = "Generic";
if (!psmouse->name)
psmouse->name = "Explorer Mouse";
psmouse->pktsize = 4;
}
return 0;
}
/*
* Kensington ThinkingMouse / ExpertMouse magic init.
*/
static int thinking_detect(struct psmouse *psmouse, bool set_properties)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[2];
static const unsigned char seq[] = { 20, 60, 40, 20, 20, 60, 40, 20, 20 };
int i;
param[0] = 10;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 0;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
for (i = 0; i < ARRAY_SIZE(seq); i++) {
param[0] = seq[i];
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
}
ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);
if (param[0] != 2)
return -1;
if (set_properties) {
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
__set_bit(BTN_EXTRA, psmouse->dev->keybit);
psmouse->vendor = "Kensington";
psmouse->name = "ThinkingMouse";
}
return 0;
}
/*
* Bare PS/2 protocol "detection". Always succeeds.
*/
static int ps2bare_detect(struct psmouse *psmouse, bool set_properties)
{
if (set_properties) {
if (!psmouse->vendor)
psmouse->vendor = "Generic";
if (!psmouse->name)
psmouse->name = "Mouse";
/*
* We have no way of figuring out the true number of buttons, so let's
* assume that the device has 3.
*/
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
}
return 0;
}
/*
* Cortron PS/2 protocol detection. There's no special way to detect it, so it
* must be selected explicitly by writing the protocol name via sysfs.
*/
static int cortron_detect(struct psmouse *psmouse, bool set_properties)
{
if (set_properties) {
psmouse->vendor = "Cortron";
psmouse->name = "PS/2 Trackball";
__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
__set_bit(BTN_SIDE, psmouse->dev->keybit);
}
return 0;
}
/*
* Apply default settings to the psmouse structure. Most of them will
* be overridden by individual protocol initialization routines.
*/
static void psmouse_apply_defaults(struct psmouse *psmouse)
{
struct input_dev *input_dev = psmouse->dev;
memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_REL, input_dev->evbit);
__set_bit(BTN_LEFT, input_dev->keybit);
__set_bit(BTN_RIGHT, input_dev->keybit);
__set_bit(REL_X, input_dev->relbit);
__set_bit(REL_Y, input_dev->relbit);
psmouse->set_rate = psmouse_set_rate;
psmouse->set_resolution = psmouse_set_resolution;
psmouse->poll = psmouse_poll;
psmouse->protocol_handler = psmouse_process_byte;
psmouse->pktsize = 3;
psmouse->reconnect = NULL;
psmouse->disconnect = NULL;
psmouse->cleanup = NULL;
psmouse->pt_activate = NULL;
psmouse->pt_deactivate = NULL;
}
/*
* Apply default settings to the psmouse structure and call specified
* protocol detection or initialization routine.
*/
static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
bool set_properties),
struct psmouse *psmouse, bool set_properties)
{
if (set_properties)
psmouse_apply_defaults(psmouse);
return detect(psmouse, set_properties);
}
/*
* psmouse_extensions() probes for any extensions to the basic PS/2 protocol
* the mouse may have.
*/
static int psmouse_extensions(struct psmouse *psmouse,
unsigned int max_proto, bool set_properties)
{
bool synaptics_hardware = false;
/*
* We always check for Lifebook because it does not disturb the mouse
* (it only checks DMI information).
*/
if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
if (max_proto > PSMOUSE_IMEX) {
if (!set_properties || lifebook_init(psmouse) == 0)
return PSMOUSE_LIFEBOOK;
}
}
/*
* Try Kensington ThinkingMouse (we try it first, because the Synaptics
* probe upsets the ThinkingMouse).
*/
if (max_proto > PSMOUSE_IMEX &&
psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
return PSMOUSE_THINKPS;
}
/*
* Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
* support is disabled in the config - we need to know if it is a Synaptics
* device so we can reset it properly after probing for IntelliMouse.
*/
if (max_proto > PSMOUSE_PS2 &&
psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
/*
* Try activating the protocol, but check whether support is enabled
* first, since we try detecting Synaptics even when the protocol is
* disabled.
*/
if (synaptics_supported() &&
(!set_properties || synaptics_init(psmouse) == 0)) {
return PSMOUSE_SYNAPTICS;
}
/*
* Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
* Unfortunately Logitech/Genius probes confuse some firmware versions so
* we'll have to skip them.
*/
max_proto = PSMOUSE_IMEX;
}
/*
* Make sure the touchpad is in relative mode and that gestures (taps) are enabled
*/
synaptics_reset(psmouse);
}
/*
* Try ALPS TouchPad
*/
if (max_proto > PSMOUSE_IMEX) {
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
if (psmouse_do_detect(alps_detect,
psmouse, set_properties) == 0) {
if (!set_properties || alps_init(psmouse) == 0)
return PSMOUSE_ALPS;
/*
* Init failed, try basic relative protocols
*/
max_proto = PSMOUSE_IMEX;
}
}
/*
* Try OLPC HGPK touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
if (!set_properties || hgpk_init(psmouse) == 0)
return PSMOUSE_HGPK;
/*
* Init failed, try basic relative protocols
*/
max_proto = PSMOUSE_IMEX;
}
/*
* Try Elantech touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
if (!set_properties || elantech_init(psmouse) == 0)
return PSMOUSE_ELANTECH;
/*
* Init failed, try basic relative protocols
*/
max_proto = PSMOUSE_IMEX;
}
if (max_proto > PSMOUSE_IMEX) {
if (psmouse_do_detect(genius_detect,
psmouse, set_properties) == 0)
return PSMOUSE_GENPS;
if (psmouse_do_detect(ps2pp_init,
psmouse, set_properties) == 0)
return PSMOUSE_PS2PP;
if (psmouse_do_detect(trackpoint_detect,
psmouse, set_properties) == 0)
return PSMOUSE_TRACKPOINT;
if (psmouse_do_detect(touchkit_ps2_detect,
psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
/*
* Try Finger Sensing Pad. We do it here because its probe upsets
* TrackPoint devices (causing the TP_READ_ID command to time out).
*/
if (max_proto > PSMOUSE_IMEX) {
if (psmouse_do_detect(fsp_detect,
psmouse, set_properties) == 0) {
if (!set_properties || fsp_init(psmouse) == 0)
return PSMOUSE_FSP;
/*
* Init failed, try basic relative protocols
*/
max_proto = PSMOUSE_IMEX;
}
}
/*
* Reset to defaults in case the device got confused by extended
* protocol probes. Note that we follow up with a full reset because
* some mice put themselves to sleep when they see PSMOUSE_RESET_DIS.
*/
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
psmouse_reset(psmouse);
if (max_proto >= PSMOUSE_IMEX &&
psmouse_do_detect(im_explorer_detect,
psmouse, set_properties) == 0) {
return PSMOUSE_IMEX;
}
if (max_proto >= PSMOUSE_IMPS &&
psmouse_do_detect(intellimouse_detect,
psmouse, set_properties) == 0) {
return PSMOUSE_IMPS;
}
/*
* Okay, everything failed - we have a standard mouse here. The number of
* buttons is still an open question, though; we assume 3.
*/
psmouse_do_detect(ps2bare_detect, psmouse, set_properties);
if (synaptics_hardware) {
/*
* We detected Synaptics hardware but it did not respond to IMPS/2 probes.
* We need to reset the touchpad because if there is a TrackPoint on the
* pass-through port it could have been disabled while probing for protocol
* extensions.
*/
psmouse_reset(psmouse);
}
return PSMOUSE_PS2;
}
static const struct psmouse_protocol psmouse_protocols[] = {
{
.type = PSMOUSE_PS2,
.name = "PS/2",
.alias = "bare",
.maxproto = true,
.ignore_parity = true,
.detect = ps2bare_detect,
},
#ifdef CONFIG_MOUSE_PS2_LOGIPS2PP
{
.type = PSMOUSE_PS2PP,
.name = "PS2++",
.alias = "logitech",
.detect = ps2pp_init,
},
#endif
{
.type = PSMOUSE_THINKPS,
.name = "ThinkPS/2",
.alias = "thinkps",
.detect = thinking_detect,
},
{
.type = PSMOUSE_GENPS,
.name = "GenPS/2",
.alias = "genius",
.detect = genius_detect,
},
{
.type = PSMOUSE_IMPS,
.name = "ImPS/2",
.alias = "imps",
.maxproto = true,
.ignore_parity = true,
.detect = intellimouse_detect,
},
{
.type = PSMOUSE_IMEX,
.name = "ImExPS/2",
.alias = "exps",
.maxproto = true,
.ignore_parity = true,
.detect = im_explorer_detect,
},
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
{
.type = PSMOUSE_SYNAPTICS,
.name = "SynPS/2",
.alias = "synaptics",
.detect = synaptics_detect,
.init = synaptics_init,
},
{
.type = PSMOUSE_SYNAPTICS_RELATIVE,
.name = "SynRelPS/2",
.alias = "synaptics-relative",
.detect = synaptics_detect,
.init = synaptics_init_relative,
},
#endif
#ifdef CONFIG_MOUSE_PS2_ALPS
{
.type = PSMOUSE_ALPS,
.name = "AlpsPS/2",
.alias = "alps",
.detect = alps_detect,
.init = alps_init,
},
#endif
#ifdef CONFIG_MOUSE_PS2_LIFEBOOK
{
.type = PSMOUSE_LIFEBOOK,
.name = "LBPS/2",
.alias = "lifebook",
.init = lifebook_init,
},
#endif
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
{
.type = PSMOUSE_TRACKPOINT,
.name = "TPPS/2",
.alias = "trackpoint",
.detect = trackpoint_detect,
},
#endif
#ifdef CONFIG_MOUSE_PS2_TOUCHKIT
{
.type = PSMOUSE_TOUCHKIT_PS2,
.name = "touchkitPS/2",
.alias = "touchkit",
.detect = touchkit_ps2_detect,
},
#endif
#ifdef CONFIG_MOUSE_PS2_OLPC
{
.type = PSMOUSE_HGPK,
.name = "OLPC HGPK",
.alias = "hgpk",
.detect = hgpk_detect,
},
#endif
#ifdef CONFIG_MOUSE_PS2_ELANTECH
{
.type = PSMOUSE_ELANTECH,
.name = "ETPS/2",
.alias = "elantech",
.detect = elantech_detect,
.init = elantech_init,
},
#endif
#ifdef CONFIG_MOUSE_PS2_SENTELIC
{
.type = PSMOUSE_FSP,
.name = "FSPPS/2",
.alias = "fsp",
.detect = fsp_detect,
.init = fsp_init,
},
#endif
{
.type = PSMOUSE_CORTRON,
.name = "CortronPS/2",
.alias = "cortps",
.detect = cortron_detect,
},
{
.type = PSMOUSE_AUTO,
.name = "auto",
.alias = "any",
.maxproto = true,
},
};
static const struct psmouse_protocol *psmouse_protocol_by_type(enum psmouse_type type)
{
int i;
for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++)
if (psmouse_protocols[i].type == type)
return &psmouse_protocols[i];
WARN_ON(1);
return &psmouse_protocols[0];
}
static const struct psmouse_protocol *psmouse_protocol_by_name(const char *name, size_t len)
{
const struct psmouse_protocol *p;
int i;
for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) {
p = &psmouse_protocols[i];
if ((strlen(p->name) == len && !strncmp(p->name, name, len)) ||
(strlen(p->alias) == len && !strncmp(p->alias, name, len)))
return &psmouse_protocols[i];
}
return NULL;
}
/*
* psmouse_probe() probes for a PS/2 mouse.
*/
static int psmouse_probe(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
unsigned char param[2];
/*
* First, we check if it's a mouse. It should answer the ID query with
* 0x00 (standard mouse), 0x03 (IntelliMouse in 4-byte mode) or 0x04
* (IntelliMouse Explorer).
* Sunrex K8561 IR Keyboard/Mouse reports 0xff on second and subsequent
* ID queries, probably due to a firmware bug.
*/
param[0] = 0xa5;
if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
return -1;
if (param[0] != 0x00 && param[0] != 0x03 &&
param[0] != 0x04 && param[0] != 0xff)
return -1;
/*
* Then we reset and disable the mouse so that it doesn't generate events.
*/
if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_DIS))
psmouse_warn(psmouse, "Failed to reset mouse on %s\n",
ps2dev->serio->phys);
return 0;
}
/*
* psmouse_initialize() initializes the mouse to a sane state.
*/
static void psmouse_initialize(struct psmouse *psmouse)
{
/*
* We set the mouse report rate, resolution and scaling.
*/
if (psmouse_max_proto != PSMOUSE_PS2) {
psmouse->set_rate(psmouse, psmouse->rate);
psmouse->set_resolution(psmouse, psmouse->resolution);
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
}
}
/*
* psmouse_activate() enables the mouse so that we get motion reports from it.
*/
int psmouse_activate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
psmouse_warn(psmouse, "Failed to enable mouse on %s\n",
psmouse->ps2dev.serio->phys);
return -1;
}
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
return 0;
}
/*
* psmouse_deactivate() puts the mouse into poll mode so that we don't get motion
* reports from it unless we explicitly request it.
*/
int psmouse_deactivate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) {
psmouse_warn(psmouse, "Failed to deactivate mouse on %s\n",
psmouse->ps2dev.serio->phys);
return -1;
}
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
return 0;
}
/*
* psmouse_resync() attempts to re-validate current protocol.
*/
static void psmouse_resync(struct work_struct *work)
{
struct psmouse *parent = NULL, *psmouse =
container_of(work, struct psmouse, resync_work.work);
struct serio *serio = psmouse->ps2dev.serio;
psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
bool failed = false, enabled = false;
int i;
mutex_lock(&psmouse_mutex);
if (psmouse->state != PSMOUSE_RESYNCING)
goto out;
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
/*
* Some mice don't ACK commands sent while they are in the middle of
* transmitting a motion packet. To avoid delay we use ps2_sendbyte()
* instead of ps2_command(), which would wait up to 200ms for an ACK
* that may never come.
* As an additional quirk, ALPS touchpads may not only fail to ACK the
* disable command but may also stop reporting taps, so if the mouse has
* ever ACKed a disable we do a full reconnect whenever the ACK goes
* missing.
*/
psmouse->num_resyncs++;
if (ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20)) {
if (psmouse->num_resyncs < 3 || psmouse->acks_disable_command)
failed = true;
} else
psmouse->acks_disable_command = true;
/*
* Poll the mouse. If it was reset the packet will be shorter than
* psmouse->pktsize and ps2_command will fail. We do not expect, and do
* not handle, the scenario where the mouse "upgrades" its protocol
* while disconnected, since that would require an additional delay.
* If we ever see a mouse that does this we'll adjust the code.
*/
if (!failed) {
if (psmouse->poll(psmouse))
failed = true;
else {
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
for (i = 0; i < psmouse->pktsize; i++) {
psmouse->pktcnt++;
rc = psmouse->protocol_handler(psmouse);
if (rc != PSMOUSE_GOOD_DATA)
break;
}
if (rc != PSMOUSE_FULL_PACKET)
failed = true;
psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
}
}
/*
* Now try to enable the mouse. We do this even if the poll failed, and
* we repeat our attempts up to 5 times; otherwise we may be left with a
* disabled mouse.
*/
for (i = 0; i < 5; i++) {
if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
enabled = true;
break;
}
msleep(200);
}
if (!enabled) {
psmouse_warn(psmouse, "failed to re-enable mouse on %s\n",
psmouse->ps2dev.serio->phys);
failed = true;
}
if (failed) {
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
psmouse_info(psmouse,
"resync failed, issuing reconnect request\n");
serio_reconnect(serio);
} else
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
if (parent)
psmouse_activate(parent);
out:
mutex_unlock(&psmouse_mutex);
}
/*
* psmouse_cleanup() resets the mouse into power-on state.
*/
static void psmouse_cleanup(struct serio *serio)
{
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
mutex_lock(&psmouse_mutex);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
/*
* Disable stream mode so the cleanup routine can proceed undisturbed.
*/
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
psmouse_warn(psmouse, "Failed to disable mouse on %s\n",
psmouse->ps2dev.serio->phys);
if (psmouse->cleanup)
psmouse->cleanup(psmouse);
/*
* Reset the mouse to defaults (bare PS/2 protocol).
*/
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
/*
* Some boxes, such as the HP nx7400, get terribly confused if the
* mouse is not fully enabled before suspending/shutting down.
*/
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);
if (parent) {
if (parent->pt_deactivate)
parent->pt_deactivate(parent);
psmouse_activate(parent);
}
mutex_unlock(&psmouse_mutex);
}
/*
* psmouse_disconnect() closes the port and frees the mouse device.
*/
static void psmouse_disconnect(struct serio *serio)
{
struct psmouse *psmouse, *parent = NULL;
psmouse = serio_get_drvdata(serio);
sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);
mutex_lock(&psmouse_mutex);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
/* make sure we don't have a resync in progress */
mutex_unlock(&psmouse_mutex);
flush_workqueue(kpsmoused_wq);
mutex_lock(&psmouse_mutex);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
if (parent && parent->pt_deactivate)
parent->pt_deactivate(parent);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
serio_close(serio);
serio_set_drvdata(serio, NULL);
input_unregister_device(psmouse->dev);
kfree(psmouse);
if (parent)
psmouse_activate(parent);
mutex_unlock(&psmouse_mutex);
}
static int psmouse_switch_protocol(struct psmouse *psmouse,
const struct psmouse_protocol *proto)
{
const struct psmouse_protocol *selected_proto;
struct input_dev *input_dev = psmouse->dev;
input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
if (proto && (proto->detect || proto->init)) {
psmouse_apply_defaults(psmouse);
if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
if (proto->init && proto->init(psmouse) < 0)
return -1;
psmouse->type = proto->type;
selected_proto = proto;
} else {
psmouse->type = psmouse_extensions(psmouse,
psmouse_max_proto, true);
selected_proto = psmouse_protocol_by_type(psmouse->type);
}
psmouse->ignore_parity = selected_proto->ignore_parity;
/*
* If the mouse's packet size is 3 there is no point in polling the
* device hoping to detect a protocol reset - we won't get a response
* shorter than 3 bytes anyway.
*/
if (psmouse->pktsize == 3)
psmouse->resync_time = 0;
/*
* Some smart KVMs fake the response to the POLL command, returning
* just 3 bytes and messing up our resync logic, so if the initial poll
* fails we won't try polling the device anymore. Hopefully such a KVM
* will keep the initially selected protocol.
*/
if (psmouse->resync_time && psmouse->poll(psmouse))
psmouse->resync_time = 0;
snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
selected_proto->name, psmouse->vendor, psmouse->name);
input_dev->name = psmouse->devname;
input_dev->phys = psmouse->phys;
input_dev->id.bustype = BUS_I8042;
input_dev->id.vendor = 0x0002;
input_dev->id.product = psmouse->type;
input_dev->id.version = psmouse->model;
return 0;
}
/*
* psmouse_connect() is a callback from the serio module when
* an unhandled serio port is found.
*/
static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
{
struct psmouse *psmouse, *parent = NULL;
struct input_dev *input_dev;
int retval = 0, error = -ENOMEM;
mutex_lock(&psmouse_mutex);
/*
* If this is a pass-through port, deactivate the parent so the device
* connected to this port can be successfully identified
*/
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
psmouse = kzalloc(sizeof(struct psmouse), GFP_KERNEL);
input_dev = input_allocate_device();
if (!psmouse || !input_dev)
goto err_free;
ps2_init(&psmouse->ps2dev, serio);
INIT_DELAYED_WORK(&psmouse->resync_work, psmouse_resync);
psmouse->dev = input_dev;
snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
serio_set_drvdata(serio, psmouse);
error = serio_open(serio, drv);
if (error)
goto err_clear_drvdata;
if (psmouse_probe(psmouse) < 0) {
error = -ENODEV;
goto err_close_serio;
}
psmouse->rate = psmouse_rate;
psmouse->resolution = psmouse_resolution;
psmouse->resetafter = psmouse_resetafter;
psmouse->resync_time = parent ? 0 : psmouse_resync_time;
psmouse->smartscroll = psmouse_smartscroll;
psmouse_switch_protocol(psmouse, NULL);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
psmouse_initialize(psmouse);
error = input_register_device(psmouse->dev);
if (error)
goto err_protocol_disconnect;
if (parent && parent->pt_activate)
parent->pt_activate(parent);
error = sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group);
if (error)
goto err_pt_deactivate;
psmouse_activate(psmouse);
out:
/* If this is a pass-through port the parent needs to be re-activated */
if (parent)
psmouse_activate(parent);
mutex_unlock(&psmouse_mutex);
return retval;
err_pt_deactivate:
if (parent && parent->pt_deactivate)
parent->pt_deactivate(parent);
input_unregister_device(psmouse->dev);
input_dev = NULL; /* so we don't try to free it below */
err_protocol_disconnect:
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
err_close_serio:
serio_close(serio);
err_clear_drvdata:
serio_set_drvdata(serio, NULL);
err_free:
input_free_device(input_dev);
kfree(psmouse);
retval = error;
goto out;
}
static int psmouse_reconnect(struct serio *serio)
{
struct psmouse *psmouse = serio_get_drvdata(serio);
struct psmouse *parent = NULL;
struct serio_driver *drv = serio->drv;
unsigned char type;
int rc = -1;
if (!drv || !psmouse) {
/* psmouse may be NULL here, so we can't use psmouse_dbg() */
printk(KERN_DEBUG
"psmouse: reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
mutex_lock(&psmouse_mutex);
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
if (psmouse->reconnect) {
if (psmouse->reconnect(psmouse))
goto out;
} else {
psmouse_reset(psmouse);
if (psmouse_probe(psmouse) < 0)
goto out;
type = psmouse_extensions(psmouse, psmouse_max_proto, false);
if (psmouse->type != type)
goto out;
}
/*
* OK, the device type (and capabilities) match the old one,
* we can continue using it, complete initialization
*/
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
psmouse_initialize(psmouse);
if (parent && parent->pt_activate)
parent->pt_activate(parent);
psmouse_activate(psmouse);
rc = 0;
out:
/* If this is a pass-through port the parent needs to be re-activated */
if (parent)
psmouse_activate(parent);
mutex_unlock(&psmouse_mutex);
return rc;
}
static struct serio_device_id psmouse_serio_ids[] = {
{
.type = SERIO_8042,
.proto = SERIO_ANY,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{
.type = SERIO_PS_PSTHRU,
.proto = SERIO_ANY,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
static struct serio_driver psmouse_drv = {
.driver = {
.name = "psmouse",
},
.description = DRIVER_DESC,
.id_table = psmouse_serio_ids,
.interrupt = psmouse_interrupt,
.connect = psmouse_connect,
.reconnect = psmouse_reconnect,
.disconnect = psmouse_disconnect,
.cleanup = psmouse_cleanup,
};
ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct serio *serio = to_serio_port(dev);
struct psmouse_attribute *attr = to_psmouse_attr(devattr);
struct psmouse *psmouse;
psmouse = serio_get_drvdata(serio);
return attr->show(psmouse, attr->data, buf);
}
ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct serio *serio = to_serio_port(dev);
struct psmouse_attribute *attr = to_psmouse_attr(devattr);
struct psmouse *psmouse, *parent = NULL;
int retval;
retval = mutex_lock_interruptible(&psmouse_mutex);
if (retval)
goto out;
psmouse = serio_get_drvdata(serio);
if (attr->protect) {
if (psmouse->state == PSMOUSE_IGNORE) {
retval = -ENODEV;
goto out_unlock;
}
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
psmouse_deactivate(parent);
}
psmouse_deactivate(psmouse);
}
retval = attr->set(psmouse, attr->data, buf, count);
if (attr->protect) {
if (retval != -ENODEV)
psmouse_activate(psmouse);
if (parent)
psmouse_activate(parent);
}
out_unlock:
mutex_unlock(&psmouse_mutex);
out:
return retval;
}
static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char *buf)
{
unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
return sprintf(buf, "%u\n", *field);
}
static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
{
unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
unsigned int value;
int err;
err = kstrtouint(buf, 10, &value);
if (err)
return err;
*field = value;
return count;
}
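/*
 * Illustrative note on the two helpers above: the member's byte offset
 * is smuggled through the void *data pointer, so one show/set pair can
 * serve several unsigned int fields. A hypothetical attribute
 * definition (the real macros live elsewhere in this file) would pass
 *
 *   (void *) offsetof(struct psmouse, rate)
 *
 * and psmouse_show_int_attr() turns that back into &psmouse->rate.
 */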
static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, void *data, char *buf)
{
return sprintf(buf, "%s\n", psmouse_protocol_by_type(psmouse->type)->name);
}
static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
struct serio *serio = psmouse->ps2dev.serio;
struct psmouse *parent = NULL;
struct input_dev *old_dev, *new_dev;
const struct psmouse_protocol *proto, *old_proto;
int error;
int retry = 0;
proto = psmouse_protocol_by_name(buf, count);
if (!proto)
return -EINVAL;
if (psmouse->type == proto->type)
return count;
new_dev = input_allocate_device();
if (!new_dev)
return -ENOMEM;
while (!list_empty(&serio->children)) {
if (++retry > 3) {
psmouse_warn(psmouse,
"failed to destroy children ports, protocol change aborted.\n");
input_free_device(new_dev);
return -EIO;
}
mutex_unlock(&psmouse_mutex);
serio_unregister_child_port(serio);
mutex_lock(&psmouse_mutex);
if (serio->drv != &psmouse_drv) {
input_free_device(new_dev);
return -ENODEV;
}
if (psmouse->type == proto->type) {
input_free_device(new_dev);
return count; /* switched by other thread */
}
}
if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
parent = serio_get_drvdata(serio->parent);
if (parent->pt_deactivate)
parent->pt_deactivate(parent);
}
old_dev = psmouse->dev;
old_proto = psmouse_protocol_by_type(psmouse->type);
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
psmouse->dev = new_dev;
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
if (psmouse_switch_protocol(psmouse, proto) < 0) {
psmouse_reset(psmouse);
/* default to PSMOUSE_PS2 */
psmouse_switch_protocol(psmouse, &psmouse_protocols[0]);
}
psmouse_initialize(psmouse);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
error = input_register_device(psmouse->dev);
if (error) {
if (psmouse->disconnect)
psmouse->disconnect(psmouse);
psmouse_set_state(psmouse, PSMOUSE_IGNORE);
input_free_device(new_dev);
psmouse->dev = old_dev;
psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
psmouse_switch_protocol(psmouse, old_proto);
psmouse_initialize(psmouse);
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
return error;
}
input_unregister_device(old_dev);
if (parent && parent->pt_activate)
parent->pt_activate(parent);
return count;
}
static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
unsigned int value;
int err;
err = kstrtouint(buf, 10, &value);
if (err)
return err;
psmouse->set_rate(psmouse, value);
return count;
}
static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
unsigned int value;
int err;
err = kstrtouint(buf, 10, &value);
if (err)
return err;
psmouse->set_resolution(psmouse, value);
return count;
}
static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp)
{
const struct psmouse_protocol *proto;
if (!val)
return -EINVAL;
proto = psmouse_protocol_by_name(val, strlen(val));
if (!proto || !proto->maxproto)
return -EINVAL;
*((unsigned int *)kp->arg) = proto->type;
return 0;
}
static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
int type = *((unsigned int *)kp->arg);
return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
}
static int __init psmouse_init(void)
{
int err;
lifebook_module_init();
synaptics_module_init();
hgpk_module_init();
kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
if (!kpsmoused_wq) {
pr_err("failed to create kpsmoused workqueue\n");
return -ENOMEM;
}
err = serio_register_driver(&psmouse_drv);
if (err)
destroy_workqueue(kpsmoused_wq);
return err;
}
static void __exit psmouse_exit(void)
{
serio_unregister_driver(&psmouse_drv);
destroy_workqueue(kpsmoused_wq);
}
module_init(psmouse_init);
module_exit(psmouse_exit);
| gpl-2.0 |
ErikAndren/linux | drivers/media/rc/keymaps/rc-msi-digivox-ii.c | 4684 | 1971 | /*
* MSI DIGIVOX mini II remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table msi_digivox_ii[] = {
{ 0x0302, KEY_2 },
{ 0x0303, KEY_UP }, /* up */
{ 0x0304, KEY_3 },
{ 0x0305, KEY_CHANNELDOWN },
{ 0x0308, KEY_5 },
{ 0x0309, KEY_0 },
{ 0x030b, KEY_8 },
{ 0x030d, KEY_DOWN }, /* down */
{ 0x0310, KEY_9 },
{ 0x0311, KEY_7 },
{ 0x0314, KEY_VOLUMEUP },
{ 0x0315, KEY_CHANNELUP },
{ 0x0316, KEY_OK },
{ 0x0317, KEY_POWER2 },
{ 0x031a, KEY_1 },
{ 0x031c, KEY_4 },
{ 0x031d, KEY_6 },
{ 0x031f, KEY_VOLUMEDOWN },
};
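/*
 * Illustrative note: with RC_TYPE_NEC the 16-bit scancodes above are
 * assumed to encode the NEC address in the high byte (0x03 for this
 * remote) and the command in the low byte, which is why every entry
 * shares the 0x03xx prefix.
 */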
static struct rc_map_list msi_digivox_ii_map = {
.map = {
.scan = msi_digivox_ii,
.size = ARRAY_SIZE(msi_digivox_ii),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_MSI_DIGIVOX_II,
}
};
static int __init init_rc_map_msi_digivox_ii(void)
{
return rc_map_register(&msi_digivox_ii_map);
}
static void __exit exit_rc_map_msi_digivox_ii(void)
{
rc_map_unregister(&msi_digivox_ii_map);
}
module_init(init_rc_map_msi_digivox_ii)
module_exit(exit_rc_map_msi_digivox_ii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
| gpl-2.0 |
tycoo/moto_x_kernel | tools/perf/builtin-diff.c | 4940 | 6742 | /*
* builtin-diff.c
*
* Builtin diff command: Analyze two perf.data input files, look up and read
* DSOs and symbol information, sort them and produce a diff.
*/
#include "builtin.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/hist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/sort.h"
#include "util/symbol.h"
#include "util/util.h"
#include <stdlib.h>
static char const *input_old = "perf.data.old",
*input_new = "perf.data";
static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
struct perf_diff {
struct perf_tool tool;
struct perf_session *session;
};
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
{
if (__hists__add_entry(self, al, NULL, period) != NULL)
return 0;
return -ENOMEM;
}
static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
struct machine *machine)
{
struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
struct perf_session *session = _diff->session;
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (al.filtered || al.sym == NULL)
return 0;
if (hists__add_entry(&session->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
session->hists.stats.total_period += sample->period;
return 0;
}
static struct perf_diff diff = {
.tool = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.exit = perf_event__process_task,
.fork = perf_event__process_task,
.lost = perf_event__process_lost,
.ordered_samples = true,
.ordering_requires_timestamps = true,
},
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__cmp(he, iter) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
}
static void hists__resort_entries(struct hists *self)
{
unsigned long position = 1;
struct rb_root tmp = RB_ROOT;
struct rb_node *next = rb_first(&self->entries);
while (next != NULL) {
struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, &self->entries);
n->position = position++;
perf_session__insert_hist_entry_by_name(&tmp, n);
}
self->entries = tmp;
}
static struct hist_entry *hists__find_entry(struct hists *self,
struct hist_entry *he)
{
struct rb_node *n = self->entries.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
int64_t cmp = hist_entry__cmp(he, iter);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return iter;
}
return NULL;
}
static void hists__match(struct hists *older, struct hists *newer)
{
struct rb_node *nd;
for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) {
struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
pos->pair = hists__find_entry(older, pos);
}
}
static int __cmd_diff(void)
{
int ret, i;
#define older (session[0])
#define newer (session[1])
struct perf_session *session[2];
older = perf_session__new(input_old, O_RDONLY, force, false,
&diff.tool);
newer = perf_session__new(input_new, O_RDONLY, force, false,
&diff.tool);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
diff.session = session[i];
ret = perf_session__process_events(session[i], &diff.tool);
if (ret)
goto out_delete;
hists__output_resort(&session[i]->hists);
}
if (show_displacement)
hists__resort_entries(&older->hists);
hists__match(&older->hists, &newer->hists);
hists__fprintf(&newer->hists, &older->hists,
show_displacement, true, 0, 0, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
return ret;
#undef older
#undef newer
}
static const char * const diff_usage[] = {
"perf diff [<options>] [old_file] [new_file]",
NULL,
};
static const struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('M', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_END()
};
int cmd_diff(int argc, const char **argv, const char *prefix __used)
{
sort_order = diff__default_sort_order;
argc = parse_options(argc, argv, options, diff_usage, 0);
if (argc) {
if (argc > 2)
usage_with_options(diff_usage, options);
if (argc == 2) {
input_old = argv[0];
input_new = argv[1];
} else
input_new = argv[0];
} else if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms) {
input_old = "perf.data.host";
input_new = "perf.data.guest";
}
symbol_conf.exclude_other = false;
if (symbol__init() < 0)
return -1;
setup_sorting(diff_usage, options);
setup_pager();
sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL);
sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL);
return __cmd_diff();
}
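/*
 * Typical usage, sketched for illustration (file names match the
 * defaults above):
 *
 *   $ perf record -o perf.data.old ./workload    # baseline run
 *   $ perf record ./workload                     # run after changes
 *   $ perf diff                                  # diff the two files
 *   $ perf diff -M                               # also show displacement
 */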
| gpl-2.0 |
UberCM/kernel_asus_flo | tools/perf/builtin-diff.c | 4940 | 6742 | /*
* builtin-diff.c
*
* Builtin diff command: Analyze two perf.data input files, look up and read
* DSOs and symbol information, sort them and produce a diff.
*/
#include "builtin.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/hist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/sort.h"
#include "util/symbol.h"
#include "util/util.h"
#include <stdlib.h>
static char const *input_old = "perf.data.old",
*input_new = "perf.data";
static char diff__default_sort_order[] = "dso,symbol";
static bool force;
static bool show_displacement;
struct perf_diff {
struct perf_tool tool;
struct perf_session *session;
};
static int hists__add_entry(struct hists *self,
struct addr_location *al, u64 period)
{
if (__hists__add_entry(self, al, NULL, period) != NULL)
return 0;
return -ENOMEM;
}
static int diff__process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
struct machine *machine)
{
struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
struct perf_session *session = _diff->session;
struct addr_location al;
if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (al.filtered || al.sym == NULL)
return 0;
if (hists__add_entry(&session->hists, &al, sample->period)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
}
session->hists.stats.total_period += sample->period;
return 0;
}
static struct perf_diff diff = {
.tool = {
.sample = diff__process_sample_event,
.mmap = perf_event__process_mmap,
.comm = perf_event__process_comm,
.exit = perf_event__process_task,
.fork = perf_event__process_task,
.lost = perf_event__process_lost,
.ordered_samples = true,
.ordering_requires_timestamps = true,
},
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__cmp(he, iter) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
}
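/*
 * Illustrative note: unlike a tree search, this insert never stops on
 * an equal key; entries that compare equal are linked to the right, so
 * duplicates are kept rather than replaced, appearing after earlier
 * equal entries in an in-order walk.
 */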
static void hists__resort_entries(struct hists *self)
{
unsigned long position = 1;
struct rb_root tmp = RB_ROOT;
struct rb_node *next = rb_first(&self->entries);
while (next != NULL) {
struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, &self->entries);
n->position = position++;
perf_session__insert_hist_entry_by_name(&tmp, n);
}
self->entries = tmp;
}
static struct hist_entry *hists__find_entry(struct hists *self,
struct hist_entry *he)
{
struct rb_node *n = self->entries.rb_node;
while (n) {
struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
int64_t cmp = hist_entry__cmp(he, iter);
if (cmp < 0)
n = n->rb_left;
else if (cmp > 0)
n = n->rb_right;
else
return iter;
}
return NULL;
}
static void hists__match(struct hists *older, struct hists *newer)
{
struct rb_node *nd;
for (nd = rb_first(&newer->entries); nd; nd = rb_next(nd)) {
struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
pos->pair = hists__find_entry(older, pos);
}
}
static int __cmd_diff(void)
{
int ret, i;
#define older (session[0])
#define newer (session[1])
struct perf_session *session[2];
older = perf_session__new(input_old, O_RDONLY, force, false,
&diff.tool);
newer = perf_session__new(input_new, O_RDONLY, force, false,
&diff.tool);
if (session[0] == NULL || session[1] == NULL)
return -ENOMEM;
for (i = 0; i < 2; ++i) {
diff.session = session[i];
ret = perf_session__process_events(session[i], &diff.tool);
if (ret)
goto out_delete;
hists__output_resort(&session[i]->hists);
}
if (show_displacement)
hists__resort_entries(&older->hists);
hists__match(&older->hists, &newer->hists);
hists__fprintf(&newer->hists, &older->hists,
show_displacement, true, 0, 0, stdout);
out_delete:
for (i = 0; i < 2; ++i)
perf_session__delete(session[i]);
return ret;
#undef older
#undef newer
}
static const char * const diff_usage[] = {
"perf diff [<options>] [old_file] [new_file]",
NULL,
};
static const struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('M', "displacement", &show_displacement,
"Show position displacement relative to baseline"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
OPT_END()
};
int cmd_diff(int argc, const char **argv, const char *prefix __used)
{
sort_order = diff__default_sort_order;
argc = parse_options(argc, argv, options, diff_usage, 0);
if (argc) {
if (argc > 2)
usage_with_options(diff_usage, options);
if (argc == 2) {
input_old = argv[0];
input_new = argv[1];
} else
input_new = argv[0];
} else if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms) {
input_old = "perf.data.host";
input_new = "perf.data.guest";
}
symbol_conf.exclude_other = false;
if (symbol__init() < 0)
return -1;
setup_sorting(diff_usage, options);
setup_pager();
sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL);
sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL);
return __cmd_diff();
}
| gpl-2.0 |
KylinUI/android_kernel_htc_m7 | drivers/media/common/tuners/mt2063.c | 4940 | 67602 | /*
* Driver for mt2063 Micronas tuner
*
* Copyright (c) 2011 Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This driver came from a driver originally written by:
* Henry Wang <Henry.wang@AzureWave.com>
* Made publicly available by Terratec, at:
* http://linux.terratec.de/files/TERRATEC_H7/20110323_TERRATEC_H7_Linux.tar.gz
* The original driver's license is GPL, as declared with MODULE_LICENSE()
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation under version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/videodev2.h>
#include "mt2063.h"
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Set Verbosity level");
#define dprintk(level, fmt, arg...) do { \
if (debug >= level) \
printk(KERN_DEBUG "mt2063 %s: " fmt, __func__, ## arg); \
} while (0)
/* positive error codes used internally */
/* Info: Unavoidable LO-related spur may be present in the output */
#define MT2063_SPUR_PRESENT_ERR (0x00800000)
/* Info: Mask of bits used for # of LO-related spurs that were avoided during tuning */
#define MT2063_SPUR_CNT_MASK (0x001f0000)
#define MT2063_SPUR_SHIFT (16)
/* Info: Upconverter frequency is out of range (may be reason for MT_UPC_UNLOCK) */
#define MT2063_UPC_RANGE (0x04000000)
/* Info: Downconverter frequency is out of range (may be reason for MT_DPC_UNLOCK) */
#define MT2063_DNC_RANGE (0x08000000)
/*
* Constant defining the version of the following structure
* and therefore the API for this code.
*
* When compiling the tuner driver, the preprocessor will
* check against this version number to make sure that
* it matches the version that the tuner driver knows about.
*/
/* DECT Frequency Avoidance */
#define MT2063_DECT_AVOID_US_FREQS 0x00000001
#define MT2063_DECT_AVOID_EURO_FREQS 0x00000002
#define MT2063_EXCLUDE_US_DECT_FREQUENCIES(s) (((s) & MT2063_DECT_AVOID_US_FREQS) != 0)
#define MT2063_EXCLUDE_EURO_DECT_FREQUENCIES(s) (((s) & MT2063_DECT_AVOID_EURO_FREQS) != 0)
enum MT2063_DECT_Avoid_Type {
MT2063_NO_DECT_AVOIDANCE = 0, /* Do not create DECT exclusion zones. */
MT2063_AVOID_US_DECT = MT2063_DECT_AVOID_US_FREQS, /* Avoid US DECT frequencies. */
MT2063_AVOID_EURO_DECT = MT2063_DECT_AVOID_EURO_FREQS, /* Avoid European DECT frequencies. */
MT2063_AVOID_BOTH /* Avoid both regions. Not typically used. */
};
#define MT2063_MAX_ZONES 48
struct MT2063_ExclZone_t {
u32 min_;
u32 max_;
struct MT2063_ExclZone_t *next_;
};
/*
* Structure of data needed for Spur Avoidance
*/
struct MT2063_AvoidSpursData_t {
u32 f_ref;
u32 f_in;
u32 f_LO1;
u32 f_if1_Center;
u32 f_if1_Request;
u32 f_if1_bw;
u32 f_LO2;
u32 f_out;
u32 f_out_bw;
u32 f_LO1_Step;
u32 f_LO2_Step;
u32 f_LO1_FracN_Avoid;
u32 f_LO2_FracN_Avoid;
u32 f_zif_bw;
u32 f_min_LO_Separation;
u32 maxH1;
u32 maxH2;
enum MT2063_DECT_Avoid_Type avoidDECT;
u32 bSpurPresent;
u32 bSpurAvoided;
u32 nSpursFound;
u32 nZones;
struct MT2063_ExclZone_t *freeZones;
struct MT2063_ExclZone_t *usedZones;
struct MT2063_ExclZone_t MT2063_ExclZones[MT2063_MAX_ZONES];
};
/*
* Parameter for function MT2063_SetPowerMask that specifies the power down
* of various sections of the MT2063.
*/
enum MT2063_Mask_Bits {
MT2063_REG_SD = 0x0040, /* Shutdown regulator */
MT2063_SRO_SD = 0x0020, /* Shutdown SRO */
MT2063_AFC_SD = 0x0010, /* Shutdown AFC A/D */
MT2063_PD_SD = 0x0002, /* Enable power detector shutdown */
MT2063_PDADC_SD = 0x0001, /* Enable power detector A/D shutdown */
MT2063_VCO_SD = 0x8000, /* Enable VCO shutdown */
MT2063_LTX_SD = 0x4000, /* Enable LTX shutdown */
MT2063_LT1_SD = 0x2000, /* Enable LT1 shutdown */
MT2063_LNA_SD = 0x1000, /* Enable LNA shutdown */
MT2063_UPC_SD = 0x0800, /* Enable upconverter shutdown */
MT2063_DNC_SD = 0x0400, /* Enable downconverter shutdown */
MT2063_VGA_SD = 0x0200, /* Enable VGA shutdown */
MT2063_AMP_SD = 0x0100, /* Enable AMP shutdown */
MT2063_ALL_SD = 0xFF73, /* All shutdown bits for this tuner */
MT2063_NONE_SD = 0x0000 /* No shutdown bits */
};
/*
* Possible values for MT2063_DNC_OUTPUT
*/
enum MT2063_DNC_Output_Enable {
MT2063_DNC_NONE = 0,
MT2063_DNC_1,
MT2063_DNC_2,
MT2063_DNC_BOTH
};
/*
* Two-wire serial bus subaddresses of the tuner registers.
* Also known as the tuner's register addresses.
*/
enum MT2063_Register_Offsets {
MT2063_REG_PART_REV = 0, /* 0x00: Part/Rev Code */
MT2063_REG_LO1CQ_1, /* 0x01: LO1C Queued Byte 1 */
MT2063_REG_LO1CQ_2, /* 0x02: LO1C Queued Byte 2 */
MT2063_REG_LO2CQ_1, /* 0x03: LO2C Queued Byte 1 */
MT2063_REG_LO2CQ_2, /* 0x04: LO2C Queued Byte 2 */
MT2063_REG_LO2CQ_3, /* 0x05: LO2C Queued Byte 3 */
MT2063_REG_RSVD_06, /* 0x06: Reserved */
MT2063_REG_LO_STATUS, /* 0x07: LO Status */
MT2063_REG_FIFFC, /* 0x08: FIFF Center */
MT2063_REG_CLEARTUNE, /* 0x09: ClearTune Filter */
MT2063_REG_ADC_OUT, /* 0x0A: ADC_OUT */
MT2063_REG_LO1C_1, /* 0x0B: LO1C Byte 1 */
MT2063_REG_LO1C_2, /* 0x0C: LO1C Byte 2 */
MT2063_REG_LO2C_1, /* 0x0D: LO2C Byte 1 */
MT2063_REG_LO2C_2, /* 0x0E: LO2C Byte 2 */
MT2063_REG_LO2C_3, /* 0x0F: LO2C Byte 3 */
MT2063_REG_RSVD_10, /* 0x10: Reserved */
MT2063_REG_PWR_1, /* 0x11: PWR Byte 1 */
MT2063_REG_PWR_2, /* 0x12: PWR Byte 2 */
MT2063_REG_TEMP_STATUS, /* 0x13: Temp Status */
MT2063_REG_XO_STATUS, /* 0x14: Crystal Status */
MT2063_REG_RF_STATUS, /* 0x15: RF Attn Status */
MT2063_REG_FIF_STATUS, /* 0x16: FIF Attn Status */
MT2063_REG_LNA_OV, /* 0x17: LNA Attn Override */
MT2063_REG_RF_OV, /* 0x18: RF Attn Override */
MT2063_REG_FIF_OV, /* 0x19: FIF Attn Override */
MT2063_REG_LNA_TGT, /* 0x1A: Reserved */
MT2063_REG_PD1_TGT, /* 0x1B: Pwr Det 1 Target */
MT2063_REG_PD2_TGT, /* 0x1C: Pwr Det 2 Target */
MT2063_REG_RSVD_1D, /* 0x1D: Reserved */
MT2063_REG_RSVD_1E, /* 0x1E: Reserved */
MT2063_REG_RSVD_1F, /* 0x1F: Reserved */
MT2063_REG_RSVD_20, /* 0x20: Reserved */
MT2063_REG_BYP_CTRL, /* 0x21: Bypass Control */
MT2063_REG_RSVD_22, /* 0x22: Reserved */
MT2063_REG_RSVD_23, /* 0x23: Reserved */
MT2063_REG_RSVD_24, /* 0x24: Reserved */
MT2063_REG_RSVD_25, /* 0x25: Reserved */
MT2063_REG_RSVD_26, /* 0x26: Reserved */
MT2063_REG_RSVD_27, /* 0x27: Reserved */
MT2063_REG_FIFF_CTRL, /* 0x28: FIFF Control */
MT2063_REG_FIFF_OFFSET, /* 0x29: FIFF Offset */
MT2063_REG_CTUNE_CTRL, /* 0x2A: Reserved */
MT2063_REG_CTUNE_OV, /* 0x2B: Reserved */
MT2063_REG_CTRL_2C, /* 0x2C: Reserved */
MT2063_REG_FIFF_CTRL2, /* 0x2D: Fiff Control */
MT2063_REG_RSVD_2E, /* 0x2E: Reserved */
MT2063_REG_DNC_GAIN, /* 0x2F: DNC Control */
MT2063_REG_VGA_GAIN, /* 0x30: VGA Gain Ctrl */
MT2063_REG_RSVD_31, /* 0x31: Reserved */
MT2063_REG_TEMP_SEL, /* 0x32: Temperature Selection */
MT2063_REG_RSVD_33, /* 0x33: Reserved */
MT2063_REG_RSVD_34, /* 0x34: Reserved */
MT2063_REG_RSVD_35, /* 0x35: Reserved */
MT2063_REG_RSVD_36, /* 0x36: Reserved */
MT2063_REG_RSVD_37, /* 0x37: Reserved */
MT2063_REG_RSVD_38, /* 0x38: Reserved */
MT2063_REG_RSVD_39, /* 0x39: Reserved */
MT2063_REG_RSVD_3A, /* 0x3A: Reserved */
MT2063_REG_RSVD_3B, /* 0x3B: Reserved */
MT2063_REG_RSVD_3C, /* 0x3C: Reserved */
MT2063_REG_END_REGS
};
struct mt2063_state {
struct i2c_adapter *i2c;
bool init;
const struct mt2063_config *config;
struct dvb_tuner_ops ops;
struct dvb_frontend *frontend;
struct tuner_state status;
u32 frequency;
u32 srate;
u32 bandwidth;
u32 reference;
u32 tuner_id;
struct MT2063_AvoidSpursData_t AS_Data;
u32 f_IF1_actual;
u32 rcvr_mode;
u32 ctfilt_sw;
u32 CTFiltMax[31];
u32 num_regs;
u8 reg[MT2063_REG_END_REGS];
};
/*
* mt2063_write - Write data into the I2C bus
*/
static u32 mt2063_write(struct mt2063_state *state, u8 reg, u8 *data, u32 len)
{
struct dvb_frontend *fe = state->frontend;
int ret;
u8 buf[60];
struct i2c_msg msg = {
.addr = state->config->tuner_address,
.flags = 0,
.buf = buf,
.len = len + 1
};
dprintk(2, "\n");
msg.buf[0] = reg;
if (len + 1 > sizeof(buf))
return -EINVAL; /* guard the fixed-size buffer */
memcpy(msg.buf + 1, data, len);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer(state->i2c, &msg, 1);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (ret < 0)
printk(KERN_ERR "%s error ret=%d\n", __func__, ret);
return ret;
}
/*
* mt2063_setreg - Write register data to the I2C bus, caching the value
*/
static u32 mt2063_setreg(struct mt2063_state *state, u8 reg, u8 val)
{
u32 status;
dprintk(2, "\n");
if (reg >= MT2063_REG_END_REGS)
return -ERANGE;
status = mt2063_write(state, reg, &val, 1);
if (status < 0)
return status;
state->reg[reg] = val;
return 0;
}
/*
* mt2063_read - Read data from the I2C bus
*/
static u32 mt2063_read(struct mt2063_state *state,
u8 subAddress, u8 *pData, u32 cnt)
{
u32 status = 0; /* Status to be returned */
struct dvb_frontend *fe = state->frontend;
u32 i = 0;
dprintk(2, "addr 0x%02x, cnt %d\n", subAddress, cnt);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
for (i = 0; i < cnt; i++) {
u8 b0[] = { subAddress + i };
struct i2c_msg msg[] = {
{
.addr = state->config->tuner_address,
.flags = 0,
.buf = b0,
.len = 1
}, {
.addr = state->config->tuner_address,
.flags = I2C_M_RD,
.buf = pData + i,
.len = 1
}
};
status = i2c_transfer(state->i2c, msg, 2);
dprintk(2, "addr 0x%02x, ret = %d, val = 0x%02x\n",
subAddress + i, status, *(pData + i));
if (status < 0)
break;
}
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
if (status < 0)
printk(KERN_ERR "Can't read from address 0x%02x,\n",
subAddress + i);
return status;
}
/*
* FIXME: Is this really needed?
*/
static int MT2063_Sleep(struct dvb_frontend *fe)
{
/*
* ToDo: Add code here to implement proper OS-level blocking
*/
msleep(100);
return 0;
}
/*
* Microtune spur avoidance
*/
/* Implement ceiling, floor functions. */
#define ceil(n, d) (((n) < 0) ? (-((-(n))/(d))) : (n)/(d) + ((n)%(d) != 0))
#define floor(n, d) (((n) < 0) ? (-((-(n))/(d))) - ((n)%(d) != 0) : (n)/(d))
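/*
 * Worked example (illustrative): C division truncates toward zero, so
 * the macros re-bias negative operands to get true floor/ceiling
 * semantics:
 *
 *   floor(7, 2) == 3     ceil(7, 2) == 4
 *   floor(-7, 2) == -4   ceil(-7, 2) == -3
 *
 * whereas plain (-7)/2 evaluates to -3.
 */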
struct MT2063_FIFZone_t {
s32 min_;
s32 max_;
};
static struct MT2063_ExclZone_t *InsertNode(struct MT2063_AvoidSpursData_t
*pAS_Info,
struct MT2063_ExclZone_t *pPrevNode)
{
struct MT2063_ExclZone_t *pNode;
dprintk(2, "\n");
/* Check for a node in the free list */
if (pAS_Info->freeZones != NULL) {
/* Use one from the free list */
pNode = pAS_Info->freeZones;
pAS_Info->freeZones = pNode->next_;
} else {
/* Grab a node from the array */
pNode = &pAS_Info->MT2063_ExclZones[pAS_Info->nZones];
}
if (pPrevNode != NULL) {
pNode->next_ = pPrevNode->next_;
pPrevNode->next_ = pNode;
} else { /* insert at the beginning of the list */
pNode->next_ = pAS_Info->usedZones;
pAS_Info->usedZones = pNode;
}
pAS_Info->nZones++;
return pNode;
}
static struct MT2063_ExclZone_t *RemoveNode(struct MT2063_AvoidSpursData_t
*pAS_Info,
struct MT2063_ExclZone_t *pPrevNode,
struct MT2063_ExclZone_t
*pNodeToRemove)
{
struct MT2063_ExclZone_t *pNext = pNodeToRemove->next_;
dprintk(2, "\n");
/* Make previous node point to the subsequent node */
if (pPrevNode != NULL)
pPrevNode->next_ = pNext;
/* Add pNodeToRemove to the beginning of the freeZones */
pNodeToRemove->next_ = pAS_Info->freeZones;
pAS_Info->freeZones = pNodeToRemove;
/* Decrement node count */
pAS_Info->nZones--;
return pNext;
}
/*
* MT_AddExclZone()
*
* Add (and merge) an exclusion zone into the list.
* If the range (f_min, f_max) is totally outside the
* 1st IF BW, ignore the entry.
* If the range (f_min, f_max) is negative, ignore the entry.
*/
static void MT2063_AddExclZone(struct MT2063_AvoidSpursData_t *pAS_Info,
u32 f_min, u32 f_max)
{
struct MT2063_ExclZone_t *pNode = pAS_Info->usedZones;
struct MT2063_ExclZone_t *pPrev = NULL;
struct MT2063_ExclZone_t *pNext = NULL;
dprintk(2, "\n");
/* Check to see if this overlaps the 1st IF filter */
if ((f_max > (pAS_Info->f_if1_Center - (pAS_Info->f_if1_bw / 2)))
&& (f_min < (pAS_Info->f_if1_Center + (pAS_Info->f_if1_bw / 2)))
&& (f_min < f_max)) {
/*
* 1 2 3 4 5 6
*
* New entry: |---| |--| |--| |-| |---| |--|
* or or or or or
* Existing: |--| |--| |--| |---| |-| |--|
*/
/* Check for our place in the list */
while ((pNode != NULL) && (pNode->max_ < f_min)) {
pPrev = pNode;
pNode = pNode->next_;
}
if ((pNode != NULL) && (pNode->min_ < f_max)) {
/* Combine me with pNode */
if (f_min < pNode->min_)
pNode->min_ = f_min;
if (f_max > pNode->max_)
pNode->max_ = f_max;
} else {
pNode = InsertNode(pAS_Info, pPrev);
pNode->min_ = f_min;
pNode->max_ = f_max;
}
/* Look for merging possibilities */
pNext = pNode->next_;
while ((pNext != NULL) && (pNext->min_ < pNode->max_)) {
if (pNext->max_ > pNode->max_)
pNode->max_ = pNext->max_;
/* Remove pNext, return ptr to pNext->next */
pNext = RemoveNode(pAS_Info, pNode, pNext);
}
}
}
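/*
 * Illustrative usage (hypothetical in-band values): overlapping zones
 * are merged rather than appended, e.g. starting from an empty list
 *
 *   MT2063_AddExclZone(p, f0 + 100, f0 + 200);  ->  [f0+100, f0+200]
 *   MT2063_AddExclZone(p, f0 + 150, f0 + 300);  ->  [f0+100, f0+300]
 *   MT2063_AddExclZone(p, f0 + 400, f0 + 350);  ->  ignored (negative)
 *
 * where f0 is some frequency inside the 1st IF passband; ranges wholly
 * outside that passband are likewise ignored.
 */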
/*
* Reset all exclusion zones.
* Add zones to protect the PLL FracN regions near zero
*/
static void MT2063_ResetExclZones(struct MT2063_AvoidSpursData_t *pAS_Info)
{
u32 center;
dprintk(2, "\n");
pAS_Info->nZones = 0; /* this clears the used list */
pAS_Info->usedZones = NULL; /* reset ptr */
pAS_Info->freeZones = NULL; /* reset ptr */
center =
pAS_Info->f_ref *
((pAS_Info->f_if1_Center - pAS_Info->f_if1_bw / 2 +
pAS_Info->f_in) / pAS_Info->f_ref) - pAS_Info->f_in;
while (center <
pAS_Info->f_if1_Center + pAS_Info->f_if1_bw / 2 +
pAS_Info->f_LO1_FracN_Avoid) {
/* Exclude LO1 FracN */
MT2063_AddExclZone(pAS_Info,
center - pAS_Info->f_LO1_FracN_Avoid,
center - 1);
MT2063_AddExclZone(pAS_Info, center + 1,
center + pAS_Info->f_LO1_FracN_Avoid);
center += pAS_Info->f_ref;
}
center =
pAS_Info->f_ref *
((pAS_Info->f_if1_Center - pAS_Info->f_if1_bw / 2 -
pAS_Info->f_out) / pAS_Info->f_ref) + pAS_Info->f_out;
while (center <
pAS_Info->f_if1_Center + pAS_Info->f_if1_bw / 2 +
pAS_Info->f_LO2_FracN_Avoid) {
/* Exclude LO2 FracN */
MT2063_AddExclZone(pAS_Info,
center - pAS_Info->f_LO2_FracN_Avoid,
center - 1);
MT2063_AddExclZone(pAS_Info, center + 1,
center + pAS_Info->f_LO2_FracN_Avoid);
center += pAS_Info->f_ref;
}
if (MT2063_EXCLUDE_US_DECT_FREQUENCIES(pAS_Info->avoidDECT)) {
/* Exclude LO1 values that conflict with DECT channels */
MT2063_AddExclZone(pAS_Info, 1920836000 - pAS_Info->f_in, 1922236000 - pAS_Info->f_in); /* Ctr = 1921.536 */
MT2063_AddExclZone(pAS_Info, 1922564000 - pAS_Info->f_in, 1923964000 - pAS_Info->f_in); /* Ctr = 1923.264 */
MT2063_AddExclZone(pAS_Info, 1924292000 - pAS_Info->f_in, 1925692000 - pAS_Info->f_in); /* Ctr = 1924.992 */
MT2063_AddExclZone(pAS_Info, 1926020000 - pAS_Info->f_in, 1927420000 - pAS_Info->f_in); /* Ctr = 1926.720 */
MT2063_AddExclZone(pAS_Info, 1927748000 - pAS_Info->f_in, 1929148000 - pAS_Info->f_in); /* Ctr = 1928.448 */
}
if (MT2063_EXCLUDE_EURO_DECT_FREQUENCIES(pAS_Info->avoidDECT)) {
MT2063_AddExclZone(pAS_Info, 1896644000 - pAS_Info->f_in, 1898044000 - pAS_Info->f_in); /* Ctr = 1897.344 */
MT2063_AddExclZone(pAS_Info, 1894916000 - pAS_Info->f_in, 1896316000 - pAS_Info->f_in); /* Ctr = 1895.616 */
MT2063_AddExclZone(pAS_Info, 1893188000 - pAS_Info->f_in, 1894588000 - pAS_Info->f_in); /* Ctr = 1893.888 */
MT2063_AddExclZone(pAS_Info, 1891460000 - pAS_Info->f_in, 1892860000 - pAS_Info->f_in); /* Ctr = 1892.16 */
MT2063_AddExclZone(pAS_Info, 1889732000 - pAS_Info->f_in, 1891132000 - pAS_Info->f_in); /* Ctr = 1890.432 */
MT2063_AddExclZone(pAS_Info, 1888004000 - pAS_Info->f_in, 1889404000 - pAS_Info->f_in); /* Ctr = 1888.704 */
MT2063_AddExclZone(pAS_Info, 1886276000 - pAS_Info->f_in, 1887676000 - pAS_Info->f_in); /* Ctr = 1886.976 */
MT2063_AddExclZone(pAS_Info, 1884548000 - pAS_Info->f_in, 1885948000 - pAS_Info->f_in); /* Ctr = 1885.248 */
MT2063_AddExclZone(pAS_Info, 1882820000 - pAS_Info->f_in, 1884220000 - pAS_Info->f_in); /* Ctr = 1883.52 */
MT2063_AddExclZone(pAS_Info, 1881092000 - pAS_Info->f_in, 1882492000 - pAS_Info->f_in); /* Ctr = 1881.792 */
}
}
/*
* MT_ChooseFirstIF - Choose the best available 1st IF
* If f_Desired is not excluded, choose that first.
* Otherwise, return the value closest to f_Center that is
* not excluded
*/
static u32 MT2063_ChooseFirstIF(struct MT2063_AvoidSpursData_t *pAS_Info)
{
/*
* Update "f_Desired" to be the nearest "combinational-multiple" of
* "f_LO1_Step".
* The resulting number, F_LO1 must be a multiple of f_LO1_Step.
* And F_LO1 is the arithmetic sum of f_in + f_Center.
* Neither f_in, nor f_Center must be a multiple of f_LO1_Step.
* However, the sum must be.
*/
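/*
 * Worked example with hypothetical numbers: for f_LO1_Step = 250 kHz,
 * f_in = 651.3 MHz and f_if1_Request = 1218.0 MHz, the rounding below
 * yields f_Desired = 1217.95 MHz. Neither f_in nor f_Desired is itself
 * a multiple of 250 kHz, but their sum f_LO1 = 1869.25 MHz is.
 */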
const u32 f_Desired =
pAS_Info->f_LO1_Step *
((pAS_Info->f_if1_Request + pAS_Info->f_in +
pAS_Info->f_LO1_Step / 2) / pAS_Info->f_LO1_Step) -
pAS_Info->f_in;
const u32 f_Step =
(pAS_Info->f_LO1_Step >
pAS_Info->f_LO2_Step) ? pAS_Info->f_LO1_Step : pAS_Info->
f_LO2_Step;
u32 f_Center;
s32 i;
s32 j = 0;
u32 bDesiredExcluded = 0;
u32 bZeroExcluded = 0;
s32 tmpMin, tmpMax;
s32 bestDiff;
struct MT2063_ExclZone_t *pNode = pAS_Info->usedZones;
struct MT2063_FIFZone_t zones[MT2063_MAX_ZONES];
dprintk(2, "\n");
if (pAS_Info->nZones == 0)
return f_Desired;
/*
* f_Center needs to be an integer multiple of f_Step away
* from f_Desired
*/
if (pAS_Info->f_if1_Center > f_Desired)
f_Center =
f_Desired +
f_Step *
((pAS_Info->f_if1_Center - f_Desired +
f_Step / 2) / f_Step);
else
f_Center =
f_Desired -
f_Step *
((f_Desired - pAS_Info->f_if1_Center +
f_Step / 2) / f_Step);
/*
* Take MT_ExclZones, center around f_Center and change the
* resolution to f_Step
*/
while (pNode != NULL) {
/* floor function */
tmpMin =
floor((s32) (pNode->min_ - f_Center), (s32) f_Step);
/* ceil function */
tmpMax =
ceil((s32) (pNode->max_ - f_Center), (s32) f_Step);
if ((pNode->min_ < f_Desired) && (pNode->max_ > f_Desired))
bDesiredExcluded = 1;
if ((tmpMin < 0) && (tmpMax > 0))
bZeroExcluded = 1;
/* See if this zone overlaps the previous */
if ((j > 0) && (tmpMin < zones[j - 1].max_))
zones[j - 1].max_ = tmpMax;
else {
/* Add new zone */
zones[j].min_ = tmpMin;
zones[j].max_ = tmpMax;
j++;
}
pNode = pNode->next_;
}
/*
* If the desired is okay, return with it
*/
if (bDesiredExcluded == 0)
return f_Desired;
/*
* If the desired is excluded and the center is okay, return with it
*/
if (bZeroExcluded == 0)
return f_Center;
/* Find the value closest to 0 (f_Center) */
bestDiff = zones[0].min_;
for (i = 0; i < j; i++) {
if (abs(zones[i].min_) < abs(bestDiff))
bestDiff = zones[i].min_;
if (abs(zones[i].max_) < abs(bestDiff))
bestDiff = zones[i].max_;
}
if (bestDiff < 0)
return f_Center - ((u32) (-bestDiff) * f_Step);
return f_Center + (bestDiff * f_Step);
}
/**
* gcd() - Uses Euclid's algorithm
*
* @u: first unsigned value
* @v: second unsigned value
*
* Returns the greatest common divisor of u and v; if either value is 0,
* the other value is returned as the result.
*/
static u32 MT2063_gcd(u32 u, u32 v)
{
u32 r;
while (v != 0) {
r = u % v;
u = v;
v = r;
}
return u;
}
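/*
 * Illustrative usage (values are hypothetical, not from a datasheet):
 * the spur search below scales frequencies by gcd-based factors so the
 * intermediate products stay within 32 bits, e.g.
 *
 *   MT2063_gcd(16000000, 1220000000) == 4000000
 *   MT2063_gcd(0, 123) == 123          (either argument may be zero)
 */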
/**
* IsSpurInBand() - Checks to see if a spur will be present within the IF's
* bandwidth. (fIFOut +/- fIFBW, -fIFOut +/- fIFBW)
*
* ma mb mc md
* <--+-+-+-------------------+-------------------+-+-+-->
* | ^ 0 ^ |
* ^ b=-fIFOut+fIFBW/2 -b=+fIFOut-fIFBW/2 ^
* a=-fIFOut-fIFBW/2 -a=+fIFOut+fIFBW/2
*
* Note that some equations are doubled to prevent round-off
* problems when calculating fIFBW/2
*
* @pAS_Info: Avoid Spurs information block
* @fm: If spur, amount f_IF1 has to move negative
* @fp: If spur, amount f_IF1 has to move positive
*
* Returns 1 if an LO spur would be present, otherwise 0.
*/
static u32 IsSpurInBand(struct MT2063_AvoidSpursData_t *pAS_Info,
u32 *fm, u32 * fp)
{
/*
** Calculate LO frequency settings.
*/
u32 n, n0;
const u32 f_LO1 = pAS_Info->f_LO1;
const u32 f_LO2 = pAS_Info->f_LO2;
const u32 d = pAS_Info->f_out + pAS_Info->f_out_bw / 2;
const u32 c = d - pAS_Info->f_out_bw;
const u32 f = pAS_Info->f_zif_bw / 2;
const u32 f_Scale = (f_LO1 / (UINT_MAX / 2 / pAS_Info->maxH1)) + 1;
s32 f_nsLO1, f_nsLO2;
s32 f_Spur;
u32 ma, mb, mc, md, me, mf;
u32 lo_gcd, gd_Scale, gc_Scale, gf_Scale, hgds, hgfs, hgcs;
dprintk(2, "\n");
*fm = 0;
/*
** For each edge (d, c & f), calculate a scale, based on the gcd
** of f_LO1, f_LO2 and the edge value. Use the larger of this
** gcd-based scale factor or f_Scale.
*/
lo_gcd = MT2063_gcd(f_LO1, f_LO2);
gd_Scale = max((u32) MT2063_gcd(lo_gcd, d), f_Scale);
hgds = gd_Scale / 2;
gc_Scale = max((u32) MT2063_gcd(lo_gcd, c), f_Scale);
hgcs = gc_Scale / 2;
gf_Scale = max((u32) MT2063_gcd(lo_gcd, f), f_Scale);
hgfs = gf_Scale / 2;
n0 = DIV_ROUND_UP(f_LO2 - d, f_LO1 - f_LO2);
/* Check out all multiples of LO1 from n0 to m_maxLOSpurHarmonic */
for (n = n0; n <= pAS_Info->maxH1; ++n) {
md = (n * ((f_LO1 + hgds) / gd_Scale) -
((d + hgds) / gd_Scale)) / ((f_LO2 + hgds) / gd_Scale);
/* If # fLO2 harmonics > m_maxLOSpurHarmonic, then no spurs present */
if (md >= pAS_Info->maxH1)
break;
ma = (n * ((f_LO1 + hgds) / gd_Scale) +
((d + hgds) / gd_Scale)) / ((f_LO2 + hgds) / gd_Scale);
/* If no spurs between +/- (f_out + f_IFBW/2), then try next harmonic */
if (md == ma)
continue;
mc = (n * ((f_LO1 + hgcs) / gc_Scale) -
((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale);
if (mc != md) {
f_nsLO1 = (s32) (n * (f_LO1 / gc_Scale));
f_nsLO2 = (s32) (mc * (f_LO2 / gc_Scale));
f_Spur =
(gc_Scale * (f_nsLO1 - f_nsLO2)) +
n * (f_LO1 % gc_Scale) - mc * (f_LO2 % gc_Scale);
*fp = ((f_Spur - (s32) c) / (mc - n)) + 1;
*fm = (((s32) d - f_Spur) / (mc - n)) + 1;
return 1;
}
/* Location of Zero-IF-spur to be checked */
me = (n * ((f_LO1 + hgfs) / gf_Scale) +
((f + hgfs) / gf_Scale)) / ((f_LO2 + hgfs) / gf_Scale);
mf = (n * ((f_LO1 + hgfs) / gf_Scale) -
((f + hgfs) / gf_Scale)) / ((f_LO2 + hgfs) / gf_Scale);
if (me != mf) {
f_nsLO1 = n * (f_LO1 / gf_Scale);
f_nsLO2 = me * (f_LO2 / gf_Scale);
f_Spur =
(gf_Scale * (f_nsLO1 - f_nsLO2)) +
n * (f_LO1 % gf_Scale) - me * (f_LO2 % gf_Scale);
*fp = ((f_Spur + (s32) f) / (me - n)) + 1;
*fm = (((s32) f - f_Spur) / (me - n)) + 1;
return 1;
}
mb = (n * ((f_LO1 + hgcs) / gc_Scale) +
((c + hgcs) / gc_Scale)) / ((f_LO2 + hgcs) / gc_Scale);
if (ma != mb) {
f_nsLO1 = n * (f_LO1 / gc_Scale);
f_nsLO2 = ma * (f_LO2 / gc_Scale);
f_Spur =
(gc_Scale * (f_nsLO1 - f_nsLO2)) +
n * (f_LO1 % gc_Scale) - ma * (f_LO2 % gc_Scale);
*fp = (((s32) d + f_Spur) / (ma - n)) + 1;
*fm = (-(f_Spur + (s32) c) / (ma - n)) + 1;
return 1;
}
}
/* No spurs found */
return 0;
}
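/*
 * Reading the loop above, sketched informally: a spur falls in-band
 * when n*f_LO1 - m*f_LO2 lands inside one of the +/-(f_out +/- BW/2)
 * windows, which shows up as the scaled floor-divide counts (ma..mf)
 * disagreeing across a band edge; fm and fp then report how far f_IF1
 * must move, negative or positive, to clear the spur.
 */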
/*
* MT_AvoidSpurs() - Main entry point to avoid spurs.
* Checks for existing spurs in present LO1, LO2 freqs
* and if present, chooses spur-free LO1, LO2 combination
* that tunes the same input/output frequencies.
*/
static u32 MT2063_AvoidSpurs(struct MT2063_AvoidSpursData_t *pAS_Info)
{
u32 status = 0;
u32 fm, fp; /* restricted range on LO's */
pAS_Info->bSpurAvoided = 0;
pAS_Info->nSpursFound = 0;
dprintk(2, "\n");
if (pAS_Info->maxH1 == 0)
return 0;
/*
* Avoid LO Generated Spurs
*
* Make sure that have no LO-related spurs within the IF output
* bandwidth.
*
* If there is an LO spur in this band, start at the current IF1 frequency
* and work out until we find a spur-free frequency or run up against the
* 1st IF SAW band edge. Use temporary copies of fLO1 and fLO2 so that they
* will be unchanged if a spur-free setting is not found.
*/
pAS_Info->bSpurPresent = IsSpurInBand(pAS_Info, &fm, &fp);
if (pAS_Info->bSpurPresent) {
u32 zfIF1 = pAS_Info->f_LO1 - pAS_Info->f_in; /* current attempt at a 1st IF */
u32 zfLO1 = pAS_Info->f_LO1; /* current attempt at an LO1 freq */
u32 zfLO2 = pAS_Info->f_LO2; /* current attempt at an LO2 freq */
u32 delta_IF1;
u32 new_IF1;
/*
** Spur was found, attempt to find a spur-free 1st IF
*/
do {
pAS_Info->nSpursFound++;
/* Raise f_IF1_upper, if needed */
MT2063_AddExclZone(pAS_Info, zfIF1 - fm, zfIF1 + fp);
/* Choose next IF1 that is closest to f_IF1_CENTER */
new_IF1 = MT2063_ChooseFirstIF(pAS_Info);
if (new_IF1 > zfIF1) {
pAS_Info->f_LO1 += (new_IF1 - zfIF1);
pAS_Info->f_LO2 += (new_IF1 - zfIF1);
} else {
pAS_Info->f_LO1 -= (zfIF1 - new_IF1);
pAS_Info->f_LO2 -= (zfIF1 - new_IF1);
}
zfIF1 = new_IF1;
if (zfIF1 > pAS_Info->f_if1_Center)
delta_IF1 = zfIF1 - pAS_Info->f_if1_Center;
else
delta_IF1 = pAS_Info->f_if1_Center - zfIF1;
pAS_Info->bSpurPresent = IsSpurInBand(pAS_Info, &fm, &fp);
/*
* Continue while the new 1st IF is still within the 1st IF bandwidth
* and there is a spur in the band (again)
*/
} while ((2 * delta_IF1 + pAS_Info->f_out_bw <= pAS_Info->f_if1_bw) && pAS_Info->bSpurPresent);
/*
* Use the LO-spur free values found. If the search went all
* the way to the 1st IF band edge and always found spurs, just
* leave the original choice. It's as "good" as any other.
*/
if (pAS_Info->bSpurPresent == 1) {
status |= MT2063_SPUR_PRESENT_ERR;
pAS_Info->f_LO1 = zfLO1;
pAS_Info->f_LO2 = zfLO2;
} else
pAS_Info->bSpurAvoided = 1;
}
status |=
((pAS_Info->
nSpursFound << MT2063_SPUR_SHIFT) & MT2063_SPUR_CNT_MASK);
return status;
}
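/*
 * Illustrative decoding of the returned status word, using the
 * constants defined near the top of this file:
 *
 *   u32 n = (status & MT2063_SPUR_CNT_MASK) >> MT2063_SPUR_SHIFT;
 *   bool spur_left = status & MT2063_SPUR_PRESENT_ERR;
 *
 * n counts the spurs that were stepped around; spur_left signals that
 * an unavoidable LO-related spur remains within the output bandwidth.
 */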
/*
* Constants used by the tuning algorithm
*/
#define MT2063_REF_FREQ (16000000UL) /* Reference oscillator Frequency (in Hz) */
#define MT2063_IF1_BW (22000000UL) /* The IF1 filter bandwidth (in Hz) */
#define MT2063_TUNE_STEP_SIZE (50000UL) /* Tune in steps of 50 kHz */
#define MT2063_SPUR_STEP_HZ (250000UL) /* Step size (in Hz) to move IF1 when avoiding spurs */
#define MT2063_ZIF_BW (2000000UL) /* Zero-IF spur-free bandwidth (in Hz) */
#define MT2063_MAX_HARMONICS_1 (15UL) /* Highest intra-tuner LO Spur Harmonic to be avoided */
#define MT2063_MAX_HARMONICS_2 (5UL) /* Highest inter-tuner LO Spur Harmonic to be avoided */
#define MT2063_MIN_LO_SEP (1000000UL) /* Minimum inter-tuner LO frequency separation */
#define MT2063_LO1_FRACN_AVOID (0UL) /* LO1 FracN numerator avoid region (in Hz) */
#define MT2063_LO2_FRACN_AVOID (199999UL) /* LO2 FracN numerator avoid region (in Hz) */
#define MT2063_MIN_FIN_FREQ (44000000UL) /* Minimum input frequency (in Hz) */
#define MT2063_MAX_FIN_FREQ (1100000000UL) /* Maximum input frequency (in Hz) */
#define MT2063_MIN_FOUT_FREQ (36000000UL) /* Minimum output frequency (in Hz) */
#define MT2063_MAX_FOUT_FREQ (57000000UL) /* Maximum output frequency (in Hz) */
#define MT2063_MIN_DNC_FREQ (1293000000UL) /* Minimum LO2 frequency (in Hz) */
#define MT2063_MAX_DNC_FREQ (1614000000UL) /* Maximum LO2 frequency (in Hz) */
#define MT2063_MIN_UPC_FREQ (1396000000UL) /* Minimum LO1 frequency (in Hz) */
#define MT2063_MAX_UPC_FREQ (2750000000UL) /* Maximum LO1 frequency (in Hz) */
/*
* Define the supported Part/Rev codes for the MT2063
*/
#define MT2063_B0 (0x9B)
#define MT2063_B1 (0x9C)
#define MT2063_B2 (0x9D)
#define MT2063_B3 (0x9E)
/**
* mt2063_lockStatus - Checks to see if LO1 and LO2 are locked
*
* @state: struct mt2063_state pointer
*
* This function returns 0 if there is no lock, 1 if locked, and a negative value on error
*/
static unsigned int mt2063_lockStatus(struct mt2063_state *state)
{
const u32 nMaxWait = 100; /* wait a maximum of 100 msec */
const u32 nPollRate = 2; /* poll status bits every 2 ms */
const u32 nMaxLoops = nMaxWait / nPollRate;
const u8 LO1LK = 0x80;
u8 LO2LK = 0x08;
u32 status;
u32 nDelays = 0;
dprintk(2, "\n");
/* LO2 Lock bit was in a different place for B0 version */
if (state->tuner_id == MT2063_B0)
LO2LK = 0x40;
do {
status = mt2063_read(state, MT2063_REG_LO_STATUS,
&state->reg[MT2063_REG_LO_STATUS], 1);
if (status < 0)
return status;
if ((state->reg[MT2063_REG_LO_STATUS] & (LO1LK | LO2LK)) ==
(LO1LK | LO2LK)) {
return TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO;
}
msleep(nPollRate); /* Wait between retries */
} while (++nDelays < nMaxLoops);
/*
* Got no lock or partial lock
*/
return 0;
}
/*
* Constants for setting receiver modes.
* (6 modes defined at this time, enumerated by mt2063_delivery_sys)
* (DNC1GC & DNC2GC are the values, which are used, when the specific
* DNC Output is selected, the other is always off)
*
* enum mt2063_delivery_sys
* -------------+----------------------------------------------
* Mode 0 : | MT2063_CABLE_QAM
* Mode 1 : | MT2063_CABLE_ANALOG
* Mode 2 : | MT2063_OFFAIR_COFDM
* Mode 3 : | MT2063_OFFAIR_COFDM_SAWLESS
* Mode 4 : | MT2063_OFFAIR_ANALOG
* Mode 5 : | MT2063_OFFAIR_8VSB
* --------------+----------------------------------------------
*
* |<---------- Mode -------------->|
* Reg Field | 0 | 1 | 2 | 3 | 4 | 5 |
* ------------+-----+-----+-----+-----+-----+-----+
* RFAGCen | OFF | OFF | OFF | OFF | OFF | OFF
* LNARin | 0 | 0 | 3 | 3 | 3 | 3
* FIFFQen | 1 | 1 | 1 | 1 | 1 | 1
* FIFFq | 0 | 0 | 0 | 0 | 0 | 0
* DNC1gc | 0 | 0 | 0 | 0 | 0 | 0
* DNC2gc | 0 | 0 | 0 | 0 | 0 | 0
* GCU Auto | 1 | 1 | 1 | 1 | 1 | 1
* LNA max Atn | 31 | 31 | 31 | 31 | 31 | 31
* LNA Target | 44 | 43 | 43 | 43 | 43 | 43
* ign RF Ovl | 0 | 0 | 0 | 0 | 0 | 0
* RF max Atn | 31 | 31 | 31 | 31 | 31 | 31
* PD1 Target | 36 | 36 | 38 | 38 | 36 | 38
* ign FIF Ovl | 0 | 0 | 0 | 0 | 0 | 0
* FIF max Atn | 5 | 5 | 5 | 5 | 5 | 5
* PD2 Target | 40 | 33 | 42 | 42 | 33 | 42
*/
enum mt2063_delivery_sys {
MT2063_CABLE_QAM = 0,
MT2063_CABLE_ANALOG,
MT2063_OFFAIR_COFDM,
MT2063_OFFAIR_COFDM_SAWLESS,
MT2063_OFFAIR_ANALOG,
MT2063_OFFAIR_8VSB,
MT2063_NUM_RCVR_MODES
};
static const char *mt2063_mode_name[] = {
[MT2063_CABLE_QAM] = "digital cable",
[MT2063_CABLE_ANALOG] = "analog cable",
[MT2063_OFFAIR_COFDM] = "digital offair",
[MT2063_OFFAIR_COFDM_SAWLESS] = "digital offair without SAW",
[MT2063_OFFAIR_ANALOG] = "analog offair",
[MT2063_OFFAIR_8VSB] = "analog offair 8vsb",
};
static const u8 RFAGCEN[] = { 0, 0, 0, 0, 0, 0 };
static const u8 LNARIN[] = { 0, 0, 3, 3, 3, 3 };
static const u8 FIFFQEN[] = { 1, 1, 1, 1, 1, 1 };
static const u8 FIFFQ[] = { 0, 0, 0, 0, 0, 0 };
static const u8 DNC1GC[] = { 0, 0, 0, 0, 0, 0 };
static const u8 DNC2GC[] = { 0, 0, 0, 0, 0, 0 };
static const u8 ACLNAMAX[] = { 31, 31, 31, 31, 31, 31 };
static const u8 LNATGT[] = { 44, 43, 43, 43, 43, 43 };
static const u8 RFOVDIS[] = { 0, 0, 0, 0, 0, 0 };
static const u8 ACRFMAX[] = { 31, 31, 31, 31, 31, 31 };
static const u8 PD1TGT[] = { 36, 36, 38, 38, 36, 38 };
static const u8 FIFOVDIS[] = { 0, 0, 0, 0, 0, 0 };
static const u8 ACFIFMAX[] = { 29, 29, 29, 29, 29, 29 };
static const u8 PD2TGT[] = { 40, 33, 38, 42, 30, 38 };
/*
* mt2063_get_dnc_output_enable()
*/
static u32 mt2063_get_dnc_output_enable(struct mt2063_state *state,
enum MT2063_DNC_Output_Enable *pValue)
{
dprintk(2, "\n");
if ((state->reg[MT2063_REG_DNC_GAIN] & 0x03) == 0x03) { /* if DNC1 is off */
if ((state->reg[MT2063_REG_VGA_GAIN] & 0x03) == 0x03) /* if DNC2 is off */
*pValue = MT2063_DNC_NONE;
else
*pValue = MT2063_DNC_2;
} else { /* DNC1 is on */
if ((state->reg[MT2063_REG_VGA_GAIN] & 0x03) == 0x03) /* if DNC2 is off */
*pValue = MT2063_DNC_1;
else
*pValue = MT2063_DNC_BOTH;
}
return 0;
}
/*
* mt2063_set_dnc_output_enable()
*/
static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
enum MT2063_DNC_Output_Enable nValue)
{
u32 status = 0; /* Status to be returned */
u8 val = 0;
dprintk(2, "\n");
/* selects, which DNC output is used */
switch (nValue) {
case MT2063_DNC_NONE:
val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | 0x03; /* Set DNC1GC=3 */
if (state->reg[MT2063_REG_DNC_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_DNC_GAIN, val);
val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | 0x03; /* Set DNC2GC=3 */
if (state->reg[MT2063_REG_VGA_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_VGA_GAIN, val);
val = (state->reg[MT2063_REG_RSVD_20] & ~0x40); /* Set PD2MUX=0 */
if (state->reg[MT2063_REG_RSVD_20] != val)
status |= mt2063_setreg(state, MT2063_REG_RSVD_20, val);
break;
case MT2063_DNC_1:
val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | (DNC1GC[state->rcvr_mode] & 0x03); /* Set DNC1GC=x */
if (state->reg[MT2063_REG_DNC_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_DNC_GAIN, val);
val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | 0x03; /* Set DNC2GC=3 */
if (state->reg[MT2063_REG_VGA_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_VGA_GAIN, val);
val = (state->reg[MT2063_REG_RSVD_20] & ~0x40); /* Set PD2MUX=0 */
if (state->reg[MT2063_REG_RSVD_20] != val)
status |= mt2063_setreg(state, MT2063_REG_RSVD_20, val);
break;
case MT2063_DNC_2:
val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | 0x03; /* Set DNC1GC=3 */
if (state->reg[MT2063_REG_DNC_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_DNC_GAIN, val);
val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | (DNC2GC[state->rcvr_mode] & 0x03); /* Set DNC2GC=x */
if (state->reg[MT2063_REG_VGA_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_VGA_GAIN, val);
val = (state->reg[MT2063_REG_RSVD_20] | 0x40); /* Set PD2MUX=1 */
if (state->reg[MT2063_REG_RSVD_20] != val)
status |= mt2063_setreg(state, MT2063_REG_RSVD_20, val);
break;
case MT2063_DNC_BOTH:
val = (state->reg[MT2063_REG_DNC_GAIN] & 0xFC) | (DNC1GC[state->rcvr_mode] & 0x03); /* Set DNC1GC=x */
if (state->reg[MT2063_REG_DNC_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_DNC_GAIN, val);
val = (state->reg[MT2063_REG_VGA_GAIN] & 0xFC) | (DNC2GC[state->rcvr_mode] & 0x03); /* Set DNC2GC=x */
if (state->reg[MT2063_REG_VGA_GAIN] != val)
status |= mt2063_setreg(state, MT2063_REG_VGA_GAIN, val);
val = (state->reg[MT2063_REG_RSVD_20] | 0x40); /* Set PD2MUX=1 */
if (state->reg[MT2063_REG_RSVD_20] != val)
status |= mt2063_setreg(state, MT2063_REG_RSVD_20, val);
break;
default:
break;
}
return status;
}
/*
* MT2063_SetReceiverMode() - Set the MT2063 receiver mode, according with
* the selected enum mt2063_delivery_sys type.
*
* (DNC1GC & DNC2GC are the values used when the specific DNC output is
* selected; the other output is always off)
*
* @state: ptr to mt2063_state structure
* @Mode: desired receiver delivery system
*
* Note: Register cache must be valid for it to work
*/
static u32 MT2063_SetReceiverMode(struct mt2063_state *state,
enum mt2063_delivery_sys Mode)
{
u32 status = 0; /* Status to be returned */
u8 val;
enum MT2063_DNC_Output_Enable longval;
dprintk(2, "\n");
if (Mode >= MT2063_NUM_RCVR_MODES)
status = -ERANGE;
/* RFAGCen */
if (status >= 0) {
val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x40) |
(RFAGCEN[Mode] ? 0x40 : 0x00);
if (state->reg[MT2063_REG_PD1_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
}
/* LNARin */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_CTRL_2C] & (u8) ~0x03) |
(LNARIN[Mode] & 0x03);
if (state->reg[MT2063_REG_CTRL_2C] != val)
status |= mt2063_setreg(state, MT2063_REG_CTRL_2C, val);
}
/* FIFFQEN and FIFFQ */
if (status >= 0) {
val = (state->reg[MT2063_REG_FIFF_CTRL2] & (u8) ~0xF0) |
(FIFFQEN[Mode] << 7) | (FIFFQ[Mode] << 4);
if (state->reg[MT2063_REG_FIFF_CTRL2] != val) {
status |= mt2063_setreg(state, MT2063_REG_FIFF_CTRL2, val);
/* trigger FIFF calibration, needed after changing FIFFQ */
val = (state->reg[MT2063_REG_FIFF_CTRL] | (u8) 0x01);
status |= mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
val = (state->reg[MT2063_REG_FIFF_CTRL] & (u8) ~0x01);
status |= mt2063_setreg(state, MT2063_REG_FIFF_CTRL, val);
}
}
/* DNC1GC & DNC2GC */
status |= mt2063_get_dnc_output_enable(state, &longval);
status |= mt2063_set_dnc_output_enable(state, longval);
/* acLNAmax */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_LNA_OV] & (u8) ~0x1F) |
(ACLNAMAX[Mode] & 0x1F);
if (state->reg[MT2063_REG_LNA_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_OV, val);
}
/* LNATGT */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x3F) |
(LNATGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_LNA_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
}
/* ACRF */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_RF_OV] & (u8) ~0x1F) |
(ACRFMAX[Mode] & 0x1F);
if (state->reg[MT2063_REG_RF_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_RF_OV, val);
}
/* PD1TGT */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x3F) |
(PD1TGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_PD1_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
}
/* FIFATN */
if (status >= 0) {
u8 val = ACFIFMAX[Mode];
if (state->reg[MT2063_REG_PART_REV] != MT2063_B3 && val > 5)
val = 5;
val = (state->reg[MT2063_REG_FIF_OV] & (u8) ~0x1F) |
(val & 0x1F);
if (state->reg[MT2063_REG_FIF_OV] != val)
status |= mt2063_setreg(state, MT2063_REG_FIF_OV, val);
}
/* PD2TGT */
if (status >= 0) {
u8 val = (state->reg[MT2063_REG_PD2_TGT] & (u8) ~0x3F) |
(PD2TGT[Mode] & 0x3F);
if (state->reg[MT2063_REG_PD2_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD2_TGT, val);
}
/* Ignore ATN Overload */
if (status >= 0) {
val = (state->reg[MT2063_REG_LNA_TGT] & (u8) ~0x80) |
(RFOVDIS[Mode] ? 0x80 : 0x00);
if (state->reg[MT2063_REG_LNA_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_LNA_TGT, val);
}
/* Ignore FIF Overload */
if (status >= 0) {
val = (state->reg[MT2063_REG_PD1_TGT] & (u8) ~0x80) |
(FIFOVDIS[Mode] ? 0x80 : 0x00);
if (state->reg[MT2063_REG_PD1_TGT] != val)
status |= mt2063_setreg(state, MT2063_REG_PD1_TGT, val);
}
if (status >= 0) {
state->rcvr_mode = Mode;
dprintk(1, "mt2063 mode changed to %s\n",
mt2063_mode_name[state->rcvr_mode]);
}
return status;
}
/*
* MT2063_ClearPowerMaskBits () - Clears the power-down mask bits for various
* sections of the MT2063
*
* @Bits: Mask bits to be cleared.
*
* See definition of MT2063_Mask_Bits type for description
* of each of the power bits.
*/
static u32 MT2063_ClearPowerMaskBits(struct mt2063_state *state,
enum MT2063_Mask_Bits Bits)
{
u32 status = 0;
dprintk(2, "\n");
Bits = (enum MT2063_Mask_Bits)(Bits & MT2063_ALL_SD); /* Only valid bits for this tuner */
if ((Bits & 0xFF00) != 0) {
state->reg[MT2063_REG_PWR_2] &= ~(u8) (Bits >> 8);
status |=
mt2063_write(state,
MT2063_REG_PWR_2,
&state->reg[MT2063_REG_PWR_2], 1);
}
if ((Bits & 0xFF) != 0) {
state->reg[MT2063_REG_PWR_1] &= ~(u8) (Bits & 0xFF);
status |=
mt2063_write(state,
MT2063_REG_PWR_1,
&state->reg[MT2063_REG_PWR_1], 1);
}
return status;
}
/*
* MT2063_SoftwareShutdown() - Enables or disables software shutdown function.
* When Shutdown is 1, any section whose power
* mask is set will be shutdown.
*/
static u32 MT2063_SoftwareShutdown(struct mt2063_state *state, u8 Shutdown)
{
u32 status;
dprintk(2, "\n");
if (Shutdown == 1)
state->reg[MT2063_REG_PWR_1] |= 0x04;
else
state->reg[MT2063_REG_PWR_1] &= ~0x04;
status = mt2063_write(state,
MT2063_REG_PWR_1,
&state->reg[MT2063_REG_PWR_1], 1);
if (Shutdown != 1) {
state->reg[MT2063_REG_BYP_CTRL] =
(state->reg[MT2063_REG_BYP_CTRL] & 0x9F) | 0x40;
status |=
mt2063_write(state,
MT2063_REG_BYP_CTRL,
&state->reg[MT2063_REG_BYP_CTRL],
1);
state->reg[MT2063_REG_BYP_CTRL] =
(state->reg[MT2063_REG_BYP_CTRL] & 0x9F);
status |=
mt2063_write(state,
MT2063_REG_BYP_CTRL,
&state->reg[MT2063_REG_BYP_CTRL],
1);
}
return status;
}
static u32 MT2063_Round_fLO(u32 f_LO, u32 f_LO_Step, u32 f_ref)
{
return f_ref * (f_LO / f_ref)
+ f_LO_Step * (((f_LO % f_ref) + (f_LO_Step / 2)) / f_LO_Step);
}
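/*
* Worked example (illustrative numbers, not from this driver): with
* f_ref = 16000000 Hz, f_LO_Step = 250000 Hz and f_LO = 1234567890 Hz,
* f_LO / f_ref = 77, the remainder is 2567890 Hz, and
* (2567890 + 125000) / 250000 = 10 steps, so the rounded LO is
* 16000000 * 77 + 250000 * 10 = 1234500000 Hz.
*/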
/**
* fLO_FractionalTerm() - Calculates the portion contributed by FracN / denom.
* This function preserves maximum precision without
* risk of overflow. It accurately calculates
* f_ref * num / denom to within 1 Hz with fixed math.
*
* @num : Fractional portion of the multiplier
* @denom: denominator portion of the ratio
* @f_Ref: SRO frequency.
*
* This calculation handles f_ref as two separate 14-bit fields.
* Therefore, a maximum value of 2^28-1 may safely be used for f_ref.
* This is the genesis of the magic number "14" and the magic mask value of
* 0x03FFF.
*
* This routine successfully handles denom values up to and including 2^18.
* Returns: f_ref * num / denom
*/
static u32 MT2063_fLO_FractionalTerm(u32 f_ref, u32 num, u32 denom)
{
u32 t1 = (f_ref >> 14) * num;
u32 term1 = t1 / denom;
u32 loss = t1 % denom;
u32 term2 =
(((f_ref & 0x00003FFF) * num + (loss << 14)) + (denom / 2)) / denom;
return (term1 << 14) + term2;
}
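/*
* Worked example of the 14-bit split (illustrative numbers): for
* f_ref = 16000000, num = 32, denom = 64 the exact result is 8000000.
* Here t1 = (16000000 >> 14) * 32 = 31232, term1 = 31232 / 64 = 488,
* loss = 0, term2 = ((9216 * 32) + 32) / 64 = 4608, and
* (488 << 14) + 4608 = 8000000, with no intermediate value overflowing
* 32 bits.
*/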
/*
* CalcLO1Mult() - Calculates the integer divider value and the numerator
* value for a FracN PLL.
*
* This function assumes that the f_LO and f_Ref are
* evenly divisible by f_LO_Step.
*
* @Div: OUTPUT: Whole number portion of the multiplier
* @FracN: OUTPUT: Fractional portion of the multiplier
* @f_LO: desired LO frequency.
* @f_LO_Step: Minimum step size for the LO (in Hz).
* @f_Ref: SRO frequency.
*
* Returns: Recalculated LO frequency.
*/
static u32 MT2063_CalcLO1Mult(u32 *Div,
u32 *FracN,
u32 f_LO,
u32 f_LO_Step, u32 f_Ref)
{
/* Calculate the whole number portion of the divider */
*Div = f_LO / f_Ref;
/* Calculate the numerator value (round to nearest f_LO_Step) */
*FracN =
(64 * (((f_LO % f_Ref) + (f_LO_Step / 2)) / f_LO_Step) +
(f_Ref / f_LO_Step / 2)) / (f_Ref / f_LO_Step);
return (f_Ref * (*Div)) + MT2063_fLO_FractionalTerm(f_Ref, *FracN, 64);
}
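/*
* Worked example (illustrative numbers): for f_LO = 1217000000 Hz,
* f_Ref = 16000000 Hz and f_LO_Step = 250000 Hz, *Div = 76 and the
* 1000000 Hz remainder is 4 steps, so *FracN = (64 * 4 + 32) / 64 = 4;
* the recalculated LO is 16000000 * 76 + 16000000 * 4 / 64 =
* 1217000000 Hz.
*/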
/**
* CalcLO2Mult() - Calculates Integer divider value and the numerator
* value for a FracN PLL.
*
* This function assumes that the f_LO and f_Ref are
* evenly divisible by f_LO_Step.
*
* @Div: OUTPUT: Whole number portion of the multiplier
* @FracN: OUTPUT: Fractional portion of the multiplier
* @f_LO: desired LO frequency.
* @f_LO_Step: Minimum step size for the LO (in Hz).
* @f_Ref: SRO frequency.
*
* Returns: Recalculated LO frequency.
*/
static u32 MT2063_CalcLO2Mult(u32 *Div,
u32 *FracN,
u32 f_LO,
u32 f_LO_Step, u32 f_Ref)
{
/* Calculate the whole number portion of the divider */
*Div = f_LO / f_Ref;
/* Calculate the numerator value (round to nearest f_LO_Step) */
*FracN =
(8191 * (((f_LO % f_Ref) + (f_LO_Step / 2)) / f_LO_Step) +
(f_Ref / f_LO_Step / 2)) / (f_Ref / f_LO_Step);
return (f_Ref * (*Div)) + MT2063_fLO_FractionalTerm(f_Ref, *FracN,
8191);
}
/*
* FindClearTuneFilter() - Calculate the correct ClearTune filter to be
* used for a given input frequency.
*
* @state: ptr to tuner data structure
* @f_in: RF input center frequency (in Hz).
*
* Returns: ClearTune filter number (0-31)
*/
static u32 FindClearTuneFilter(struct mt2063_state *state, u32 f_in)
{
u32 RFBand;
u32 idx; /* index loop */
/*
** Find RF Band setting
*/
RFBand = 31; /* def when f_in > all */
for (idx = 0; idx < 31; ++idx) {
if (state->CTFiltMax[idx] >= f_in) {
RFBand = idx;
break;
}
}
return RFBand;
}
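/*
* Example against the (unscaled) cross-over table filled in by
* mt2063_init() below: for f_in = 100000000 Hz, CTFiltMax[0] =
* 69230000 < f_in but CTFiltMax[1] = 105770000 >= f_in, so this
* returns RFBand = 1; any input above CTFiltMax[30] falls through to
* the default band 31.
*/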
/*
* MT2063_Tune() - Change the tuner's tuned frequency to RFin.
*/
static u32 MT2063_Tune(struct mt2063_state *state, u32 f_in)
{ /* RF input center frequency */
u32 status = 0;
u32 LO1; /* 1st LO register value */
u32 Num1; /* Numerator for LO1 reg. value */
u32 f_IF1; /* 1st IF requested */
u32 LO2; /* 2nd LO register value */
u32 Num2; /* Numerator for LO2 reg. value */
u32 ofLO1, ofLO2; /* last time's LO frequencies */
u8 fiffc = 0x80; /* FIFF center freq from tuner */
u32 fiffof; /* Offset from FIFF center freq */
const u8 LO1LK = 0x80; /* Mask for LO1 Lock bit */
u8 LO2LK = 0x08; /* Mask for LO2 Lock bit */
u8 val;
u32 RFBand;
dprintk(2, "\n");
/* Check the input and output frequency ranges */
if ((f_in < MT2063_MIN_FIN_FREQ) || (f_in > MT2063_MAX_FIN_FREQ))
return -EINVAL;
if ((state->AS_Data.f_out < MT2063_MIN_FOUT_FREQ)
|| (state->AS_Data.f_out > MT2063_MAX_FOUT_FREQ))
return -EINVAL;
/*
* Save original LO1 and LO2 register values
*/
ofLO1 = state->AS_Data.f_LO1;
ofLO2 = state->AS_Data.f_LO2;
/*
* Find and set RF Band setting
*/
if (state->ctfilt_sw == 1) {
val = (state->reg[MT2063_REG_CTUNE_CTRL] | 0x08);
if (state->reg[MT2063_REG_CTUNE_CTRL] != val) {
status |=
mt2063_setreg(state, MT2063_REG_CTUNE_CTRL, val);
}
val = state->reg[MT2063_REG_CTUNE_OV];
RFBand = FindClearTuneFilter(state, f_in);
state->reg[MT2063_REG_CTUNE_OV] =
(u8) ((state->reg[MT2063_REG_CTUNE_OV] & ~0x1F)
| RFBand);
if (state->reg[MT2063_REG_CTUNE_OV] != val) {
/* write the updated filter selection, not the stale value */
status |= mt2063_setreg(state, MT2063_REG_CTUNE_OV,
state->reg[MT2063_REG_CTUNE_OV]);
}
}
/*
* Read the FIFF Center Frequency from the tuner
*/
if (status >= 0) {
status |=
mt2063_read(state,
MT2063_REG_FIFFC,
&state->reg[MT2063_REG_FIFFC], 1);
fiffc = state->reg[MT2063_REG_FIFFC];
}
/*
* Assign in the requested values
*/
state->AS_Data.f_in = f_in;
/* Request a 1st IF such that LO1 is on a step size */
state->AS_Data.f_if1_Request =
MT2063_Round_fLO(state->AS_Data.f_if1_Request + f_in,
state->AS_Data.f_LO1_Step,
state->AS_Data.f_ref) - f_in;
/*
* Calculate frequency settings. f_IF1_FREQ + f_in is the
* desired LO1 frequency
*/
MT2063_ResetExclZones(&state->AS_Data);
f_IF1 = MT2063_ChooseFirstIF(&state->AS_Data);
state->AS_Data.f_LO1 =
MT2063_Round_fLO(f_IF1 + f_in, state->AS_Data.f_LO1_Step,
state->AS_Data.f_ref);
state->AS_Data.f_LO2 =
MT2063_Round_fLO(state->AS_Data.f_LO1 - state->AS_Data.f_out - f_in,
state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
/*
* Check for any LO spurs in the output bandwidth and adjust
* the LO settings to avoid them if needed
*/
status |= MT2063_AvoidSpurs(&state->AS_Data);
/*
* MT2063_AvoidSpurs() may have changed the LO1 & LO2 values.
* Recalculate the LO frequencies and the values to be placed
* in the tuning registers.
*/
state->AS_Data.f_LO1 =
MT2063_CalcLO1Mult(&LO1, &Num1, state->AS_Data.f_LO1,
state->AS_Data.f_LO1_Step, state->AS_Data.f_ref);
state->AS_Data.f_LO2 =
MT2063_Round_fLO(state->AS_Data.f_LO1 - state->AS_Data.f_out - f_in,
state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
state->AS_Data.f_LO2 =
MT2063_CalcLO2Mult(&LO2, &Num2, state->AS_Data.f_LO2,
state->AS_Data.f_LO2_Step, state->AS_Data.f_ref);
/*
* Check the upconverter and downconverter frequency ranges
*/
if ((state->AS_Data.f_LO1 < MT2063_MIN_UPC_FREQ)
|| (state->AS_Data.f_LO1 > MT2063_MAX_UPC_FREQ))
status |= MT2063_UPC_RANGE;
if ((state->AS_Data.f_LO2 < MT2063_MIN_DNC_FREQ)
|| (state->AS_Data.f_LO2 > MT2063_MAX_DNC_FREQ))
status |= MT2063_DNC_RANGE;
/* LO2 Lock bit was in a different place for B0 version */
if (state->tuner_id == MT2063_B0)
LO2LK = 0x40;
/*
* If we have the same LO frequencies and we're already locked,
* then skip re-programming the LO registers.
*/
if ((ofLO1 != state->AS_Data.f_LO1)
|| (ofLO2 != state->AS_Data.f_LO2)
|| ((state->reg[MT2063_REG_LO_STATUS] & (LO1LK | LO2LK)) !=
(LO1LK | LO2LK))) {
/*
* Calculate the FIFFOF register value
*
* IF1_Actual
* FIFFOF = ------------ - 8 * FIFFC - 4992
* f_ref/64
*/
fiffof =
(state->AS_Data.f_LO1 -
f_in) / (state->AS_Data.f_ref / 64) - 8 * (u32) fiffc -
4992;
if (fiffof > 0xFF)
fiffof = 0xFF;
/*
* Place all of the calculated values into the local tuner
* register fields.
*/
if (status >= 0) {
state->reg[MT2063_REG_LO1CQ_1] = (u8) (LO1 & 0xFF); /* DIV1q */
state->reg[MT2063_REG_LO1CQ_2] = (u8) (Num1 & 0x3F); /* NUM1q */
state->reg[MT2063_REG_LO2CQ_1] = (u8) (((LO2 & 0x7F) << 1) /* DIV2q */
|(Num2 >> 12)); /* NUM2q (hi) */
state->reg[MT2063_REG_LO2CQ_2] = (u8) ((Num2 & 0x0FF0) >> 4); /* NUM2q (mid) */
state->reg[MT2063_REG_LO2CQ_3] = (u8) (0xE0 | (Num2 & 0x000F)); /* NUM2q (lo) */
/*
* Now write out the computed register values
* IMPORTANT: There is a required order for writing
* (0x05 must follow all the others).
*/
status |= mt2063_write(state, MT2063_REG_LO1CQ_1, &state->reg[MT2063_REG_LO1CQ_1], 5); /* 0x01 - 0x05 */
if (state->tuner_id == MT2063_B0) {
/* Re-write the one-shot bits to trigger the tune operation */
status |= mt2063_write(state, MT2063_REG_LO2CQ_3, &state->reg[MT2063_REG_LO2CQ_3], 1); /* 0x05 */
}
/* Write out the FIFF offset only if it's changing */
if (state->reg[MT2063_REG_FIFF_OFFSET] != (u8) fiffof) {
state->reg[MT2063_REG_FIFF_OFFSET] = (u8) fiffof;
status |= mt2063_write(state, MT2063_REG_FIFF_OFFSET,
&state->reg[MT2063_REG_FIFF_OFFSET], 1);
}
}
/*
* Check for LO's locking
*/
if (status < 0)
return status;
status = mt2063_lockStatus(state);
if (status < 0)
return status;
if (!status)
return -EINVAL; /* Couldn't lock */
/*
* If we locked OK, assign calculated data to mt2063_state structure
*/
state->f_IF1_actual = state->AS_Data.f_LO1 - f_in;
}
return status;
}
static const u8 MT2063B0_defaults[] = {
/* Reg, Value */
0x19, 0x05,
0x1B, 0x1D,
0x1C, 0x1F,
0x1D, 0x0F,
0x1E, 0x3F,
0x1F, 0x0F,
0x20, 0x3F,
0x22, 0x21,
0x23, 0x3F,
0x24, 0x20,
0x25, 0x3F,
0x27, 0xEE,
0x2C, 0x27, /* bit at 0x20 is cleared below */
0x30, 0x03,
0x2C, 0x07, /* bit at 0x20 is cleared here */
0x2D, 0x87,
0x2E, 0xAA,
0x28, 0xE1, /* Set the FIFCrst bit here */
0x28, 0xE0, /* Clear the FIFCrst bit here */
0x00
};
/* writing 0x05 0xf0 sw-resets all registers, so we write only needed changes */
static const u8 MT2063B1_defaults[] = {
/* Reg, Value */
0x05, 0xF0,
0x11, 0x10, /* New Enable AFCsd */
0x19, 0x05,
0x1A, 0x6C,
0x1B, 0x24,
0x1C, 0x28,
0x1D, 0x8F,
0x1E, 0x14,
0x1F, 0x8F,
0x20, 0x57,
0x22, 0x21, /* New - ver 1.03 */
0x23, 0x3C, /* New - ver 1.10 */
0x24, 0x20, /* New - ver 1.03 */
0x2C, 0x24, /* bit at 0x20 is cleared below */
0x2D, 0x87, /* FIFFQ=0 */
0x2F, 0xF3,
0x30, 0x0C, /* New - ver 1.11 */
0x31, 0x1B, /* New - ver 1.11 */
0x2C, 0x04, /* bit at 0x20 is cleared here */
0x28, 0xE1, /* Set the FIFCrst bit here */
0x28, 0xE0, /* Clear the FIFCrst bit here */
0x00
};
/* writing 0x05 0xf0 sw-resets all registers, so we write only needed changes */
static const u8 MT2063B3_defaults[] = {
/* Reg, Value */
0x05, 0xF0,
0x19, 0x3D,
0x2C, 0x24, /* bit at 0x20 is cleared below */
0x2C, 0x04, /* bit at 0x20 is cleared here */
0x28, 0xE1, /* Set the FIFCrst bit here */
0x28, 0xE0, /* Clear the FIFCrst bit here */
0x00
};
static int mt2063_init(struct dvb_frontend *fe)
{
u32 status;
struct mt2063_state *state = fe->tuner_priv;
u8 all_resets = 0xF0; /* reset/load bits */
const u8 *def = NULL;
char *step;
u32 FCRUN;
s32 maxReads;
u32 fcu_osc;
u32 i;
dprintk(2, "\n");
state->rcvr_mode = MT2063_CABLE_QAM;
/* Read the Part/Rev code from the tuner */
status = mt2063_read(state, MT2063_REG_PART_REV,
&state->reg[MT2063_REG_PART_REV], 1);
if (status < 0) {
printk(KERN_ERR "Can't read mt2063 part ID\n");
return status;
}
/* Check the part/rev code */
switch (state->reg[MT2063_REG_PART_REV]) {
case MT2063_B0:
step = "B0";
break;
case MT2063_B1:
step = "B1";
break;
case MT2063_B2:
step = "B2";
break;
case MT2063_B3:
step = "B3";
break;
default:
printk(KERN_ERR "mt2063: Unknown mt2063 device ID (0x%02x)\n",
state->reg[MT2063_REG_PART_REV]);
return -ENODEV; /* Wrong tuner Part/Rev code */
}
/* Check the 2nd byte of the Part/Rev code from the tuner */
status = mt2063_read(state, MT2063_REG_RSVD_3B,
&state->reg[MT2063_REG_RSVD_3B], 1);
/* b7 != 0 ==> NOT MT2063 */
if (status < 0 || ((state->reg[MT2063_REG_RSVD_3B] & 0x80) != 0x00)) {
printk(KERN_ERR "mt2063: Unknown part ID (0x%02x%02x)\n",
state->reg[MT2063_REG_PART_REV],
state->reg[MT2063_REG_RSVD_3B]);
return -ENODEV; /* Wrong tuner Part/Rev code */
}
printk(KERN_INFO "mt2063: detected a mt2063 %s\n", step);
/* Reset the tuner */
status = mt2063_write(state, MT2063_REG_LO2CQ_3, &all_resets, 1);
if (status < 0)
return status;
/* change all of the default values that vary from the HW reset values */
/* def = (state->reg[PART_REV] == MT2063_B0) ? MT2063B0_defaults : MT2063B1_defaults; */
switch (state->reg[MT2063_REG_PART_REV]) {
case MT2063_B3:
def = MT2063B3_defaults;
break;
case MT2063_B1:
def = MT2063B1_defaults;
break;
case MT2063_B0:
def = MT2063B0_defaults;
break;
default:
return -ENODEV;
}
while (status >= 0 && *def) {
u8 reg = *def++;
u8 val = *def++;
status = mt2063_write(state, reg, &val, 1);
}
if (status < 0)
return status;
/* Wait for FIFF location to complete. */
FCRUN = 1;
maxReads = 10;
while (status >= 0 && (FCRUN != 0) && (maxReads-- > 0)) {
msleep(2);
status = mt2063_read(state, MT2063_REG_XO_STATUS,
&state->reg[MT2063_REG_XO_STATUS], 1);
FCRUN = (state->reg[MT2063_REG_XO_STATUS] & 0x40) >> 6;
}
if (FCRUN != 0 || status < 0)
return -ENODEV;
status = mt2063_read(state,
MT2063_REG_FIFFC,
&state->reg[MT2063_REG_FIFFC], 1);
if (status < 0)
return status;
/* Read back all the registers from the tuner */
status = mt2063_read(state,
MT2063_REG_PART_REV,
state->reg, MT2063_REG_END_REGS);
if (status < 0)
return status;
/* Initialize the tuner state. */
state->tuner_id = state->reg[MT2063_REG_PART_REV];
state->AS_Data.f_ref = MT2063_REF_FREQ;
state->AS_Data.f_if1_Center = (state->AS_Data.f_ref / 8) *
((u32) state->reg[MT2063_REG_FIFFC] + 640);
state->AS_Data.f_if1_bw = MT2063_IF1_BW;
state->AS_Data.f_out = 43750000UL;
state->AS_Data.f_out_bw = 6750000UL;
state->AS_Data.f_zif_bw = MT2063_ZIF_BW;
state->AS_Data.f_LO1_Step = state->AS_Data.f_ref / 64;
state->AS_Data.f_LO2_Step = MT2063_TUNE_STEP_SIZE;
state->AS_Data.maxH1 = MT2063_MAX_HARMONICS_1;
state->AS_Data.maxH2 = MT2063_MAX_HARMONICS_2;
state->AS_Data.f_min_LO_Separation = MT2063_MIN_LO_SEP;
state->AS_Data.f_if1_Request = state->AS_Data.f_if1_Center;
state->AS_Data.f_LO1 = 2181000000UL;
state->AS_Data.f_LO2 = 1486249786UL;
state->f_IF1_actual = state->AS_Data.f_if1_Center;
state->AS_Data.f_in = state->AS_Data.f_LO1 - state->f_IF1_actual;
state->AS_Data.f_LO1_FracN_Avoid = MT2063_LO1_FRACN_AVOID;
state->AS_Data.f_LO2_FracN_Avoid = MT2063_LO2_FRACN_AVOID;
state->num_regs = MT2063_REG_END_REGS;
state->AS_Data.avoidDECT = MT2063_AVOID_BOTH;
state->ctfilt_sw = 0;
state->CTFiltMax[0] = 69230000;
state->CTFiltMax[1] = 105770000;
state->CTFiltMax[2] = 140350000;
state->CTFiltMax[3] = 177110000;
state->CTFiltMax[4] = 212860000;
state->CTFiltMax[5] = 241130000;
state->CTFiltMax[6] = 274370000;
state->CTFiltMax[7] = 309820000;
state->CTFiltMax[8] = 342450000;
state->CTFiltMax[9] = 378870000;
state->CTFiltMax[10] = 416210000;
state->CTFiltMax[11] = 456500000;
state->CTFiltMax[12] = 495790000;
state->CTFiltMax[13] = 534530000;
state->CTFiltMax[14] = 572610000;
state->CTFiltMax[15] = 598970000;
state->CTFiltMax[16] = 635910000;
state->CTFiltMax[17] = 672130000;
state->CTFiltMax[18] = 714840000;
state->CTFiltMax[19] = 739660000;
state->CTFiltMax[20] = 770410000;
state->CTFiltMax[21] = 814660000;
state->CTFiltMax[22] = 846950000;
state->CTFiltMax[23] = 867820000;
state->CTFiltMax[24] = 915980000;
state->CTFiltMax[25] = 947450000;
state->CTFiltMax[26] = 983110000;
state->CTFiltMax[27] = 1021630000;
state->CTFiltMax[28] = 1061870000;
state->CTFiltMax[29] = 1098330000;
state->CTFiltMax[30] = 1138990000;
/*
** Fetch the FCU osc value and use it and the fRef value to
** scale all of the Band Max values
*/
state->reg[MT2063_REG_CTUNE_CTRL] = 0x0A;
status = mt2063_write(state, MT2063_REG_CTUNE_CTRL,
&state->reg[MT2063_REG_CTUNE_CTRL], 1);
if (status < 0)
return status;
/* Read the ClearTune filter calibration value */
status = mt2063_read(state, MT2063_REG_FIFFC,
&state->reg[MT2063_REG_FIFFC], 1);
if (status < 0)
return status;
fcu_osc = state->reg[MT2063_REG_FIFFC];
state->reg[MT2063_REG_CTUNE_CTRL] = 0x00;
status = mt2063_write(state, MT2063_REG_CTUNE_CTRL,
&state->reg[MT2063_REG_CTUNE_CTRL], 1);
if (status < 0)
return status;
/* Adjust each of the values in the ClearTune filter cross-over table */
for (i = 0; i < 31; i++)
state->CTFiltMax[i] = (state->CTFiltMax[i] / 768) * (fcu_osc + 640);
status = MT2063_SoftwareShutdown(state, 1);
if (status < 0)
return status;
status = MT2063_ClearPowerMaskBits(state, MT2063_ALL_SD);
if (status < 0)
return status;
state->init = true;
return 0;
}
static int mt2063_get_status(struct dvb_frontend *fe, u32 *tuner_status)
{
struct mt2063_state *state = fe->tuner_priv;
int status;
dprintk(2, "\n");
if (!state->init)
return -ENODEV;
*tuner_status = 0;
status = mt2063_lockStatus(state);
if (status < 0)
return status;
if (status)
*tuner_status = TUNER_STATUS_LOCKED;
dprintk(1, "Tuner status: %d", *tuner_status);
return 0;
}
static int mt2063_release(struct dvb_frontend *fe)
{
struct mt2063_state *state = fe->tuner_priv;
dprintk(2, "\n");
fe->tuner_priv = NULL;
kfree(state);
return 0;
}
static int mt2063_set_analog_params(struct dvb_frontend *fe,
struct analog_parameters *params)
{
struct mt2063_state *state = fe->tuner_priv;
s32 pict_car;
s32 pict2chanb_vsb;
s32 ch_bw;
s32 if_mid;
s32 rcvr_mode;
int status;
dprintk(2, "\n");
if (!state->init) {
status = mt2063_init(fe);
if (status < 0)
return status;
}
switch (params->mode) {
case V4L2_TUNER_RADIO:
pict_car = 38900000;
ch_bw = 8000000;
pict2chanb_vsb = -(ch_bw / 2);
rcvr_mode = MT2063_OFFAIR_ANALOG;
break;
case V4L2_TUNER_ANALOG_TV:
rcvr_mode = MT2063_CABLE_ANALOG;
if (params->std & ~V4L2_STD_MN) {
pict_car = 38900000;
ch_bw = 6000000;
pict2chanb_vsb = -1250000;
} else if (params->std & V4L2_STD_PAL_G) {
pict_car = 38900000;
ch_bw = 7000000;
pict2chanb_vsb = -1250000;
} else { /* PAL/SECAM standards */
pict_car = 38900000;
ch_bw = 8000000;
pict2chanb_vsb = -1250000;
}
break;
default:
return -EINVAL;
}
if_mid = pict_car - (pict2chanb_vsb + (ch_bw / 2));
state->AS_Data.f_LO2_Step = 125000; /* FIXME: probably 5000 for FM */
state->AS_Data.f_out = if_mid;
state->AS_Data.f_out_bw = ch_bw + 750000;
status = MT2063_SetReceiverMode(state, rcvr_mode);
if (status < 0)
return status;
dprintk(1, "Tuning to frequency: %d, bandwidth %d, foffset %d\n",
params->frequency, ch_bw, pict2chanb_vsb);
status = MT2063_Tune(state, (params->frequency + (pict2chanb_vsb + (ch_bw / 2))));
if (status < 0)
return status;
state->frequency = params->frequency;
return 0;
}
/*
* As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
* So, the amount of needed bandwidth is given by:
* Bw = Symbol_rate * (1 + 0.15)
* As such, the maximum symbol rate supported by 6 MHz is given by:
* max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
*/
#define MAX_SYMBOL_RATE_6MHz 5217391
static int mt2063_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct mt2063_state *state = fe->tuner_priv;
int status;
s32 pict_car;
s32 pict2chanb_vsb;
s32 ch_bw;
s32 if_mid;
s32 rcvr_mode;
if (!state->init) {
status = mt2063_init(fe);
if (status < 0)
return status;
}
dprintk(2, "\n");
if (c->bandwidth_hz == 0)
return -EINVAL;
if (c->bandwidth_hz <= 6000000)
ch_bw = 6000000;
else if (c->bandwidth_hz <= 7000000)
ch_bw = 7000000;
else
ch_bw = 8000000;
switch (c->delivery_system) {
case SYS_DVBT:
rcvr_mode = MT2063_OFFAIR_COFDM;
pict_car = 36125000;
pict2chanb_vsb = -(ch_bw / 2);
break;
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
rcvr_mode = MT2063_CABLE_QAM;
pict_car = 36125000;
pict2chanb_vsb = -(ch_bw / 2);
break;
default:
return -EINVAL;
}
if_mid = pict_car - (pict2chanb_vsb + (ch_bw / 2));
state->AS_Data.f_LO2_Step = 125000; /* FIXME: probably 5000 for FM */
state->AS_Data.f_out = if_mid;
state->AS_Data.f_out_bw = ch_bw + 750000;
status = MT2063_SetReceiverMode(state, rcvr_mode);
if (status < 0)
return status;
dprintk(1, "Tuning to frequency: %d, bandwidth %d, foffset %d\n",
c->frequency, ch_bw, pict2chanb_vsb);
status = MT2063_Tune(state, (c->frequency + (pict2chanb_vsb + (ch_bw / 2))));
if (status < 0)
return status;
state->frequency = c->frequency;
return 0;
}
static int mt2063_get_if_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct mt2063_state *state = fe->tuner_priv;
dprintk(2, "\n");
if (!state->init)
return -ENODEV;
*freq = state->AS_Data.f_out;
dprintk(1, "IF frequency: %d\n", *freq);
return 0;
}
static int mt2063_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
{
struct mt2063_state *state = fe->tuner_priv;
dprintk(2, "\n");
if (!state->init)
return -ENODEV;
*bw = state->AS_Data.f_out_bw - 750000;
dprintk(1, "bandwidth: %d\n", *bw);
return 0;
}
static struct dvb_tuner_ops mt2063_ops = {
.info = {
.name = "MT2063 Silicon Tuner",
.frequency_min = 45000000,
.frequency_max = 865000000,
.frequency_step = 0,
},
.init = mt2063_init,
.sleep = MT2063_Sleep,
.get_status = mt2063_get_status,
.set_analog_params = mt2063_set_analog_params,
.set_params = mt2063_set_params,
.get_if_frequency = mt2063_get_if_frequency,
.get_bandwidth = mt2063_get_bandwidth,
.release = mt2063_release,
};
struct dvb_frontend *mt2063_attach(struct dvb_frontend *fe,
struct mt2063_config *config,
struct i2c_adapter *i2c)
{
struct mt2063_state *state = NULL;
dprintk(2, "\n");
state = kzalloc(sizeof(struct mt2063_state), GFP_KERNEL);
if (state == NULL)
goto error;
state->config = config;
state->i2c = i2c;
state->frontend = fe;
state->reference = config->refclock / 1000; /* kHz */
fe->tuner_priv = state;
fe->ops.tuner_ops = mt2063_ops;
printk(KERN_INFO "%s: Attaching MT2063\n", __func__);
return fe;
error:
kfree(state);
return NULL;
}
EXPORT_SYMBOL_GPL(mt2063_attach);
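/*
* Illustrative attach sketch (hypothetical bridge code, not part of this
* file), assuming the usual two-field mt2063_config:
*
* static struct mt2063_config demod_mt2063_cfg = {
* .tuner_address = 0x60,
* .refclock = 16000000,
* };
* ...
* if (!mt2063_attach(fe, &demod_mt2063_cfg, i2c_adapter))
* goto frontend_error;
*/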
/*
* Ancillary routines visible outside mt2063
* FIXME: Remove them in favor of using standard tuner callbacks
*/
unsigned int tuner_MT2063_SoftwareShutdown(struct dvb_frontend *fe)
{
struct mt2063_state *state = fe->tuner_priv;
int err = 0;
dprintk(2, "\n");
err = MT2063_SoftwareShutdown(state, 1);
if (err < 0)
printk(KERN_ERR "%s: Couldn't shutdown\n", __func__);
return err;
}
EXPORT_SYMBOL_GPL(tuner_MT2063_SoftwareShutdown);
unsigned int tuner_MT2063_ClearPowerMaskBits(struct dvb_frontend *fe)
{
struct mt2063_state *state = fe->tuner_priv;
int err = 0;
dprintk(2, "\n");
err = MT2063_ClearPowerMaskBits(state, MT2063_ALL_SD);
if (err < 0)
printk(KERN_ERR "%s: Invalid parameter\n", __func__);
return err;
}
EXPORT_SYMBOL_GPL(tuner_MT2063_ClearPowerMaskBits);
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_DESCRIPTION("MT2063 Silicon tuner");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Abhinav1997/android_kernel_lge_msm8226 | fs/ubifs/budget.c | 5196 | 24369 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Adrian Hunter
* Artem Bityutskiy (Битюцкий Артём)
*/
/*
* This file implements the budgeting sub-system which is responsible for UBIFS
* space management.
*
* Factors such as compression, wasted space at the ends of LEBs, space in other
* journal heads, the effect of updates on the index, and so on, make it
* impossible to accurately predict the amount of space needed. Consequently
* approximations are used.
*/
#include "ubifs.h"
#include <linux/writeback.h>
#include <linux/math64.h>
/*
* When pessimistic budget calculations say that there is not enough space,
* UBIFS starts writing back dirty inodes and pages, doing garbage collection,
* or committing. The below constant defines maximum number of times UBIFS
* repeats the operations.
*/
#define MAX_MKSPC_RETRIES 3
/*
* The below constant defines the number of dirty pages which should be
* written back when trying to shrink the liability.
*/
#define NR_TO_WRITE 16
/**
* shrink_liability - write-back some dirty pages/inodes.
* @c: UBIFS file-system description object
* @nr_to_write: how many dirty pages to write-back
*
* This function shrinks UBIFS liability by means of writing back some amount
* of dirty inodes and their pages.
*
* Note, this function synchronizes even VFS inodes which are locked
* (@i_mutex) by the caller of the budgeting function, because write-back does
* not touch @i_mutex.
*/
static void shrink_liability(struct ubifs_info *c, int nr_to_write)
{
down_read(&c->vfs_sb->s_umount);
writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
up_read(&c->vfs_sb->s_umount);
}
/**
* run_gc - run garbage collector.
* @c: UBIFS file-system description object
*
* This function runs garbage collector to make some more free space. Returns
* zero if a free LEB has been produced, %-EAGAIN if commit is required, and a
* negative error code in case of failure.
*/
static int run_gc(struct ubifs_info *c)
{
int err, lnum;
/* Make some free space by garbage-collecting dirty space */
down_read(&c->commit_sem);
lnum = ubifs_garbage_collect(c, 1);
up_read(&c->commit_sem);
if (lnum < 0)
return lnum;
/* GC freed one LEB, return it to lprops */
dbg_budg("GC freed LEB %d", lnum);
err = ubifs_return_leb(c, lnum);
if (err)
return err;
return 0;
}
/**
* get_liability - calculate current liability.
* @c: UBIFS file-system description object
*
* This function calculates and returns current UBIFS liability, i.e. the
* amount of bytes UBIFS has "promised" to write to the media.
*/
static long long get_liability(struct ubifs_info *c)
{
long long liab;
spin_lock(&c->space_lock);
liab = c->bi.idx_growth + c->bi.data_growth + c->bi.dd_growth;
spin_unlock(&c->space_lock);
return liab;
}
/**
* make_free_space - make more free space on the file-system.
* @c: UBIFS file-system description object
*
* This function is called when an operation cannot be budgeted because there
* is supposedly no free space. But in most cases there is some free space:
* o budgeting is pessimistic, so it always budgets more than is actually
* needed, so shrinking the liability is one way to make free space - the
* cached data will take less space than was budgeted for;
* o GC may turn some dark space into free space (budgeting treats dark space
* as not available);
* o commit may free some LEB, i.e., turn freeable LEBs into free LEBs.
*
* So this function tries to do the above. Returns %-EAGAIN if some free space
* was presumably made and the caller has to re-try budgeting the operation.
* Returns %-ENOSPC if it could not make more free space, and other negative error
* codes on failures.
*/
static int make_free_space(struct ubifs_info *c)
{
int err, retries = 0;
long long liab1, liab2;
do {
liab1 = get_liability(c);
/*
* We probably have some dirty pages or inodes (liability), try
* to write them back.
*/
dbg_budg("liability %lld, run write-back", liab1);
shrink_liability(c, NR_TO_WRITE);
liab2 = get_liability(c);
if (liab2 < liab1)
return -EAGAIN;
dbg_budg("new liability %lld (not shrunk)", liab2);
/* Liability did not shrink again, try GC */
dbg_budg("Run GC");
err = run_gc(c);
if (!err)
return -EAGAIN;
if (err != -EAGAIN && err != -ENOSPC)
/* Some real error happened */
return err;
dbg_budg("Run commit (retries %d)", retries);
err = ubifs_run_commit(c);
if (err)
return err;
} while (retries++ < MAX_MKSPC_RETRIES);
return -ENOSPC;
}
/**
* ubifs_calc_min_idx_lebs - calculate amount of LEBs for the index.
* @c: UBIFS file-system description object
*
* This function calculates and returns the number of LEBs which should be kept
* for index usage.
*/
int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
{
int idx_lebs;
long long idx_size;
idx_size = c->bi.old_idx_sz + c->bi.idx_growth + c->bi.uncommitted_idx;
/* And make sure we have thrice the index size of space reserved */
idx_size += idx_size << 1;
/*
* We do not maintain 'old_idx_size' as 'old_idx_lebs'/'old_idx_bytes'
* pair, nor similarly the two variables for the new index size, so we
* have to do this costly 64-bit division on fast-path.
*/
idx_lebs = div_u64(idx_size + c->idx_leb_size - 1, c->idx_leb_size);
/*
* The index head is not available for the in-the-gaps method, so add an
* extra LEB to compensate.
*/
idx_lebs += 1;
if (idx_lebs < MIN_INDEX_LEBS)
idx_lebs = MIN_INDEX_LEBS;
return idx_lebs;
}
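/*
* Illustrative walk-through (assumed numbers): with old_idx_sz +
* idx_growth + uncommitted_idx = 2000000 bytes and an idx_leb_size of
* 126976 bytes, the size is tripled to 6000000 bytes, rounded up to 48
* LEBs, plus one LEB for the in-the-gaps head, giving min_idx_lebs = 49
* (or MIN_INDEX_LEBS if that is larger).
*/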
/**
* ubifs_calc_available - calculate available FS space.
* @c: UBIFS file-system description object
* @min_idx_lebs: minimum number of LEBs reserved for the index
*
* This function calculates and returns amount of FS space available for use.
*/
long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
{
int subtract_lebs;
long long available;
available = c->main_bytes - c->lst.total_used;
/*
* Now 'available' contains theoretically available flash space
* assuming there is no index, so we have to subtract the space which
* is reserved for the index.
*/
subtract_lebs = min_idx_lebs;
/* Take into account that GC reserves one LEB for its own needs */
subtract_lebs += 1;
/*
* The GC journal head LEB is not really accessible. And since
* different write types go to different heads, we may count only on
* one head's space.
*/
subtract_lebs += c->jhead_cnt - 1;
/* We also reserve one LEB for deletions, which bypass budgeting */
subtract_lebs += 1;
available -= (long long)subtract_lebs * c->leb_size;
/* Subtract the dead space which is not available for use */
available -= c->lst.total_dead;
/*
* Subtract dark space, which might or might not be usable - it depends
* on the data which we have on the media and which will be written. If
* this is a lot of uncompressed or not-compressible data, the dark
* space cannot be used.
*/
available -= c->lst.total_dark;
/*
* However, there is more dark space. The index may be bigger than
* @min_idx_lebs. Those extra LEBs are assumed to be available, but
* their dark space is not included in total_dark, so it is subtracted
* here.
*/
if (c->lst.idx_lebs > min_idx_lebs) {
subtract_lebs = c->lst.idx_lebs - min_idx_lebs;
available -= subtract_lebs * c->dark_wm;
}
/* The calculations are rough and may end up with a negative number */
return available > 0 ? available : 0;
}
/**
* can_use_rp - check whether the user is allowed to use reserved pool.
* @c: UBIFS file-system description object
*
* UBIFS has so-called "reserved pool" which is flash space reserved
* for the superuser and for users whose UID/GID is recorded in the UBIFS
* superblock. This function checks whether the current user is allowed to
* use the reserved pool. Returns %1 if allowed and %0 otherwise.
*/
static int can_use_rp(struct ubifs_info *c)
{
if (current_fsuid() == c->rp_uid || capable(CAP_SYS_RESOURCE) ||
(c->rp_gid != 0 && in_group_p(c->rp_gid)))
return 1;
return 0;
}
/**
* do_budget_space - reserve flash space for index and data growth.
* @c: UBIFS file-system description object
*
* This function makes sure UBIFS has enough free LEBs for index growth and
* data.
*
* When budgeting index space, UBIFS reserves thrice as many LEBs as the index
* would take if it was consolidated and written to the flash. This guarantees
* that the "in-the-gaps" commit method always succeeds and UBIFS will always
* be able to commit dirty index. So this function basically adds amount of
* budgeted index space to the size of the current index, multiplies this by 3,
* and makes sure this does not exceed the amount of free LEBs.
*
* Notes about @c->bi.min_idx_lebs and @c->lst.idx_lebs variables:
* o @c->lst.idx_lebs is the number of LEBs the index currently uses. It might
* be large, because UBIFS does not do any index consolidation as long as
* there is free space. IOW, the index may take a lot of LEBs, but the LEBs
* will contain a lot of dirt.
* o @c->bi.min_idx_lebs is the number of LEBs the index presumably takes. IOW,
* the index may be consolidated to take up to @c->bi.min_idx_lebs LEBs.
*
* This function returns zero in case of success, and %-ENOSPC in case of
* failure.
*/
static int do_budget_space(struct ubifs_info *c)
{
long long outstanding, available;
int lebs, rsvd_idx_lebs, min_idx_lebs;
/* First budget index space */
min_idx_lebs = ubifs_calc_min_idx_lebs(c);
/* Now 'min_idx_lebs' contains number of LEBs to reserve */
if (min_idx_lebs > c->lst.idx_lebs)
rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs;
else
rsvd_idx_lebs = 0;
/*
* The number of LEBs that are available to be used by the index is:
*
* @c->lst.empty_lebs + @c->freeable_cnt + @c->idx_gc_cnt -
* @c->lst.taken_empty_lebs
*
* @c->lst.empty_lebs are available because they are empty.
* @c->freeable_cnt are available because they contain only free and
* dirty space, @c->idx_gc_cnt are available because they are index
* LEBs that have been garbage collected and are awaiting the commit
* before they can be used. And the in-the-gaps method will grab these
* if it needs them. @c->lst.taken_empty_lebs are empty LEBs that have
* already been allocated for some purpose.
*
* Note, @c->idx_gc_cnt is included to both @c->lst.empty_lebs (because
* these LEBs are empty) and to @c->lst.taken_empty_lebs (because they
* are taken until after the commit).
*
* Note, @c->lst.taken_empty_lebs may temporarily be higher by one
* because of the way we serialize LEB allocations and budgeting. See a
* comment in 'ubifs_find_free_space()'.
*/
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
c->lst.taken_empty_lebs;
if (unlikely(rsvd_idx_lebs > lebs)) {
dbg_budg("out of indexing space: min_idx_lebs %d (old %d), "
"rsvd_idx_lebs %d", min_idx_lebs, c->bi.min_idx_lebs,
rsvd_idx_lebs);
return -ENOSPC;
}
available = ubifs_calc_available(c, min_idx_lebs);
outstanding = c->bi.data_growth + c->bi.dd_growth;
if (unlikely(available < outstanding)) {
dbg_budg("out of data space: available %lld, outstanding %lld",
available, outstanding);
return -ENOSPC;
}
if (available - outstanding <= c->rp_size && !can_use_rp(c))
return -ENOSPC;
c->bi.min_idx_lebs = min_idx_lebs;
return 0;
}
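/*
* Worked example of the LEB check above (assumed numbers): with
* min_idx_lebs = 10 and c->lst.idx_lebs = 6, rsvd_idx_lebs = 4. If
* empty_lebs = 8, freeable_cnt = 2, idx_gc_cnt = 1 and
* taken_empty_lebs = 3, then lebs = 8 + 2 + 1 - 3 = 8 >= 4, so the
* index reservation fits and only the data/outstanding check can still
* fail.
*/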
/**
* calc_idx_growth - calculate approximate index growth from budgeting request.
* @c: UBIFS file-system description object
* @req: budgeting request
*
* For now we assume each new node adds one znode. But this is rather poor
* approximation, though.
*/
static int calc_idx_growth(const struct ubifs_info *c,
const struct ubifs_budget_req *req)
{
int znodes;
znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) +
req->new_dent;
return znodes * c->max_idx_node_sz;
}
/**
* calc_data_growth - calculate approximate amount of new data from budgeting
* request.
* @c: UBIFS file-system description object
* @req: budgeting request
*/
static int calc_data_growth(const struct ubifs_info *c,
const struct ubifs_budget_req *req)
{
int data_growth;
data_growth = req->new_ino ? c->bi.inode_budget : 0;
if (req->new_page)
data_growth += c->bi.page_budget;
if (req->new_dent)
data_growth += c->bi.dent_budget;
data_growth += req->new_ino_d;
return data_growth;
}
/**
* calc_dd_growth - calculate approximate amount of data which makes other data
* dirty from budgeting request.
* @c: UBIFS file-system description object
* @req: budgeting request
*/
static int calc_dd_growth(const struct ubifs_info *c,
const struct ubifs_budget_req *req)
{
int dd_growth;
dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
if (req->dirtied_ino)
dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
if (req->mod_dent)
dd_growth += c->bi.dent_budget;
dd_growth += req->dirtied_ino_d;
return dd_growth;
}
/**
* ubifs_budget_space - ensure there is enough space to complete an operation.
* @c: UBIFS file-system description object
* @req: budget request
*
* This function allocates budget for an operation. It uses pessimistic
* approximation of how much flash space the operation needs. The goal of this
* function is to make sure UBIFS always has flash space to flush all dirty
* pages, dirty inodes, and dirty znodes (liability). This function may force
* commit, garbage-collection or write-back. Returns zero in case of success,
* %-ENOSPC if there is no free space and other negative error codes in case of
* failures.
*/
int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req)
{
int uninitialized_var(cmt_retries), uninitialized_var(wb_retries);
int err, idx_growth, data_growth, dd_growth, retried = 0;
ubifs_assert(req->new_page <= 1);
ubifs_assert(req->dirtied_page <= 1);
ubifs_assert(req->new_dent <= 1);
ubifs_assert(req->mod_dent <= 1);
ubifs_assert(req->new_ino <= 1);
ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
ubifs_assert(req->dirtied_ino <= 4);
ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
ubifs_assert(!(req->new_ino_d & 7));
ubifs_assert(!(req->dirtied_ino_d & 7));
data_growth = calc_data_growth(c, req);
dd_growth = calc_dd_growth(c, req);
if (!data_growth && !dd_growth)
return 0;
idx_growth = calc_idx_growth(c, req);
again:
spin_lock(&c->space_lock);
ubifs_assert(c->bi.idx_growth >= 0);
ubifs_assert(c->bi.data_growth >= 0);
ubifs_assert(c->bi.dd_growth >= 0);
if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) {
dbg_budg("no space");
spin_unlock(&c->space_lock);
return -ENOSPC;
}
c->bi.idx_growth += idx_growth;
c->bi.data_growth += data_growth;
c->bi.dd_growth += dd_growth;
err = do_budget_space(c);
if (likely(!err)) {
req->idx_growth = idx_growth;
req->data_growth = data_growth;
req->dd_growth = dd_growth;
spin_unlock(&c->space_lock);
return 0;
}
/* Restore the old values */
c->bi.idx_growth -= idx_growth;
c->bi.data_growth -= data_growth;
c->bi.dd_growth -= dd_growth;
spin_unlock(&c->space_lock);
if (req->fast) {
dbg_budg("no space for fast budgeting");
return err;
}
err = make_free_space(c);
cond_resched();
if (err == -EAGAIN) {
dbg_budg("try again");
goto again;
} else if (err == -ENOSPC) {
if (!retried) {
retried = 1;
dbg_budg("-ENOSPC, but anyway try once again");
goto again;
}
dbg_budg("FS is full, -ENOSPC");
c->bi.nospace = 1;
if (can_use_rp(c) || c->rp_size == 0)
c->bi.nospace_rp = 1;
smp_wmb();
} else
ubifs_err("cannot budget space, error %d", err);
return err;
}
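/*
* Illustrative usage sketch (hypothetical caller, not from this file):
* a budget is always paired with a release once the operation is done,
* roughly:
*
* struct ubifs_budget_req req = { .new_page = 1,
* .dirtied_ino = 1,
* .dirtied_ino_d = 8 };
*
* err = ubifs_budget_space(c, &req);
* if (err)
* return err;
* ...modify the page...
* ubifs_release_budget(c, &req);
*/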
/**
* ubifs_release_budget - release budgeted free space.
* @c: UBIFS file-system description object
* @req: budget request
*
* This function releases the space budgeted by 'ubifs_budget_space()'. Note,
* since the index changes (which were budgeted for in @req->idx_growth) will
* only be written to the media on commit, this function moves the index budget
* from @c->bi.idx_growth to @c->bi.uncommitted_idx. The latter will be zeroed
* by the commit operation.
*/
void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req)
{
ubifs_assert(req->new_page <= 1);
ubifs_assert(req->dirtied_page <= 1);
ubifs_assert(req->new_dent <= 1);
ubifs_assert(req->mod_dent <= 1);
ubifs_assert(req->new_ino <= 1);
ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA);
ubifs_assert(req->dirtied_ino <= 4);
ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4);
ubifs_assert(!(req->new_ino_d & 7));
ubifs_assert(!(req->dirtied_ino_d & 7));
if (!req->recalculate) {
ubifs_assert(req->idx_growth >= 0);
ubifs_assert(req->data_growth >= 0);
ubifs_assert(req->dd_growth >= 0);
}
if (req->recalculate) {
req->data_growth = calc_data_growth(c, req);
req->dd_growth = calc_dd_growth(c, req);
req->idx_growth = calc_idx_growth(c, req);
}
if (!req->data_growth && !req->dd_growth)
return;
c->bi.nospace = c->bi.nospace_rp = 0;
smp_wmb();
spin_lock(&c->space_lock);
c->bi.idx_growth -= req->idx_growth;
c->bi.uncommitted_idx += req->idx_growth;
c->bi.data_growth -= req->data_growth;
c->bi.dd_growth -= req->dd_growth;
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
ubifs_assert(c->bi.idx_growth >= 0);
ubifs_assert(c->bi.data_growth >= 0);
ubifs_assert(c->bi.dd_growth >= 0);
ubifs_assert(c->bi.min_idx_lebs < c->main_lebs);
ubifs_assert(!(c->bi.idx_growth & 7));
ubifs_assert(!(c->bi.data_growth & 7));
ubifs_assert(!(c->bi.dd_growth & 7));
spin_unlock(&c->space_lock);
}
/**
* ubifs_convert_page_budget - convert budget of a new page.
* @c: UBIFS file-system description object
*
* This function converts budget which was allocated for a new page of data to
* the budget of changing an existing page of data. The latter is smaller than
* the former, so this function only does simple re-calculation and does not
* involve any write-back.
*/
void ubifs_convert_page_budget(struct ubifs_info *c)
{
spin_lock(&c->space_lock);
/* Release the index growth reservation */
c->bi.idx_growth -= c->max_idx_node_sz << UBIFS_BLOCKS_PER_PAGE_SHIFT;
/* Release the data growth reservation */
c->bi.data_growth -= c->bi.page_budget;
/* Increase the dirty data growth reservation instead */
c->bi.dd_growth += c->bi.page_budget;
/* And re-calculate the indexing space reservation */
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
spin_unlock(&c->space_lock);
}
/**
* ubifs_release_dirty_inode_budget - release dirty inode budget.
* @c: UBIFS file-system description object
* @ui: UBIFS inode to release the budget for
*
* This function releases budget corresponding to a dirty inode. It is usually
* called after the inode has been written to the media and marked as
* clean. It also causes the "no space" flags to be cleared.
*/
void ubifs_release_dirty_inode_budget(struct ubifs_info *c,
struct ubifs_inode *ui)
{
struct ubifs_budget_req req;
memset(&req, 0, sizeof(struct ubifs_budget_req));
/* The "no space" flags will be cleared because dd_growth is > 0 */
req.dd_growth = c->bi.inode_budget + ALIGN(ui->data_len, 8);
ubifs_release_budget(c, &req);
}
/**
* ubifs_reported_space - calculate reported free space.
* @c: the UBIFS file-system description object
* @free: amount of free space
*
* This function calculates amount of free space which will be reported to
* user-space. User-space applications tend to expect that if the file-system
* (e.g., via the 'statfs()' call) reports that it has N bytes available, they
* are able to write a file of size N. UBIFS attaches node headers to each data
* node and it has to write indexing nodes as well. This introduces additional
* overhead, and UBIFS has to report slightly less free space to meet the above
* expectations.
*
* This function assumes free space is made up of uncompressed data nodes and
* full index nodes (one per data node, tripled because we always allow enough
* space to write the index thrice).
*
* Note, the calculation is pessimistic, which means that most of the time
* UBIFS reports less space than it actually has.
*/
long long ubifs_reported_space(const struct ubifs_info *c, long long free)
{
int divisor, factor, f;
/*
* Reported space size is @free * X, where X is UBIFS block size
* divided by UBIFS block size + all overhead one data block
* introduces. The overhead is the node header + indexing overhead.
*
* Indexing overhead calculations are based on the following formula:
* I = N/(f - 1) + 1, where I - number of indexing nodes, N - number
* of data nodes, f - fanout. Because the effective UBIFS fanout is about
* half of the maximum fanout, we assume that each data node
* introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes.
* Note, the multiplier 3 is because UBIFS reserves thrice as much space
* for the index.
*/
f = c->fanout > 3 ? c->fanout >> 1 : 2;
factor = UBIFS_BLOCK_SIZE;
divisor = UBIFS_MAX_DATA_NODE_SZ;
divisor += (c->max_idx_node_sz * 3) / (f - 1);
free *= factor;
return div_u64(free, divisor);
}
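/*
* Worked example (assumed geometry, not from this file): with a fanout
* of 8, f = 4; taking UBIFS_BLOCK_SIZE = 4096, UBIFS_MAX_DATA_NODE_SZ =
* 4144 and max_idx_node_sz = 192, the divisor is
* 4144 + (192 * 3) / 3 = 4336, so roughly 4096 / 4336, i.e. about 94%,
* of the raw free space is reported to user-space.
*/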
/**
* ubifs_get_free_space_nolock - return amount of free space.
* @c: UBIFS file-system description object
*
* This function calculates amount of free space to report to user-space.
*
* Because UBIFS may introduce substantial overhead (the index, node headers,
* alignment, wastage at the end of LEBs, etc), it cannot report real amount of
* free flash space it has (well, because not all dirty space is reclaimable,
* UBIFS does not actually know the real amount). If UBIFS did so, it would
* break user expectations about what free space is. Users seem to be
* accustomed to assuming that if the file-system reports N bytes of free
* space, they would be able to fit a file of N bytes into the FS. This
* almost works for
* traditional file-systems, because they have way less overhead than UBIFS.
* So, to keep users happy, UBIFS tries to take the overhead into account.
*/
long long ubifs_get_free_space_nolock(struct ubifs_info *c)
{
int rsvd_idx_lebs, lebs;
long long available, outstanding, free;
ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
outstanding = c->bi.data_growth + c->bi.dd_growth;
available = ubifs_calc_available(c, c->bi.min_idx_lebs);
/*
* When reporting free space to user-space, UBIFS guarantees that it is
* possible to write a file of free space size. This means that for
* empty LEBs we may use more precise calculations than
* 'ubifs_calc_available()' is using. Namely, we know that in empty
* LEBs we would waste only @c->leb_overhead bytes, not @c->dark_wm.
* Thus, amend the available space.
*
* Note, the calculations below are similar to what we have in
* 'do_budget_space()', so refer there for comments.
*/
if (c->bi.min_idx_lebs > c->lst.idx_lebs)
rsvd_idx_lebs = c->bi.min_idx_lebs - c->lst.idx_lebs;
else
rsvd_idx_lebs = 0;
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
c->lst.taken_empty_lebs;
lebs -= rsvd_idx_lebs;
available += lebs * (c->dark_wm - c->leb_overhead);
if (available > outstanding)
free = ubifs_reported_space(c, available - outstanding);
else
free = 0;
return free;
}
/**
* ubifs_get_free_space - return amount of free space.
* @c: UBIFS file-system description object
*
* This function calculates and returns amount of free space to report to
* user-space.
*/
long long ubifs_get_free_space(struct ubifs_info *c)
{
long long free;
spin_lock(&c->space_lock);
free = ubifs_get_free_space_nolock(c);
spin_unlock(&c->space_lock);
return free;
}
| gpl-2.0 |
proxuser/kartal | fs/dlm/ast.c | 5452 | 8227 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
static uint64_t dlm_cb_seq;
static DEFINE_SPINLOCK(dlm_cb_seq_spin);
static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
int i;
log_print("last_bast %x %llu flags %x mode %d sb %d %x",
lkb->lkb_id,
(unsigned long long)lkb->lkb_last_bast.seq,
lkb->lkb_last_bast.flags,
lkb->lkb_last_bast.mode,
lkb->lkb_last_bast.sb_status,
lkb->lkb_last_bast.sb_flags);
log_print("last_cast %x %llu flags %x mode %d sb %d %x",
lkb->lkb_id,
(unsigned long long)lkb->lkb_last_cast.seq,
lkb->lkb_last_cast.flags,
lkb->lkb_last_cast.mode,
lkb->lkb_last_cast.sb_status,
lkb->lkb_last_cast.sb_flags);
for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
log_print("cb %x %llu flags %x mode %d sb %d %x",
lkb->lkb_id,
(unsigned long long)lkb->lkb_callbacks[i].seq,
lkb->lkb_callbacks[i].flags,
lkb->lkb_callbacks[i].mode,
lkb->lkb_callbacks[i].sb_status,
lkb->lkb_callbacks[i].sb_flags);
}
}
int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
int status, uint32_t sbflags, uint64_t seq)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
uint64_t prev_seq;
int prev_mode;
int i, rv;
for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
if (lkb->lkb_callbacks[i].seq)
continue;
/*
* Suppress some redundant basts here, do more on removal.
* Don't even add a bast if the callback just before it
* is a bast for the same mode or a more restrictive mode.
* (the additional > PR check is needed for PR/CW inversion)
*/
if ((i > 0) && (flags & DLM_CB_BAST) &&
(lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {
prev_seq = lkb->lkb_callbacks[i-1].seq;
prev_mode = lkb->lkb_callbacks[i-1].mode;
if ((prev_mode == mode) ||
(prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
log_debug(ls, "skip %x add bast %llu mode %d "
"for bast %llu mode %d",
lkb->lkb_id,
(unsigned long long)seq,
mode,
(unsigned long long)prev_seq,
prev_mode);
rv = 0;
goto out;
}
}
lkb->lkb_callbacks[i].seq = seq;
lkb->lkb_callbacks[i].flags = flags;
lkb->lkb_callbacks[i].mode = mode;
lkb->lkb_callbacks[i].sb_status = status;
lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
rv = 0;
break;
}
if (i == DLM_CALLBACKS_SIZE) {
log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
lkb->lkb_id, (unsigned long long)seq,
flags, mode, status, sbflags);
dlm_dump_lkb_callbacks(lkb);
rv = -1;
goto out;
}
out:
return rv;
}
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_callback *cb, int *resid)
{
int i, rv;
*resid = 0;
if (!lkb->lkb_callbacks[0].seq) {
rv = -ENOENT;
goto out;
}
/* oldest undelivered cb is callbacks[0] */
memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));
/* shift others down */
for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
if (!lkb->lkb_callbacks[i].seq)
break;
memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
sizeof(struct dlm_callback));
memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
(*resid)++;
}
/* if cb is a bast, it should be skipped if the blocking mode is
compatible with the last granted mode */
if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
cb->flags |= DLM_CB_SKIP;
log_debug(ls, "skip %x bast %llu mode %d "
"for cast %llu mode %d",
lkb->lkb_id,
(unsigned long long)cb->seq,
cb->mode,
(unsigned long long)lkb->lkb_last_cast.seq,
lkb->lkb_last_cast.mode);
rv = 0;
goto out;
}
}
if (cb->flags & DLM_CB_CAST) {
memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
lkb->lkb_last_cast_time = ktime_get();
}
if (cb->flags & DLM_CB_BAST) {
memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
lkb->lkb_last_bast_time = ktime_get();
}
rv = 0;
out:
return rv;
}
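/*
 * Informal sketch of the per-lkb callback queue that the two functions
 * above maintain (a summary of the code here, not of any external API):
 *
 *   dlm_add_lkb_callback() - fills the first free slot of the fixed
 *                            lkb_callbacks[DLM_CALLBACKS_SIZE] array,
 *                            suppressing some redundant basts on the way in
 *   dlm_rem_lkb_callback() - pops slot 0 (the oldest), shifts the rest
 *                            down, and marks basts that are compatible
 *                            with the last granted mode with DLM_CB_SKIP
 *
 * *resid returns the number of callbacks still queued after the removal,
 * which lets dlm_callback_work() verify it drained the queue completely.
 */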
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
uint32_t sbflags)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
uint64_t new_seq, prev_seq;
int rv;
spin_lock(&dlm_cb_seq_spin);
new_seq = ++dlm_cb_seq;
spin_unlock(&dlm_cb_seq_spin);
if (lkb->lkb_flags & DLM_IFL_USER) {
dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
return;
}
mutex_lock(&lkb->lkb_cb_mutex);
prev_seq = lkb->lkb_callbacks[0].seq;
rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
if (rv < 0)
goto out;
if (!prev_seq) {
kref_get(&lkb->lkb_ref);
if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
mutex_lock(&ls->ls_cb_mutex);
list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
mutex_unlock(&ls->ls_cb_mutex);
} else {
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
}
}
out:
mutex_unlock(&lkb->lkb_cb_mutex);
}
void dlm_callback_work(struct work_struct *work)
{
struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
void (*castfn) (void *astparam);
void (*bastfn) (void *astparam, int mode);
struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
int i, rv, resid;
memset(&callbacks, 0, sizeof(callbacks));
mutex_lock(&lkb->lkb_cb_mutex);
if (!lkb->lkb_callbacks[0].seq) {
/* no callback work exists, shouldn't happen */
log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
dlm_print_lkb(lkb);
dlm_dump_lkb_callbacks(lkb);
}
for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
if (rv < 0)
break;
}
if (resid) {
/* cbs remain, loop should have removed all, shouldn't happen */
log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
resid);
dlm_print_lkb(lkb);
dlm_dump_lkb_callbacks(lkb);
}
mutex_unlock(&lkb->lkb_cb_mutex);
castfn = lkb->lkb_astfn;
bastfn = lkb->lkb_bastfn;
for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
if (!callbacks[i].seq)
break;
if (callbacks[i].flags & DLM_CB_SKIP) {
continue;
} else if (callbacks[i].flags & DLM_CB_BAST) {
bastfn(lkb->lkb_astparam, callbacks[i].mode);
} else if (callbacks[i].flags & DLM_CB_CAST) {
lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
castfn(lkb->lkb_astparam);
}
}
/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
dlm_put_lkb(lkb);
}
int dlm_callback_start(struct dlm_ls *ls)
{
ls->ls_callback_wq = alloc_workqueue("dlm_callback",
WQ_UNBOUND |
WQ_MEM_RECLAIM |
WQ_NON_REENTRANT,
0);
if (!ls->ls_callback_wq) {
log_print("can't start dlm_callback workqueue");
return -ENOMEM;
}
return 0;
}
void dlm_callback_stop(struct dlm_ls *ls)
{
if (ls->ls_callback_wq)
destroy_workqueue(ls->ls_callback_wq);
}
void dlm_callback_suspend(struct dlm_ls *ls)
{
set_bit(LSFL_CB_DELAY, &ls->ls_flags);
if (ls->ls_callback_wq)
flush_workqueue(ls->ls_callback_wq);
}
void dlm_callback_resume(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
int count = 0;
clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
if (!ls->ls_callback_wq)
return;
mutex_lock(&ls->ls_cb_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
list_del_init(&lkb->lkb_cb_list);
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
count++;
}
mutex_unlock(&ls->ls_cb_mutex);
log_debug(ls, "dlm_callback_resume %d", count);
}
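/*
 * Note on the suspend/resume pair above: while LSFL_CB_DELAY is set,
 * dlm_add_cb() parks lkbs on ls_cb_delay instead of queueing work, and
 * dlm_callback_suspend() flushes the workqueue so that no callback runs
 * during recovery.  dlm_callback_resume() then requeues everything that
 * accumulated while callbacks were suspended.
 */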
| gpl-2.0 |
Fusion-Devices/android_kernel_lge_mako | arch/sh/drivers/heartbeat.c | 12876 | 4267 | /*
* Generic heartbeat driver for regular LED banks
*
* Copyright (C) 2007 - 2010 Paul Mundt
*
* Most SH reference boards include a number of individual LEDs that can
* be independently controlled (either via a pre-defined hardware
* function or via the LED class, if desired -- the hardware tends to
* encapsulate some of the same "triggers" that the LED class supports,
* so there's not too much value in it).
*
* Additionally, most of these boards also have a LED bank that we've
* traditionally used for strobing the load average. This use case is
* handled by this driver, rather than giving each LED bit position its
* own struct device.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/heartbeat.h>
#define DRV_NAME "heartbeat"
#define DRV_VERSION "0.1.2"
static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
unsigned bit, unsigned int inverted)
{
unsigned int new;
new = (1 << hd->bit_pos[bit]);
if (inverted)
new = ~new;
new &= hd->mask;
switch (hd->regsize) {
case 32:
new |= ioread32(hd->base) & ~hd->mask;
iowrite32(new, hd->base);
break;
case 16:
new |= ioread16(hd->base) & ~hd->mask;
iowrite16(new, hd->base);
break;
default:
new |= ioread8(hd->base) & ~hd->mask;
iowrite8(new, hd->base);
break;
}
}
static void heartbeat_timer(unsigned long data)
{
struct heartbeat_data *hd = (struct heartbeat_data *)data;
static unsigned bit = 0, up = 1;
heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
bit += up;
if ((bit == 0) || (bit == (hd->nr_bits)-1))
up = -up;
mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) /
((avenrun[0] / 5) + (3 << FSHIFT)))));
}
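/*
 * The mod_timer() expression above lengthens the inter-toggle delay as the
 * load average climbs (avenrun[] is fixed point, 1.0 == 1 << FSHIFT, with
 * FSHIFT == 11).  Two illustrative points, ignoring integer truncation:
 *
 *   load 0.0: 110 - (300 << FSHIFT) / (3 << FSHIFT)        = 110 - 100 = 10 jiffies
 *   load 1.0: 110 - (300 << FSHIFT) / ((1 << FSHIFT) / 5 +
 *                                      (3 << FSHIFT))     ~= 110 - 93  = 17 jiffies
 */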
static int heartbeat_drv_probe(struct platform_device *pdev)
{
struct resource *res;
struct heartbeat_data *hd;
int i;
if (unlikely(pdev->num_resources != 1)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
if (pdev->dev.platform_data) {
hd = pdev->dev.platform_data;
} else {
hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL);
if (unlikely(!hd))
return -ENOMEM;
}
hd->base = ioremap_nocache(res->start, resource_size(res));
if (unlikely(!hd->base)) {
dev_err(&pdev->dev, "ioremap failed\n");
if (!pdev->dev.platform_data)
kfree(hd);
return -ENXIO;
}
if (!hd->nr_bits) {
hd->bit_pos = default_bit_pos;
hd->nr_bits = ARRAY_SIZE(default_bit_pos);
}
hd->mask = 0;
for (i = 0; i < hd->nr_bits; i++)
hd->mask |= (1 << hd->bit_pos[i]);
if (!hd->regsize) {
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
hd->regsize = 32;
break;
case IORESOURCE_MEM_16BIT:
hd->regsize = 16;
break;
case IORESOURCE_MEM_8BIT:
default:
hd->regsize = 8;
break;
}
}
setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
platform_set_drvdata(pdev, hd);
return mod_timer(&hd->timer, jiffies + 1);
}
static int heartbeat_drv_remove(struct platform_device *pdev)
{
struct heartbeat_data *hd = platform_get_drvdata(pdev);
del_timer_sync(&hd->timer);
iounmap(hd->base);
platform_set_drvdata(pdev, NULL);
if (!pdev->dev.platform_data)
kfree(hd);
return 0;
}
static struct platform_driver heartbeat_driver = {
.probe = heartbeat_drv_probe,
.remove = heartbeat_drv_remove,
.driver = {
.name = DRV_NAME,
},
};
static int __init heartbeat_init(void)
{
printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
return platform_driver_register(&heartbeat_driver);
}
static void __exit heartbeat_exit(void)
{
platform_driver_unregister(&heartbeat_driver);
}
module_init(heartbeat_init);
module_exit(heartbeat_exit);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Beeko/android_kernel_samsung_d2 | drivers/media/dvb/dvb-core/dvb_filter.c | 14668 | 12922 | #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include "dvb_filter.h"
#if 0
static unsigned int bitrates[3][16] =
{{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,0},
{0,32,48,56,64,80,96,112,128,160,192,224,256,320,384,0},
{0,32,40,48,56,64,80,96,112,128,160,192,224,256,320,0}};
#endif
static u32 freq[4] = {480, 441, 320, 0};
static unsigned int ac3_bitrates[32] =
{32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
0,0,0,0,0,0,0,0,0,0,0,0,0};
static u32 ac3_frames[3][32] =
{{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
{69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
{96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
#if 0
static void setup_ts2pes(ipack *pa, ipack *pv, u16 *pida, u16 *pidv,
void (*pes_write)(u8 *buf, int count, void *data),
void *priv)
{
dvb_filter_ipack_init(pa, IPACKS, pes_write);
dvb_filter_ipack_init(pv, IPACKS, pes_write);
pa->pid = pida;
pv->pid = pidv;
pa->data = priv;
pv->data = priv;
}
#endif
#if 0
static void ts_to_pes(ipack *p, u8 *buf) // don't need count (=188)
{
u8 off = 0;
if (!buf || !p ){
printk("NULL POINTER IDIOT\n");
return;
}
if (buf[1]&PAY_START) {
if (p->plength == MMAX_PLENGTH-6 && p->found>6){
p->plength = p->found-6;
p->found = 0;
send_ipack(p);
dvb_filter_ipack_reset(p);
}
}
if (buf[3] & ADAPT_FIELD) { // adaptation field?
off = buf[4] + 1;
if (off+4 > 187) return;
}
dvb_filter_instant_repack(buf+4+off, TS_SIZE-4-off, p);
}
#endif
#if 0
/* needs 5 byte input, returns picture coding type*/
static int read_picture_header(u8 *headr, struct mpg_picture *pic, int field, int pr)
{
u8 pct;
if (pr) printk( "Pic header: ");
pic->temporal_reference[field] = (( headr[0] << 2 ) |
(headr[1] & 0x03) )& 0x03ff;
if (pr) printk( " temp ref: 0x%04x", pic->temporal_reference[field]);
pct = ( headr[1] >> 2 ) & 0x07;
pic->picture_coding_type[field] = pct;
if (pr) {
switch(pct){
case I_FRAME:
printk( " I-FRAME");
break;
case B_FRAME:
printk( " B-FRAME");
break;
case P_FRAME:
printk( " P-FRAME");
break;
}
}
pic->vinfo.vbv_delay = (( headr[1] >> 5 ) | ( headr[2] << 3) |
( (headr[3] & 0x1F) << 11) ) & 0xffff;
if (pr) printk( " vbv delay: 0x%04x", pic->vinfo.vbv_delay);
pic->picture_header_parameter = ( headr[3] & 0xe0 ) |
((headr[4] & 0x80) >> 3);
if ( pct == B_FRAME ){
pic->picture_header_parameter |= ( headr[4] >> 3 ) & 0x0f;
}
if (pr) printk( " pic head param: 0x%x",
pic->picture_header_parameter);
return pct;
}
#endif
#if 0
/* needs 4 byte input */
static int read_gop_header(u8 *headr, struct mpg_picture *pic, int pr)
{
if (pr) printk("GOP header: ");
pic->time_code = (( headr[0] << 17 ) | ( headr[1] << 9) |
( headr[2] << 1 ) | (headr[3] &0x01)) & 0x1ffffff;
if (pr) printk(" time: %d:%d.%d ", (headr[0]>>2)& 0x1F,
((headr[0]<<4)& 0x30)| ((headr[1]>>4)& 0x0F),
((headr[1]<<3)& 0x38)| ((headr[2]>>5)& 0x0F));
if ( ( headr[3] & 0x40 ) != 0 ){
pic->closed_gop = 1;
} else {
pic->closed_gop = 0;
}
if (pr) printk("closed: %d", pic->closed_gop);
if ( ( headr[3] & 0x20 ) != 0 ){
pic->broken_link = 1;
} else {
pic->broken_link = 0;
}
if (pr) printk(" broken: %d\n", pic->broken_link);
return 0;
}
#endif
#if 0
/* needs 8 byte input */
static int read_sequence_header(u8 *headr, struct dvb_video_info *vi, int pr)
{
int sw;
int form = -1;
if (pr) printk("Reading sequence header\n");
vi->horizontal_size = ((headr[1] &0xF0) >> 4) | (headr[0] << 4);
vi->vertical_size = ((headr[1] &0x0F) << 8) | (headr[2]);
sw = (int)((headr[3]&0xF0) >> 4) ;
switch( sw ){
case 1:
if (pr)
printk("Videostream: ASPECT: 1:1");
vi->aspect_ratio = 100;
break;
case 2:
if (pr)
printk("Videostream: ASPECT: 4:3");
vi->aspect_ratio = 133;
break;
case 3:
if (pr)
printk("Videostream: ASPECT: 16:9");
vi->aspect_ratio = 177;
break;
case 4:
if (pr)
printk("Videostream: ASPECT: 2.21:1");
vi->aspect_ratio = 221;
break;
case 5 ... 15:
if (pr)
printk("Videostream: ASPECT: reserved");
vi->aspect_ratio = 0;
break;
default:
vi->aspect_ratio = 0;
return -1;
}
if (pr)
printk(" Size = %dx%d",vi->horizontal_size,vi->vertical_size);
sw = (int)(headr[3]&0x0F);
switch ( sw ) {
case 1:
if (pr)
printk(" FRate: 23.976 fps");
vi->framerate = 23976;
form = -1;
break;
case 2:
if (pr)
printk(" FRate: 24 fps");
vi->framerate = 24000;
form = -1;
break;
case 3:
if (pr)
printk(" FRate: 25 fps");
vi->framerate = 25000;
form = VIDEO_MODE_PAL;
break;
case 4:
if (pr)
printk(" FRate: 29.97 fps");
vi->framerate = 29970;
form = VIDEO_MODE_NTSC;
break;
case 5:
if (pr)
printk(" FRate: 30 fps");
vi->framerate = 30000;
form = VIDEO_MODE_NTSC;
break;
case 6:
if (pr)
printk(" FRate: 50 fps");
vi->framerate = 50000;
form = VIDEO_MODE_PAL;
break;
case 7:
if (pr)
printk(" FRate: 60 fps");
vi->framerate = 60000;
form = VIDEO_MODE_NTSC;
break;
}
vi->bit_rate = (headr[4] << 10) | (headr[5] << 2) | (headr[6] & 0x03);
vi->vbv_buffer_size
= (( headr[6] & 0xF8) >> 3 ) | (( headr[7] & 0x1F )<< 5);
if (pr){
printk(" BRate: %d Mbit/s",4*(vi->bit_rate)/10000);
printk(" vbvbuffer %d",16*1024*(vi->vbv_buffer_size));
printk("\n");
}
vi->video_format = form;
return 0;
}
#endif
#if 0
static int get_vinfo(u8 *mbuf, int count, struct dvb_video_info *vi, int pr)
{
u8 *headr;
int found = 0;
int c = 0;
while (found < 4 && c+4 < count){
u8 *b;
b = mbuf+c;
if ( b[0] == 0x00 && b[1] == 0x00 && b[2] == 0x01
&& b[3] == 0xb3) found = 4;
else {
c++;
}
}
if (! found) return -1;
c += 4;
if (c+12 >= count) return -1;
headr = mbuf+c;
if (read_sequence_header(headr, vi, pr) < 0) return -1;
vi->off = c-4;
return 0;
}
#endif
#if 0
static int get_ainfo(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
{
u8 *headr;
int found = 0;
int c = 0;
int fr = 0;
while (found < 2 && c < count){
u8 b[2];
memcpy( b, mbuf+c, 2);
if ( b[0] == 0xff && (b[1] & 0xf8) == 0xf8)
found = 2;
else {
c++;
}
}
if (!found) return -1;
if (c+3 >= count) return -1;
headr = mbuf+c;
ai->layer = (headr[1] & 0x06) >> 1;
if (pr)
printk("Audiostream: Layer: %d", 4-ai->layer);
ai->bit_rate = bitrates[(3-ai->layer)][(headr[2] >> 4 )]*1000;
if (pr){
if ((headr[2] >> 4) == 0xf)
printk(" BRate: reserved");
else if (ai->bit_rate == 0)
printk(" Bit rate: free");
else
printk(" BRate: %d kb/s", ai->bit_rate/1000);
}
fr = (headr[2] & 0x0c ) >> 2;
ai->frequency = freq[fr]*100;
if (pr){
if (fr == 3)
printk(" Freq: reserved\n");
else
printk(" Freq: %d Hz\n",ai->frequency);
}
ai->off = c;
return 0;
}
#endif
int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
{
u8 *headr;
int found = 0;
int c = 0;
u8 frame = 0;
int fr = 0;
while ( !found && c < count){
u8 *b = mbuf+c;
if ( b[0] == 0x0b && b[1] == 0x77 )
found = 1;
else {
c++;
}
}
if (!found) return -1;
if (pr)
printk("Audiostream: AC3");
ai->off = c;
if (c+5 >= count) return -1;
ai->layer = 0; // 0 for AC3
headr = mbuf+c+2;
frame = (headr[2]&0x3f);
ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
if (pr)
printk(" BRate: %d kb/s", (int) ai->bit_rate/1000);
fr = (headr[2] & 0xc0 ) >> 6;
ai->frequency = freq[fr]*100;
if (pr) printk (" Freq: %d Hz\n", (int) ai->frequency);
ai->framesize = ac3_frames[fr][frame >> 1];
if ((frame & 1) && (fr == 1)) ai->framesize++;
ai->framesize = ai->framesize << 1;
if (pr) printk (" Framesize %d\n",(int) ai->framesize);
return 0;
}
EXPORT_SYMBOL(dvb_filter_get_ac3info);
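/*
 * Worked example for the framesize math above (values chosen purely for
 * illustration): headr[2] == 0x10 gives frame == 0x10, so frame >> 1 == 8
 * -> ac3_bitrates[8] == 128 kb/s, and fr == 0 (48 kHz) ->
 * ac3_frames[0][8] == 256.  The table is in 16-bit words, hence the final
 * '<< 1': 256 words == 512 bytes, matching
 * 128000 bit/s * (1536 samples / 48000 Hz) / 8.  The '(frame & 1) &&
 * (fr == 1)' case adds one word only at 44.1 kHz, where the low bit of
 * the frame size code selects the longer of the two possible frame sizes.
 */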
#if 0
static u8 *skip_pes_header(u8 **bufp)
{
u8 *inbuf = *bufp;
u8 *buf = inbuf;
u8 *pts = NULL;
int skip = 0;
static const int mpeg1_skip_table[16] = {
1, 0xffff, 5, 10, 0xffff, 0xffff, 0xffff, 0xffff,
0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff
};
if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */
if (buf[7] & PTS_ONLY)
pts = buf+9;
else pts = NULL;
buf = inbuf + 9 + inbuf[8];
} else { /* mpeg1 */
for (buf = inbuf + 6; *buf == 0xff; buf++)
if (buf == inbuf + 6 + 16) {
break;
}
if ((*buf & 0xc0) == 0x40)
buf += 2;
skip = mpeg1_skip_table [*buf >> 4];
if (skip == 5 || skip == 10) pts = buf;
else pts = NULL;
buf += skip;
}
*bufp = buf;
return pts;
}
#endif
#if 0
static void initialize_quant_matrix( u32 *matrix )
{
int i;
matrix[0] = 0x08101013;
matrix[1] = 0x10131616;
matrix[2] = 0x16161616;
matrix[3] = 0x1a181a1b;
matrix[4] = 0x1b1b1a1a;
matrix[5] = 0x1a1a1b1b;
matrix[6] = 0x1b1d1d1d;
matrix[7] = 0x2222221d;
matrix[8] = 0x1d1d1b1b;
matrix[9] = 0x1d1d2020;
matrix[10] = 0x22222526;
matrix[11] = 0x25232322;
matrix[12] = 0x23262628;
matrix[13] = 0x28283030;
matrix[14] = 0x2e2e3838;
matrix[15] = 0x3a454553;
for ( i = 16 ; i < 32 ; i++ )
matrix[i] = 0x10101010;
}
#endif
#if 0
static void initialize_mpg_picture(struct mpg_picture *pic)
{
int i;
/* set MPEG1 */
pic->mpeg1_flag = 1;
pic->profile_and_level = 0x4A ; /* MP@LL */
pic->progressive_sequence = 1;
pic->low_delay = 0;
pic->sequence_display_extension_flag = 0;
for ( i = 0 ; i < 4 ; i++ ){
pic->frame_centre_horizontal_offset[i] = 0;
pic->frame_centre_vertical_offset[i] = 0;
}
pic->last_frame_centre_horizontal_offset = 0;
pic->last_frame_centre_vertical_offset = 0;
pic->picture_display_extension_flag[0] = 0;
pic->picture_display_extension_flag[1] = 0;
pic->sequence_header_flag = 0;
pic->gop_flag = 0;
pic->sequence_end_flag = 0;
}
#endif
#if 0
static void mpg_set_picture_parameter( int32_t field_type, struct mpg_picture *pic )
{
int16_t last_h_offset;
int16_t last_v_offset;
int16_t *p_h_offset;
int16_t *p_v_offset;
if ( pic->mpeg1_flag ){
pic->picture_structure[field_type] = VIDEO_FRAME_PICTURE;
pic->top_field_first = 0;
pic->repeat_first_field = 0;
pic->progressive_frame = 1;
pic->picture_coding_parameter = 0x000010;
}
/* Reset flag */
pic->picture_display_extension_flag[field_type] = 0;
last_h_offset = pic->last_frame_centre_horizontal_offset;
last_v_offset = pic->last_frame_centre_vertical_offset;
if ( field_type == FIRST_FIELD ){
p_h_offset = pic->frame_centre_horizontal_offset;
p_v_offset = pic->frame_centre_vertical_offset;
*p_h_offset = last_h_offset;
*(p_h_offset + 1) = last_h_offset;
*(p_h_offset + 2) = last_h_offset;
*p_v_offset = last_v_offset;
*(p_v_offset + 1) = last_v_offset;
*(p_v_offset + 2) = last_v_offset;
} else {
pic->frame_centre_horizontal_offset[3] = last_h_offset;
pic->frame_centre_vertical_offset[3] = last_v_offset;
}
}
#endif
#if 0
static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_type)
{
pic->picture_header = 0;
pic->sequence_header_data
= ( INIT_HORIZONTAL_SIZE << 20 )
| ( INIT_VERTICAL_SIZE << 8 )
| ( INIT_ASPECT_RATIO << 4 )
| ( INIT_FRAME_RATE );
pic->mpeg1_flag = 0;
pic->vinfo.horizontal_size
= INIT_DISP_HORIZONTAL_SIZE;
pic->vinfo.vertical_size
= INIT_DISP_VERTICAL_SIZE;
pic->picture_display_extension_flag[field_type]
= 0;
pic->pts_flag[field_type] = 0;
pic->sequence_gop_header = 0;
pic->picture_header = 0;
pic->sequence_header_flag = 0;
pic->gop_flag = 0;
pic->sequence_end_flag = 0;
pic->sequence_display_extension_flag = 0;
pic->last_frame_centre_horizontal_offset = 0;
pic->last_frame_centre_vertical_offset = 0;
pic->channel = chan;
}
#endif
void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
dvb_filter_pes2ts_cb_t *cb, void *priv)
{
unsigned char *buf=p2ts->buf;
buf[0]=0x47;
buf[1]=(pid>>8);
buf[2]=pid&0xff;
p2ts->cc=0;
p2ts->cb=cb;
p2ts->priv=priv;
}
EXPORT_SYMBOL(dvb_filter_pes2ts_init);
int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
int len, int payload_start)
{
unsigned char *buf=p2ts->buf;
int ret=0, rest;
//len=6+((pes[4]<<8)|pes[5]);
if (payload_start)
buf[1]|=0x40;
else
buf[1]&=~0x40;
while (len>=184) {
buf[3]=0x10|((p2ts->cc++)&0x0f);
memcpy(buf+4, pes, 184);
if ((ret=p2ts->cb(p2ts->priv, buf)))
return ret;
len-=184; pes+=184;
buf[1]&=~0x40;
}
if (!len)
return 0;
buf[3]=0x30|((p2ts->cc++)&0x0f);
rest=183-len;
if (rest) {
buf[5]=0x00;
if (rest-1)
memset(buf+6, 0xff, rest-1);
}
buf[4]=rest;
memcpy(buf+5+rest, pes, len);
return p2ts->cb(p2ts->priv, buf);
}
EXPORT_SYMBOL(dvb_filter_pes2ts);
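/*
 * Tail-packet layout example for the stuffing logic above (illustrative
 * length only): with 100 PES bytes left, rest = 183 - 100 = 83 and the
 * 188-byte TS packet is laid out as
 *
 *   buf[0..3]    4-byte TS header (adaptation_field_control == 0x30)
 *   buf[4]       adaptation field length = 83
 *   buf[5]       adaptation field flags  = 0x00
 *   buf[6..87]   82 stuffing bytes of 0xff
 *   buf[88..187] the remaining 100 payload bytes
 *
 * i.e. 4 + 1 + 83 + 100 == 188 exactly.
 */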
| gpl-2.0 |
felipesanches/linux-media | drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c | 77 | 3168 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "priv.h"
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
#else
static inline bool
nouveau_acpi_rom_supported(struct pci_dev *pdev)
{
return false;
}
static inline int
nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return -EINVAL;
}
#endif
/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
* on some systems, such as Lenovo W530.
*/
static u32
acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0x00000fff;
u32 fetch = limit - start;
if (nvbios_extend(bios, limit) > 0) {
int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
if (ret == fetch)
return fetch;
}
return 0;
}
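/*
 * Page-rounding example for acpi_read_fast() (made-up offsets): a request
 * of offset = 0x1234, length = 0x100 yields start = 0x1000 and
 * limit = (0x1234 + 0x100 + 0xfff) & ~0xfff = 0x2000, so fetch = 0x1000
 * and the whole request is satisfied by a single page-aligned ACPI read.
 */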
/* Other systems, such as the one in fdo#55948, will report a success
* but only return 4KiB of data. The common bios fetching logic will
* detect an invalid image, and fall back to this version of the read
* function.
*/
static u32
acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
{
u32 limit = (offset + length + 0xfff) & ~0xfff;
u32 start = offset & ~0xfff;
u32 fetch = 0;
if (nvbios_extend(bios, limit) > 0) {
while (start + fetch < limit) {
int ret = nouveau_acpi_get_bios_chunk(bios->data,
start + fetch,
0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
}
}
return fetch;
}
static void *
acpi_init(struct nouveau_bios *bios, const char *name)
{
if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
return ERR_PTR(-ENODEV);
return NULL;
}
const struct nvbios_source
nvbios_acpi_fast = {
.name = "ACPI",
.init = acpi_init,
.read = acpi_read_fast,
.rw = false,
};
const struct nvbios_source
nvbios_acpi_slow = {
.name = "ACPI",
.init = acpi_init,
.read = acpi_read_slow,
.rw = false,
};
| gpl-2.0 |
XPerience-AOSP-Lollipop/android_kernel_xiaomi_cancro | drivers/staging/prima/CORE/HDD/src/wlan_hdd_cfg.c | 77 | 272158 | /*
* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**=========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
$Header:$ $DateTime: $ $Author: $
when who what, where, why
-------- --- --------------------------------------------------------
07/27/09 kanand Created module.
==========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include <linux/firmware.h>
#include <linux/string.h>
#include <wlan_hdd_includes.h>
#include <wlan_hdd_main.h>
#include <wlan_hdd_assoc.h>
#include <wlan_hdd_cfg.h>
#include <vos_types.h>
#include <csrApi.h>
#include <pmcApi.h>
#include <wlan_hdd_misc.h>
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
static void cbNotifySetRoamPrefer5GHz(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateRoamPrefer5GHz((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nRoamPrefer5GHz);
}
static void cbNotifySetImmediateRoamRssiDiff(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateImmediateRoamRssiDiff((tHalHandle)(pHddCtx->hHal),
pHddCtx->cfg_ini->nImmediateRoamRssiDiff);
}
static void cbNotifySetRoamRssiDiff(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateRoamRssiDiff((tHalHandle)(pHddCtx->hHal),
pHddCtx->cfg_ini->RoamRssiDiff);
}
static void cbNotifySetFastTransitionEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateFastTransitionEnabled((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->isFastTransitionEnabled);
}
static void cbNotifySetRoamIntraBand(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_setRoamIntraBand((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nRoamIntraBand);
}
static void cbNotifySetWESMode(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_UpdateWESMode((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->isWESModeEnabled);
}
static void cbNotifySetRoamScanNProbes(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateRoamScanNProbes((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nProbes);
}
static void cbNotifySetRoamScanHomeAwayTime(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateRoamScanHomeAwayTime((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nRoamScanHomeAwayTime, eANI_BOOLEAN_TRUE);
}
#endif
#ifdef FEATURE_WLAN_OKC
static void cbNotifySetOkcFeatureEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
}
#endif
#ifdef FEATURE_WLAN_LFR
static void NotifyIsFastRoamIniFeatureEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_UpdateIsFastRoamIniFeatureEnabled((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->isFastRoamIniFeatureEnabled );
}
static void NotifyIsMAWCIniFeatureEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
/* at the point this routine is called, the value in the cfg_ini table has already been updated */
sme_UpdateIsMAWCIniFeatureEnabled((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->MAWCEnabled );
}
#endif
#ifdef FEATURE_WLAN_ESE
static void cbNotifySetEseFeatureEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_UpdateIsEseFeatureEnabled((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->isEseIniFeatureEnabled );
}
#endif
static void cbNotifySetFwRssiMonitoring(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_UpdateConfigFwRssiMonitoring((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->fEnableFwRssiMonitoring );
}
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
static void cbNotifySetNeighborLookupRssiThreshold(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_setNeighborLookupRssiThreshold((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nNeighborLookupRssiThreshold );
}
static void cbNotifySetNeighborScanPeriod(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_setNeighborScanPeriod((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nNeighborScanPeriod );
}
static void cbNotifySetNeighborResultsRefreshPeriod(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_setNeighborScanRefreshPeriod((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nNeighborResultsRefreshPeriod );
}
static void cbNotifySetEmptyScanRefreshPeriod(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_UpdateEmptyScanRefreshPeriod((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nEmptyScanRefreshPeriod);
}
static void cbNotifySetNeighborScanMinChanTime(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
// at the point this routine is called, the value in the cfg_ini table has already been updated
sme_setNeighborScanMinChanTime((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nNeighborScanMinChanTime);
}
static void cbNotifySetNeighborScanMaxChanTime(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_setNeighborScanMaxChanTime((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->nNeighborScanMaxChanTime);
}
#endif
static void cbNotifySetEnableSSR(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateEnableSSR((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->enableSSR);
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
static void cbNotifyUpdateRoamScanOffloadEnabled(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateRoamScanOffloadEnabled((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->isRoamOffloadScanEnabled);
if (0 == pHddCtx->cfg_ini->isRoamOffloadScanEnabled)
{
pHddCtx->cfg_ini->bFastRoamInConIniFeatureEnabled = 0;
sme_UpdateEnableFastRoamInConcurrency((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->bFastRoamInConIniFeatureEnabled );
}
}
static void cbNotifySetEnableFastRoamInConcurrency(hdd_context_t *pHddCtx, unsigned long NotifyId)
{
sme_UpdateEnableFastRoamInConcurrency((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->bFastRoamInConIniFeatureEnabled );
}
#endif
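/*
 * Each entry below binds one ini key to a field of hdd_config_t.  As used
 * here (an informal summary of the call sites, not of the macro
 * definitions in the header): REG_VARIABLE(name, type, struct, field,
 * flags, default, min, max) describes a static item, REG_VARIABLE_STRING
 * the string/MAC variants, and REG_DYNAMIC_VARIABLE additionally takes a
 * notify callback (one of the cbNotify* routines above) plus an id,
 * invoked when the value is changed at runtime.
 */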
REG_TABLE_ENTRY g_registry_table[] =
{
REG_VARIABLE( CFG_RTS_THRESHOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, RTSThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RTS_THRESHOLD_DEFAULT,
CFG_RTS_THRESHOLD_MIN,
CFG_RTS_THRESHOLD_MAX ),
REG_VARIABLE( CFG_FRAG_THRESHOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, FragmentationThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FRAG_THRESHOLD_DEFAULT,
CFG_FRAG_THRESHOLD_MIN,
CFG_FRAG_THRESHOLD_MAX ),
REG_VARIABLE( CFG_CALIBRATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, Calibration,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_CALIBRATION_DEFAULT,
CFG_CALIBRATION_MIN,
CFG_CALIBRATION_MAX ),
REG_VARIABLE( CFG_CALIBRATION_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, CalibrationPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_CALIBRATION_PERIOD_DEFAULT,
CFG_CALIBRATION_PERIOD_MIN,
CFG_CALIBRATION_PERIOD_MAX ),
REG_VARIABLE( CFG_OPERATING_CHANNEL_NAME, WLAN_PARAM_Integer,
hdd_config_t, OperatingChannel,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OPERATING_CHANNEL_DEFAULT,
CFG_OPERATING_CHANNEL_MIN,
CFG_OPERATING_CHANNEL_MAX ),
REG_VARIABLE( CFG_SHORT_SLOT_TIME_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, ShortSlotTimeEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SHORT_SLOT_TIME_ENABLED_DEFAULT,
CFG_SHORT_SLOT_TIME_ENABLED_MIN,
CFG_SHORT_SLOT_TIME_ENABLED_MAX ),
REG_VARIABLE( CFG_11D_SUPPORT_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, Is11dSupportEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_11D_SUPPORT_ENABLED_DEFAULT,
CFG_11D_SUPPORT_ENABLED_MIN,
CFG_11D_SUPPORT_ENABLED_MAX ),
REG_VARIABLE( CFG_11H_SUPPORT_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, Is11hSupportEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_11H_SUPPORT_ENABLED_DEFAULT,
CFG_11H_SUPPORT_ENABLED_MIN,
CFG_11H_SUPPORT_ENABLED_MAX ),
REG_VARIABLE( CFG_ENFORCE_11D_CHANNELS_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnforce11dChannels,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_ENFORCE_11D_CHANNELS_DEFAULT,
CFG_ENFORCE_11D_CHANNELS_MIN,
CFG_ENFORCE_11D_CHANNELS_MAX ),
REG_VARIABLE( CFG_COUNTRY_CODE_PRIORITY_NAME, WLAN_PARAM_Integer,
hdd_config_t, fSupplicantCountryCodeHasPriority,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_COUNTRY_CODE_PRIORITY_DEFAULT,
CFG_COUNTRY_CODE_PRIORITY_MIN,
CFG_COUNTRY_CODE_PRIORITY_MAX),
REG_VARIABLE( CFG_ENFORCE_COUNTRY_CODE_MATCH_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnforceCountryCodeMatch,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_ENFORCE_COUNTRY_CODE_MATCH_DEFAULT,
CFG_ENFORCE_COUNTRY_CODE_MATCH_MIN,
CFG_ENFORCE_COUNTRY_CODE_MATCH_MAX ),
REG_VARIABLE( CFG_ENFORCE_DEFAULT_DOMAIN_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnforceDefaultDomain,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_ENFORCE_DEFAULT_DOMAIN_DEFAULT,
CFG_ENFORCE_DEFAULT_DOMAIN_MIN,
CFG_ENFORCE_DEFAULT_DOMAIN_MAX ),
REG_VARIABLE( CFG_GENERIC_ID1_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg1Id,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_ID1_DEFAULT,
CFG_GENERIC_ID1_MIN,
CFG_GENERIC_ID1_MAX ),
REG_VARIABLE( CFG_GENERIC_ID2_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg2Id,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_ID2_DEFAULT,
CFG_GENERIC_ID2_MIN,
CFG_GENERIC_ID2_MAX ),
REG_VARIABLE( CFG_GENERIC_ID3_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg3Id,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_ID3_DEFAULT,
CFG_GENERIC_ID3_MIN,
CFG_GENERIC_ID3_MAX ),
REG_VARIABLE( CFG_GENERIC_ID4_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg4Id,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_ID4_DEFAULT,
CFG_GENERIC_ID4_MIN,
CFG_GENERIC_ID4_MAX ),
REG_VARIABLE( CFG_GENERIC_ID5_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg5Id,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_ID5_DEFAULT,
CFG_GENERIC_ID5_MIN,
CFG_GENERIC_ID5_MAX ),
REG_VARIABLE( CFG_GENERIC_VALUE1_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg1Value,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_VALUE1_DEFAULT,
CFG_GENERIC_VALUE1_MIN,
CFG_GENERIC_VALUE1_MAX ),
REG_VARIABLE( CFG_GENERIC_VALUE2_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg2Value,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_VALUE2_DEFAULT,
CFG_GENERIC_VALUE2_MIN,
CFG_GENERIC_VALUE2_MAX ),
REG_VARIABLE( CFG_GENERIC_VALUE3_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg3Value,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_VALUE3_DEFAULT,
CFG_GENERIC_VALUE3_MIN,
CFG_GENERIC_VALUE3_MAX ),
REG_VARIABLE( CFG_GENERIC_VALUE4_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg4Value,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_VALUE4_DEFAULT,
CFG_GENERIC_VALUE4_MIN,
CFG_GENERIC_VALUE4_MAX ),
REG_VARIABLE( CFG_GENERIC_VALUE5_NAME, WLAN_PARAM_Integer,
hdd_config_t, Cfg5Value,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GENERIC_VALUE5_DEFAULT,
CFG_GENERIC_VALUE5_MIN,
CFG_GENERIC_VALUE5_MAX ),
REG_VARIABLE( CFG_HEARTBEAT_THRESH_24_NAME, WLAN_PARAM_Integer,
hdd_config_t, HeartbeatThresh24,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_HEARTBEAT_THRESH_24_DEFAULT,
CFG_HEARTBEAT_THRESH_24_MIN,
CFG_HEARTBEAT_THRESH_24_MAX ),
REG_VARIABLE_STRING( CFG_POWER_USAGE_NAME, WLAN_PARAM_String,
hdd_config_t, PowerUsageControl,
VAR_FLAGS_OPTIONAL,
(void *)CFG_POWER_USAGE_DEFAULT ),
REG_VARIABLE( CFG_ENABLE_SUSPEND_NAME, WLAN_PARAM_Integer,
hdd_config_t, nEnableSuspend,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_SUSPEND_DEFAULT,
CFG_ENABLE_SUSPEND_MIN,
CFG_ENABLE_SUSPEND_MAX ),
REG_VARIABLE( CFG_ENABLE_ENABLE_DRIVER_STOP_NAME, WLAN_PARAM_Integer,
hdd_config_t, nEnableDriverStop,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_ENABLE_DRIVER_STOP_DEFAULT,
CFG_ENABLE_ENABLE_DRIVER_STOP_MIN,
CFG_ENABLE_ENABLE_DRIVER_STOP_MAX ),
REG_VARIABLE( CFG_ENABLE_IMPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsImpsEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_IMPS_DEFAULT,
CFG_ENABLE_IMPS_MIN,
CFG_ENABLE_IMPS_MAX ),
REG_VARIABLE( CFG_SSR_PANIC_ON_FAILURE_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsSsrPanicOnFailure,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SSR_PANIC_ON_FAILURE_DEFAULT,
CFG_SSR_PANIC_ON_FAILURE_MIN,
CFG_SSR_PANIC_ON_FAILURE_MAX),
REG_VARIABLE( CFG_ENABLE_LOGP_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsLogpEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_LOGP_DEFAULT,
CFG_ENABLE_LOGP_MIN,
CFG_ENABLE_LOGP_MAX ),
REG_VARIABLE( CFG_IMPS_MINIMUM_SLEEP_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nImpsMinSleepTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IMPS_MINIMUM_SLEEP_TIME_DEFAULT,
CFG_IMPS_MINIMUM_SLEEP_TIME_MIN,
CFG_IMPS_MINIMUM_SLEEP_TIME_MAX ),
REG_VARIABLE( CFG_IMPS_MAXIMUM_SLEEP_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nImpsMaxSleepTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IMPS_MAXIMUM_SLEEP_TIME_DEFAULT,
CFG_IMPS_MAXIMUM_SLEEP_TIME_MIN,
CFG_IMPS_MAXIMUM_SLEEP_TIME_MAX ),
REG_VARIABLE( CFG_DEFER_SCAN_TIME_INTERVAL, WLAN_PARAM_Integer,
hdd_config_t, nDeferScanTimeInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DEFER_SCAN_TIME_INTERVAL_DEFAULT,
CFG_DEFER_SCAN_TIME_INTERVAL_MIN,
CFG_DEFER_SCAN_TIME_INTERVAL_MAX ),
REG_VARIABLE( CFG_IMPS_MODERATE_SLEEP_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nImpsModSleepTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IMPS_MODERATE_SLEEP_TIME_DEFAULT,
CFG_IMPS_MODERATE_SLEEP_TIME_MIN,
CFG_IMPS_MODERATE_SLEEP_TIME_MAX ),
REG_VARIABLE( CFG_ENABLE_BMPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsBmpsEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_BMPS_DEFAULT,
CFG_ENABLE_BMPS_MIN,
CFG_ENABLE_BMPS_MAX ),
REG_VARIABLE( CFG_BMPS_MINIMUM_LI_NAME, WLAN_PARAM_Integer,
hdd_config_t, nBmpsMinListenInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BMPS_MINIMUM_LI_DEFAULT,
CFG_BMPS_MINIMUM_LI_MIN,
CFG_BMPS_MINIMUM_LI_MAX ),
REG_VARIABLE( CFG_BMPS_MAXIMUM_LI_NAME, WLAN_PARAM_Integer,
hdd_config_t, nBmpsMaxListenInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BMPS_MAXIMUM_LI_DEFAULT,
CFG_BMPS_MAXIMUM_LI_MIN,
CFG_BMPS_MAXIMUM_LI_MAX ),
REG_VARIABLE( CFG_BMPS_MODERATE_LI_NAME, WLAN_PARAM_Integer,
hdd_config_t, nBmpsModListenInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BMPS_MODERATE_LI_DEFAULT,
CFG_BMPS_MODERATE_LI_MIN,
CFG_BMPS_MODERATE_LI_MAX ),
REG_VARIABLE( CFG_ENABLE_AUTO_BMPS_TIMER_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsAutoBmpsTimerEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_AUTO_BMPS_TIMER_DEFAULT,
CFG_ENABLE_AUTO_BMPS_TIMER_MIN,
CFG_ENABLE_AUTO_BMPS_TIMER_MAX ),
REG_VARIABLE( CFG_AUTO_BMPS_TIMER_VALUE_NAME, WLAN_PARAM_Integer,
hdd_config_t, nAutoBmpsTimerValue,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AUTO_BMPS_TIMER_VALUE_DEFAULT,
CFG_AUTO_BMPS_TIMER_VALUE_MIN,
CFG_AUTO_BMPS_TIMER_VALUE_MAX ),
REG_VARIABLE( CFG_DOT11_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, dot11Mode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_DOT11_MODE_DEFAULT,
CFG_DOT11_MODE_MIN,
CFG_DOT11_MODE_MAX ),
REG_VARIABLE( CFG_CHANNEL_BONDING_MODE_24GHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, nChannelBondingMode24GHz,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_CHANNEL_BONDING_MODE_DEFAULT,
CFG_CHANNEL_BONDING_MODE_MIN,
CFG_CHANNEL_BONDING_MODE_MAX),
REG_VARIABLE( CFG_CHANNEL_BONDING_MODE_5GHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, nChannelBondingMode5GHz,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_CHANNEL_BONDING_MODE_DEFAULT,
CFG_CHANNEL_BONDING_MODE_MIN,
CFG_CHANNEL_BONDING_MODE_MAX),
REG_VARIABLE( CFG_MAX_RX_AMPDU_FACTOR_NAME, WLAN_PARAM_Integer,
hdd_config_t, MaxRxAmpduFactor,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK ,
CFG_MAX_RX_AMPDU_FACTOR_DEFAULT,
CFG_MAX_RX_AMPDU_FACTOR_MIN,
CFG_MAX_RX_AMPDU_FACTOR_MAX),
REG_VARIABLE( CFG_FIXED_RATE_NAME, WLAN_PARAM_Integer,
hdd_config_t, TxRate,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_FIXED_RATE_DEFAULT,
CFG_FIXED_RATE_MIN,
CFG_FIXED_RATE_MAX ),
REG_VARIABLE( CFG_SHORT_GI_20MHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, ShortGI20MhzEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SHORT_GI_20MHZ_DEFAULT,
CFG_SHORT_GI_20MHZ_MIN,
CFG_SHORT_GI_20MHZ_MAX ),
REG_VARIABLE( CFG_BLOCK_ACK_AUTO_SETUP_NAME, WLAN_PARAM_Integer,
hdd_config_t, BlockAckAutoSetup,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_BLOCK_ACK_AUTO_SETUP_DEFAULT,
CFG_BLOCK_ACK_AUTO_SETUP_MIN,
CFG_BLOCK_ACK_AUTO_SETUP_MAX ),
REG_VARIABLE( CFG_SCAN_RESULT_AGE_COUNT_NAME, WLAN_PARAM_Integer,
hdd_config_t, ScanResultAgeCount,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SCAN_RESULT_AGE_COUNT_DEFAULT,
CFG_SCAN_RESULT_AGE_COUNT_MIN,
CFG_SCAN_RESULT_AGE_COUNT_MAX ),
REG_VARIABLE( CFG_SCAN_RESULT_AGE_TIME_NCNPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nScanAgeTimeNCNPS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SCAN_RESULT_AGE_TIME_NCNPS_DEFAULT,
CFG_SCAN_RESULT_AGE_TIME_NCNPS_MIN,
CFG_SCAN_RESULT_AGE_TIME_NCNPS_MAX ),
REG_VARIABLE( CFG_SCAN_RESULT_AGE_TIME_NCPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nScanAgeTimeNCPS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SCAN_RESULT_AGE_TIME_NCPS_DEFAULT,
CFG_SCAN_RESULT_AGE_TIME_NCPS_MIN,
CFG_SCAN_RESULT_AGE_TIME_NCPS_MAX ),
REG_VARIABLE( CFG_SCAN_RESULT_AGE_TIME_CNPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nScanAgeTimeCNPS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SCAN_RESULT_AGE_TIME_CNPS_DEFAULT,
CFG_SCAN_RESULT_AGE_TIME_CNPS_MIN,
CFG_SCAN_RESULT_AGE_TIME_CNPS_MAX ),
REG_VARIABLE( CFG_SCAN_RESULT_AGE_TIME_CPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nScanAgeTimeCPS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SCAN_RESULT_AGE_TIME_CPS_DEFAULT,
CFG_SCAN_RESULT_AGE_TIME_CPS_MIN,
CFG_SCAN_RESULT_AGE_TIME_CPS_MAX ),
REG_VARIABLE( CFG_RSSI_CATEGORY_GAP_NAME, WLAN_PARAM_Integer,
hdd_config_t, nRssiCatGap,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RSSI_CATEGORY_GAP_DEFAULT,
CFG_RSSI_CATEGORY_GAP_MIN,
CFG_RSSI_CATEGORY_GAP_MAX ),
REG_VARIABLE( CFG_SHORT_PREAMBLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsShortPreamble,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SHORT_PREAMBLE_DEFAULT,
CFG_SHORT_PREAMBLE_MIN,
CFG_SHORT_PREAMBLE_MAX ),
REG_VARIABLE( CFG_IBSS_AUTO_BSSID_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsAutoIbssBssid,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IBSS_AUTO_BSSID_DEFAULT,
CFG_IBSS_AUTO_BSSID_MIN,
CFG_IBSS_AUTO_BSSID_MAX ),
REG_VARIABLE_STRING( CFG_IBSS_BSSID_NAME, WLAN_PARAM_MacAddr,
hdd_config_t, IbssBssid,
VAR_FLAGS_OPTIONAL,
(void *)CFG_IBSS_BSSID_DEFAULT ),
REG_VARIABLE_STRING( CFG_INTF0_MAC_ADDR_NAME, WLAN_PARAM_MacAddr,
hdd_config_t, intfMacAddr[0],
VAR_FLAGS_OPTIONAL,
(void *)CFG_INTF0_MAC_ADDR_DEFAULT ),
REG_VARIABLE_STRING( CFG_INTF1_MAC_ADDR_NAME, WLAN_PARAM_MacAddr,
hdd_config_t, intfMacAddr[1],
VAR_FLAGS_OPTIONAL,
(void *)CFG_INTF1_MAC_ADDR_DEFAULT ),
REG_VARIABLE_STRING( CFG_INTF2_MAC_ADDR_NAME, WLAN_PARAM_MacAddr,
hdd_config_t, intfMacAddr[2],
VAR_FLAGS_OPTIONAL,
(void *)CFG_INTF2_MAC_ADDR_DEFAULT ),
REG_VARIABLE_STRING( CFG_INTF3_MAC_ADDR_NAME, WLAN_PARAM_MacAddr,
hdd_config_t, intfMacAddr[3],
VAR_FLAGS_OPTIONAL,
(void *)CFG_INTF3_MAC_ADDR_DEFAULT ),
REG_VARIABLE( CFG_AP_QOS_UAPSD_MODE_NAME , WLAN_PARAM_Integer,
hdd_config_t, apUapsdEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_QOS_UAPSD_MODE_DEFAULT,
CFG_AP_QOS_UAPSD_MODE_MIN,
CFG_AP_QOS_UAPSD_MODE_MAX ),
REG_VARIABLE_STRING( CFG_AP_COUNTRY_CODE, WLAN_PARAM_String,
hdd_config_t, apCntryCode,
VAR_FLAGS_OPTIONAL,
(void *)CFG_AP_COUNTRY_CODE_DEFAULT ),
REG_VARIABLE( CFG_AP_ENABLE_RANDOM_BSSID_NAME, WLAN_PARAM_Integer,
hdd_config_t, apRandomBssidEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_ENABLE_RANDOM_BSSID_DEFAULT,
CFG_AP_ENABLE_RANDOM_BSSID_MIN,
CFG_AP_ENABLE_RANDOM_BSSID_MAX ),
REG_VARIABLE( CFG_AP_ENABLE_PROTECTION_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, apProtEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_ENABLE_PROTECTION_MODE_DEFAULT,
CFG_AP_ENABLE_PROTECTION_MODE_MIN,
CFG_AP_ENABLE_PROTECTION_MODE_MAX ),
REG_VARIABLE( CFG_AP_PROTECTION_MODE_NAME, WLAN_PARAM_HexInteger,
hdd_config_t, apProtection,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_PROTECTION_MODE_DEFAULT,
CFG_AP_PROTECTION_MODE_MIN,
CFG_AP_PROTECTION_MODE_MAX ),
REG_VARIABLE( CFG_AP_OBSS_PROTECTION_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, apOBSSProtEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_OBSS_PROTECTION_MODE_DEFAULT,
CFG_AP_OBSS_PROTECTION_MODE_MIN,
CFG_AP_OBSS_PROTECTION_MODE_MAX ),
REG_VARIABLE( CFG_AP_STA_SECURITY_SEPERATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, apDisableIntraBssFwd,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_STA_SECURITY_SEPERATION_DEFAULT,
CFG_AP_STA_SECURITY_SEPERATION_MIN,
CFG_AP_STA_SECURITY_SEPERATION_MAX ),
REG_VARIABLE( CFG_FRAMES_PROCESSING_TH_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, MinFramesProcThres,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FRAMES_PROCESSING_TH_DEFAULT,
CFG_FRAMES_PROCESSING_TH_MIN,
CFG_FRAMES_PROCESSING_TH_MAX ),
REG_VARIABLE(CFG_SAP_CHANNEL_SELECT_START_CHANNEL , WLAN_PARAM_Integer,
hdd_config_t, apStartChannelNum,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAP_CHANNEL_SELECT_START_CHANNEL_DEFAULT,
CFG_SAP_CHANNEL_SELECT_START_CHANNEL_MIN,
CFG_SAP_CHANNEL_SELECT_START_CHANNEL_MAX ),
REG_VARIABLE(CFG_SAP_CHANNEL_SELECT_END_CHANNEL , WLAN_PARAM_Integer,
hdd_config_t, apEndChannelNum,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAP_CHANNEL_SELECT_END_CHANNEL_DEFAULT,
CFG_SAP_CHANNEL_SELECT_END_CHANNEL_MIN,
CFG_SAP_CHANNEL_SELECT_END_CHANNEL_MAX ),
REG_VARIABLE(CFG_SAP_CHANNEL_SELECT_OPERATING_BAND , WLAN_PARAM_Integer,
hdd_config_t, apOperatingBand,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAP_CHANNEL_SELECT_OPERATING_BAND_DEFAULT,
CFG_SAP_CHANNEL_SELECT_OPERATING_BAND_MIN,
CFG_SAP_CHANNEL_SELECT_OPERATING_BAND_MAX ),
REG_VARIABLE(CFG_SAP_AUTO_CHANNEL_SELECTION_NAME , WLAN_PARAM_Integer,
hdd_config_t, apAutoChannelSelection,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAP_AUTO_CHANNEL_SELECTION_DEFAULT,
CFG_SAP_AUTO_CHANNEL_SELECTION_MIN,
CFG_SAP_AUTO_CHANNEL_SELECTION_MAX ),
REG_VARIABLE(CFG_ENABLE_LTE_COEX , WLAN_PARAM_Integer,
hdd_config_t, enableLTECoex,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_LTE_COEX_DEFAULT,
CFG_ENABLE_LTE_COEX_MIN,
CFG_ENABLE_LTE_COEX_MAX ),
REG_VARIABLE( CFG_AP_KEEP_ALIVE_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, apKeepAlivePeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_KEEP_ALIVE_PERIOD_DEFAULT,
CFG_AP_KEEP_ALIVE_PERIOD_MIN,
CFG_AP_KEEP_ALIVE_PERIOD_MAX),
REG_VARIABLE( CFG_GO_KEEP_ALIVE_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, goKeepAlivePeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GO_KEEP_ALIVE_PERIOD_DEFAULT,
CFG_GO_KEEP_ALIVE_PERIOD_MIN,
CFG_GO_KEEP_ALIVE_PERIOD_MAX),
REG_VARIABLE( CFG_AP_LINK_MONITOR_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, apLinkMonitorPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_LINK_MONITOR_PERIOD_DEFAULT,
CFG_AP_LINK_MONITOR_PERIOD_MIN,
CFG_AP_LINK_MONITOR_PERIOD_MAX),
REG_VARIABLE( CFG_GO_LINK_MONITOR_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, goLinkMonitorPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_GO_LINK_MONITOR_PERIOD_DEFAULT,
CFG_GO_LINK_MONITOR_PERIOD_MIN,
CFG_GO_LINK_MONITOR_PERIOD_MAX),
REG_VARIABLE(CFG_DISABLE_PACKET_FILTER , WLAN_PARAM_Integer,
hdd_config_t, disablePacketFilter,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DISABLE_PACKET_FILTER_DEFAULT,
CFG_DISABLE_PACKET_FILTER_MIN,
CFG_DISABLE_PACKET_FILTER_MAX ),
REG_VARIABLE( CFG_BEACON_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, nBeaconInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_BEACON_INTERVAL_DEFAULT,
CFG_BEACON_INTERVAL_MIN,
CFG_BEACON_INTERVAL_MAX ),
REG_VARIABLE( CFG_ENABLE_IDLE_SCAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, nEnableIdleScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_IDLE_SCAN_DEFAULT,
CFG_ENABLE_IDLE_SCAN_MIN,
CFG_ENABLE_IDLE_SCAN_MAX ),
REG_VARIABLE( CFG_ROAMING_TIME_NAME , WLAN_PARAM_Integer,
hdd_config_t, nRoamingTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAMING_TIME_DEFAULT,
CFG_ROAMING_TIME_MIN,
CFG_ROAMING_TIME_MAX ),
REG_VARIABLE( CFG_VCC_RSSI_TRIGGER_NAME , WLAN_PARAM_Integer,
hdd_config_t, nVccRssiTrigger,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VCC_RSSI_TRIGGER_DEFAULT,
CFG_VCC_RSSI_TRIGGER_MIN,
CFG_VCC_RSSI_TRIGGER_MAX ),
REG_VARIABLE( CFG_VCC_UL_MAC_LOSS_THRESH_NAME , WLAN_PARAM_Integer,
hdd_config_t, nVccUlMacLossThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VCC_UL_MAC_LOSS_THRESH_DEFAULT,
CFG_VCC_UL_MAC_LOSS_THRESH_MIN,
CFG_VCC_UL_MAC_LOSS_THRESH_MAX ),
REG_VARIABLE( CFG_PASSIVE_MAX_CHANNEL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nPassiveMaxChnTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PASSIVE_MAX_CHANNEL_TIME_DEFAULT,
CFG_PASSIVE_MAX_CHANNEL_TIME_MIN,
CFG_PASSIVE_MAX_CHANNEL_TIME_MAX ),
REG_VARIABLE( CFG_PASSIVE_MIN_CHANNEL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nPassiveMinChnTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PASSIVE_MIN_CHANNEL_TIME_DEFAULT,
CFG_PASSIVE_MIN_CHANNEL_TIME_MIN,
CFG_PASSIVE_MIN_CHANNEL_TIME_MAX ),
REG_VARIABLE( CFG_ACTIVE_MAX_CHANNEL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMaxChnTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_MIN,
CFG_ACTIVE_MAX_CHANNEL_TIME_MAX ),
REG_VARIABLE( CFG_ACTIVE_MIN_CHANNEL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMinChnTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_MIN,
CFG_ACTIVE_MIN_CHANNEL_TIME_MAX ),
REG_VARIABLE( CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMaxChnTimeBtc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MIN,
CFG_ACTIVE_MAX_CHANNEL_TIME_BTC_MAX ),
REG_VARIABLE( CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMinChnTimeBtc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MIN,
CFG_ACTIVE_MIN_CHANNEL_TIME_BTC_MAX ),
REG_VARIABLE( CFG_RETRY_LIMIT_ZERO_NAME, WLAN_PARAM_Integer,
hdd_config_t, retryLimitZero,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RETRY_LIMIT_ZERO_DEFAULT,
CFG_RETRY_LIMIT_ZERO_MIN,
CFG_RETRY_LIMIT_ZERO_MAX ),
REG_VARIABLE( CFG_RETRY_LIMIT_ONE_NAME, WLAN_PARAM_Integer,
hdd_config_t, retryLimitOne,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RETRY_LIMIT_ONE_DEFAULT,
CFG_RETRY_LIMIT_ONE_MIN,
CFG_RETRY_LIMIT_ONE_MAX ),
REG_VARIABLE( CFG_RETRY_LIMIT_TWO_NAME, WLAN_PARAM_Integer,
hdd_config_t, retryLimitTwo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RETRY_LIMIT_TWO_DEFAULT,
CFG_RETRY_LIMIT_TWO_MIN,
CFG_RETRY_LIMIT_TWO_MAX ),
REG_VARIABLE( CFG_DISABLE_AGG_WITH_BTC_NAME, WLAN_PARAM_Integer,
hdd_config_t, disableAggWithBtc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DISABLE_AGG_WITH_BTC_DEFAULT,
CFG_DISABLE_AGG_WITH_BTC_MIN,
CFG_DISABLE_AGG_WITH_BTC_MAX ),
#ifdef WLAN_AP_STA_CONCURRENCY
REG_VARIABLE( CFG_PASSIVE_MAX_CHANNEL_TIME_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nPassiveMaxChnTimeConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PASSIVE_MAX_CHANNEL_TIME_CONC_DEFAULT,
CFG_PASSIVE_MAX_CHANNEL_TIME_CONC_MIN,
CFG_PASSIVE_MAX_CHANNEL_TIME_CONC_MAX ),
REG_VARIABLE( CFG_PASSIVE_MIN_CHANNEL_TIME_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nPassiveMinChnTimeConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PASSIVE_MIN_CHANNEL_TIME_CONC_DEFAULT,
CFG_PASSIVE_MIN_CHANNEL_TIME_CONC_MIN,
CFG_PASSIVE_MIN_CHANNEL_TIME_CONC_MAX ),
REG_VARIABLE( CFG_ACTIVE_MAX_CHANNEL_TIME_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMaxChnTimeConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_CONC_DEFAULT,
CFG_ACTIVE_MAX_CHANNEL_TIME_CONC_MIN,
CFG_ACTIVE_MAX_CHANNEL_TIME_CONC_MAX ),
REG_VARIABLE( CFG_ACTIVE_MIN_CHANNEL_TIME_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nActiveMinChnTimeConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_CONC_DEFAULT,
CFG_ACTIVE_MIN_CHANNEL_TIME_CONC_MIN,
CFG_ACTIVE_MIN_CHANNEL_TIME_CONC_MAX ),
REG_VARIABLE( CFG_REST_TIME_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nRestTimeConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REST_TIME_CONC_DEFAULT,
CFG_REST_TIME_CONC_MIN,
CFG_REST_TIME_CONC_MAX ),
REG_VARIABLE( CFG_NUM_STA_CHAN_COMBINED_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNumStaChanCombinedConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NUM_STA_CHAN_COMBINED_CONC_DEFAULT,
CFG_NUM_STA_CHAN_COMBINED_CONC_MIN,
CFG_NUM_STA_CHAN_COMBINED_CONC_MAX ),
REG_VARIABLE( CFG_NUM_P2P_CHAN_COMBINED_CONC_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNumP2PChanCombinedConc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NUM_P2P_CHAN_COMBINED_CONC_DEFAULT,
CFG_NUM_P2P_CHAN_COMBINED_CONC_MIN,
CFG_NUM_P2P_CHAN_COMBINED_CONC_MAX ),
#endif
REG_VARIABLE( CFG_MAX_PS_POLL_NAME, WLAN_PARAM_Integer,
hdd_config_t, nMaxPsPoll,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_PS_POLL_DEFAULT,
CFG_MAX_PS_POLL_MIN,
CFG_MAX_PS_POLL_MAX ),
REG_VARIABLE( CFG_MAX_TX_POWER_NAME, WLAN_PARAM_Integer,
hdd_config_t, nTxPowerCap,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_TX_POWER_DEFAULT,
CFG_MAX_TX_POWER_MIN,
CFG_MAX_TX_POWER_MAX ),
REG_VARIABLE( CFG_LOW_GAIN_OVERRIDE_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIsLowGainOverride,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LOW_GAIN_OVERRIDE_DEFAULT,
CFG_LOW_GAIN_OVERRIDE_MIN,
CFG_LOW_GAIN_OVERRIDE_MAX ),
REG_VARIABLE( CFG_RSSI_FILTER_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nRssiFilterPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RSSI_FILTER_PERIOD_DEFAULT,
CFG_RSSI_FILTER_PERIOD_MIN,
CFG_RSSI_FILTER_PERIOD_MAX ),
REG_VARIABLE( CFG_IGNORE_DTIM_NAME, WLAN_PARAM_Integer,
hdd_config_t, fIgnoreDtim,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IGNORE_DTIM_DEFAULT,
CFG_IGNORE_DTIM_MIN,
CFG_IGNORE_DTIM_MAX ),
REG_VARIABLE( CFG_MAX_LI_MODULATED_DTIM_NAME, WLAN_PARAM_Integer,
hdd_config_t, fMaxLIModulatedDTIM,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_LI_MODULATED_DTIM_DEFAULT,
CFG_MAX_LI_MODULATED_DTIM_MIN,
CFG_MAX_LI_MODULATED_DTIM_MAX ),
REG_VARIABLE( CFG_RX_ANT_CONFIGURATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, nRxAnt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RX_ANT_CONFIGURATION_NAME_DEFAULT,
CFG_RX_ANT_CONFIGURATION_NAME_MIN,
CFG_RX_ANT_CONFIGURATION_NAME_MAX ),
REG_VARIABLE( CFG_FW_HEART_BEAT_MONITORING_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableFwHeartBeatMonitoring,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FW_HEART_BEAT_MONITORING_DEFAULT,
CFG_FW_HEART_BEAT_MONITORING_MIN,
CFG_FW_HEART_BEAT_MONITORING_MAX ),
REG_VARIABLE( CFG_FW_BEACON_FILTERING_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableFwBeaconFiltering,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FW_BEACON_FILTERING_DEFAULT,
CFG_FW_BEACON_FILTERING_MIN,
CFG_FW_BEACON_FILTERING_MAX ),
REG_DYNAMIC_VARIABLE( CFG_FW_RSSI_MONITORING_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableFwRssiMonitoring,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FW_RSSI_MONITORING_DEFAULT,
CFG_FW_RSSI_MONITORING_MIN,
CFG_FW_RSSI_MONITORING_MAX,
cbNotifySetFwRssiMonitoring, 0 ),
REG_VARIABLE( CFG_DATA_INACTIVITY_TIMEOUT_NAME, WLAN_PARAM_Integer,
hdd_config_t, nDataInactivityTimeout,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DATA_INACTIVITY_TIMEOUT_DEFAULT,
CFG_DATA_INACTIVITY_TIMEOUT_MIN,
CFG_DATA_INACTIVITY_TIMEOUT_MAX ),
REG_VARIABLE( CFG_NTH_BEACON_FILTER_NAME, WLAN_PARAM_Integer,
hdd_config_t, nthBeaconFilter,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NTH_BEACON_FILTER_DEFAULT,
CFG_NTH_BEACON_FILTER_MIN,
CFG_NTH_BEACON_FILTER_MAX ),
REG_VARIABLE( CFG_QOS_WMM_MODE_NAME , WLAN_PARAM_Integer,
hdd_config_t, WmmMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_MODE_DEFAULT,
CFG_QOS_WMM_MODE_MIN,
CFG_QOS_WMM_MODE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_80211E_ENABLED_NAME , WLAN_PARAM_Integer,
hdd_config_t, b80211eIsEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_80211E_ENABLED_DEFAULT,
CFG_QOS_WMM_80211E_ENABLED_MIN,
CFG_QOS_WMM_80211E_ENABLED_MAX ),
REG_VARIABLE( CFG_QOS_WMM_UAPSD_MASK_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, UapsdMask,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_UAPSD_MASK_DEFAULT,
CFG_QOS_WMM_UAPSD_MASK_MIN,
CFG_QOS_WMM_UAPSD_MASK_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_VO_SRV_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdVoSrvIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VO_SRV_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VO_SRV_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_VO_SRV_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_VO_SUS_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdVoSuspIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VO_SUS_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VO_SUS_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_VO_SUS_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_VI_SRV_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdViSrvIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VI_SRV_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VI_SRV_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_VI_SRV_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_VI_SUS_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdViSuspIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VI_SUS_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_VI_SUS_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_VI_SUS_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_BE_SRV_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdBeSrvIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BE_SRV_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BE_SRV_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_BE_SRV_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_BE_SUS_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdBeSuspIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BE_SUS_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BE_SUS_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_BE_SUS_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_BK_SRV_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdBkSrvIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BK_SRV_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BK_SRV_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_BK_SRV_INTV_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_UAPSD_BK_SUS_INTV_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraUapsdBkSuspIntv,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BK_SUS_INTV_DEFAULT,
CFG_QOS_WMM_INFRA_UAPSD_BK_SUS_INTV_MIN,
CFG_QOS_WMM_INFRA_UAPSD_BK_SUS_INTV_MAX ),
#ifdef FEATURE_WLAN_ESE
REG_VARIABLE( CFG_QOS_WMM_INFRA_INACTIVITY_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, InfraInactivityInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_INACTIVITY_INTERVAL_DEFAULT,
CFG_QOS_WMM_INFRA_INACTIVITY_INTERVAL_MIN,
CFG_QOS_WMM_INFRA_INACTIVITY_INTERVAL_MAX),
REG_DYNAMIC_VARIABLE( CFG_ESE_FEATURE_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, isEseIniFeatureEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ESE_FEATURE_ENABLED_DEFAULT,
CFG_ESE_FEATURE_ENABLED_MIN,
CFG_ESE_FEATURE_ENABLED_MAX,
cbNotifySetEseFeatureEnabled, 0 ),
#endif // FEATURE_WLAN_ESE
#ifdef FEATURE_WLAN_LFR
/* Flag to turn ON/OFF Legacy Fast Roaming (LFR) */
REG_DYNAMIC_VARIABLE( CFG_LFR_FEATURE_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, isFastRoamIniFeatureEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LFR_FEATURE_ENABLED_DEFAULT,
CFG_LFR_FEATURE_ENABLED_MIN,
CFG_LFR_FEATURE_ENABLED_MAX,
NotifyIsFastRoamIniFeatureEnabled, 0 ),
/* Flag to turn ON/OFF MAWC (Motion Aided Wireless Connectivity) assistance for Legacy Fast Roaming */
REG_DYNAMIC_VARIABLE( CFG_LFR_MAWC_FEATURE_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, MAWCEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LFR_MAWC_FEATURE_ENABLED_DEFAULT,
CFG_LFR_MAWC_FEATURE_ENABLED_MIN,
CFG_LFR_MAWC_FEATURE_ENABLED_MAX,
NotifyIsMAWCIniFeatureEnabled, 0 ),
#endif // FEATURE_WLAN_LFR
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
/* Flag to turn ON/OFF 11r and ESE FastTransition */
REG_DYNAMIC_VARIABLE( CFG_FAST_TRANSITION_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, isFastTransitionEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FAST_TRANSITION_ENABLED_NAME_DEFAULT,
CFG_FAST_TRANSITION_ENABLED_NAME_MIN,
CFG_FAST_TRANSITION_ENABLED_NAME_MAX,
cbNotifySetFastTransitionEnabled, 0 ),
/* RSSI delta by which a candidate AP must be better than the current AP
 * for it to be considered a roam target */
REG_DYNAMIC_VARIABLE( CFG_ROAM_RSSI_DIFF_NAME, WLAN_PARAM_Integer,
hdd_config_t, RoamRssiDiff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_RSSI_DIFF_DEFAULT,
CFG_ROAM_RSSI_DIFF_MIN,
CFG_ROAM_RSSI_DIFF_MAX,
cbNotifySetRoamRssiDiff, 0),
REG_DYNAMIC_VARIABLE( CFG_IMMEDIATE_ROAM_RSSI_DIFF_NAME, WLAN_PARAM_Integer,
hdd_config_t, nImmediateRoamRssiDiff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IMMEDIATE_ROAM_RSSI_DIFF_DEFAULT,
CFG_IMMEDIATE_ROAM_RSSI_DIFF_MIN,
CFG_IMMEDIATE_ROAM_RSSI_DIFF_MAX,
cbNotifySetImmediateRoamRssiDiff, 0),
REG_DYNAMIC_VARIABLE( CFG_ENABLE_WES_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, isWESModeEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_WES_MODE_NAME_DEFAULT,
CFG_ENABLE_WES_MODE_NAME_MIN,
CFG_ENABLE_WES_MODE_NAME_MAX,
cbNotifySetWESMode, 0),
#endif
#ifdef FEATURE_WLAN_OKC
REG_DYNAMIC_VARIABLE( CFG_OKC_FEATURE_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, isOkcIniFeatureEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OKC_FEATURE_ENABLED_DEFAULT,
CFG_OKC_FEATURE_ENABLED_MIN,
CFG_OKC_FEATURE_ENABLED_MAX,
cbNotifySetOkcFeatureEnabled, 0 ),
#endif
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
REG_DYNAMIC_VARIABLE( CFG_ROAM_SCAN_OFFLOAD_ENABLED, WLAN_PARAM_Integer,
hdd_config_t, isRoamOffloadScanEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_SCAN_OFFLOAD_ENABLED_DEFAULT,
CFG_ROAM_SCAN_OFFLOAD_ENABLED_MIN,
CFG_ROAM_SCAN_OFFLOAD_ENABLED_MAX,
cbNotifyUpdateRoamScanOffloadEnabled, 0),
#endif
REG_VARIABLE( CFG_QOS_WMM_PKT_CLASSIFY_BASIS_NAME , WLAN_PARAM_Integer,
hdd_config_t, PktClassificationBasis,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_PKT_CLASSIFY_BASIS_DEFAULT,
CFG_QOS_WMM_PKT_CLASSIFY_BASIS_MIN,
CFG_QOS_WMM_PKT_CLASSIFY_BASIS_MAX ),
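/* Implicit QoS/TSPEC parameters, repeated for each access category
   (VO, VI, BE, BK): traffic direction, nominal MSDU size, mean data
   rate, minimum PHY rate and surplus bandwidth allowance (SBA) */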
REG_VARIABLE( CFG_QOS_WMM_INFRA_DIR_AC_VO_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraDirAcVo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_VO_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_VO_MIN,
CFG_QOS_WMM_INFRA_DIR_AC_VO_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VO_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraNomMsduSizeAcVo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VO_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VO_MIN,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VO_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VO_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMeanDataRateAcVo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VO_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VO_MIN,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VO_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VO_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMinPhyRateAcVo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VO_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VO_MIN,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VO_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_SBA_AC_VO_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraSbaAcVo,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_VO_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_VO_MIN,
CFG_QOS_WMM_INFRA_SBA_AC_VO_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_DIR_AC_VI_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraDirAcVi,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_VI_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_VI_MIN,
CFG_QOS_WMM_INFRA_DIR_AC_VI_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VI_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraNomMsduSizeAcVi,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VI_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VI_MIN,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_VI_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VI_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMeanDataRateAcVi,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VI_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VI_MIN,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_VI_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VI_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMinPhyRateAcVi,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VI_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VI_MIN,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_VI_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_SBA_AC_VI_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraSbaAcVi,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_VI_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_VI_MIN,
CFG_QOS_WMM_INFRA_SBA_AC_VI_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_DIR_AC_BE_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraDirAcBe,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_BE_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_BE_MIN,
CFG_QOS_WMM_INFRA_DIR_AC_BE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BE_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraNomMsduSizeAcBe,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BE_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BE_MIN,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BE_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMeanDataRateAcBe,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BE_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BE_MIN,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BE_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMinPhyRateAcBe,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BE_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BE_MIN,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_SBA_AC_BE_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraSbaAcBe,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_BE_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_BE_MIN,
CFG_QOS_WMM_INFRA_SBA_AC_BE_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_DIR_AC_BK_NAME , WLAN_PARAM_Integer,
hdd_config_t, InfraDirAcBk,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_BK_DEFAULT,
CFG_QOS_WMM_INFRA_DIR_AC_BK_MIN,
CFG_QOS_WMM_INFRA_DIR_AC_BK_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BK_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraNomMsduSizeAcBk,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BK_DEFAULT,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BK_MIN,
CFG_QOS_WMM_INFRA_NOM_MSDU_SIZE_AC_BK_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BK_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMeanDataRateAcBk,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BK_DEFAULT,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BK_MIN,
CFG_QOS_WMM_INFRA_MEAN_DATA_RATE_AC_BK_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BK_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraMinPhyRateAcBk,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BK_DEFAULT,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BK_MIN,
CFG_QOS_WMM_INFRA_MIN_PHY_RATE_AC_BK_MAX ),
REG_VARIABLE( CFG_QOS_WMM_INFRA_SBA_AC_BK_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, InfraSbaAcBk,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_BK_DEFAULT,
CFG_QOS_WMM_INFRA_SBA_AC_BK_MIN,
CFG_QOS_WMM_INFRA_SBA_AC_BK_MAX ),
REG_VARIABLE( CFG_TL_WFQ_BK_WEIGHT_NAME , WLAN_PARAM_Integer,
hdd_config_t, WfqBkWeight,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TL_WFQ_BK_WEIGHT_DEFAULT,
CFG_TL_WFQ_BK_WEIGHT_MIN,
CFG_TL_WFQ_BK_WEIGHT_MAX ),
REG_VARIABLE( CFG_TL_WFQ_BE_WEIGHT_NAME , WLAN_PARAM_Integer,
hdd_config_t, WfqBeWeight,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TL_WFQ_BE_WEIGHT_DEFAULT,
CFG_TL_WFQ_BE_WEIGHT_MIN,
CFG_TL_WFQ_BE_WEIGHT_MAX ),
REG_VARIABLE( CFG_TL_WFQ_VI_WEIGHT_NAME , WLAN_PARAM_Integer,
hdd_config_t, WfqViWeight,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TL_WFQ_VI_WEIGHT_DEFAULT,
CFG_TL_WFQ_VI_WEIGHT_MIN,
CFG_TL_WFQ_VI_WEIGHT_MAX ),
REG_VARIABLE( CFG_TL_WFQ_VO_WEIGHT_NAME , WLAN_PARAM_Integer,
hdd_config_t, WfqVoWeight,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TL_WFQ_VO_WEIGHT_DEFAULT,
CFG_TL_WFQ_VO_WEIGHT_MIN,
CFG_TL_WFQ_VO_WEIGHT_MAX ),
REG_VARIABLE( CFG_TL_DELAYED_TRGR_FRM_INT_NAME , WLAN_PARAM_Integer,
hdd_config_t, DelayedTriggerFrmInt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TL_DELAYED_TRGR_FRM_INT_DEFAULT,
CFG_TL_DELAYED_TRGR_FRM_INT_MIN,
CFG_TL_DELAYED_TRGR_FRM_INT_MAX ),
REG_VARIABLE( CFG_REORDER_TIME_BK_NAME , WLAN_PARAM_Integer,
hdd_config_t, BkReorderAgingTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REORDER_TIME_BK_DEFAULT,
CFG_REORDER_TIME_BK_MIN,
CFG_REORDER_TIME_BK_MAX ),
REG_VARIABLE( CFG_REORDER_TIME_BE_NAME , WLAN_PARAM_Integer,
hdd_config_t, BeReorderAgingTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REORDER_TIME_BE_DEFAULT,
CFG_REORDER_TIME_BE_MIN,
CFG_REORDER_TIME_BE_MAX ),
REG_VARIABLE( CFG_REORDER_TIME_VI_NAME , WLAN_PARAM_Integer,
hdd_config_t, ViReorderAgingTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REORDER_TIME_VI_DEFAULT,
CFG_REORDER_TIME_VI_MIN,
CFG_REORDER_TIME_VI_MAX ),
REG_VARIABLE( CFG_REORDER_TIME_VO_NAME , WLAN_PARAM_Integer,
hdd_config_t, VoReorderAgingTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REORDER_TIME_VO_DEFAULT,
CFG_REORDER_TIME_VO_MIN,
CFG_REORDER_TIME_VO_MAX ),
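/* String-valued entries use REG_VARIABLE_STRING: an optional default
   pointer and no numeric range check */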
REG_VARIABLE_STRING( CFG_WOWL_PATTERN_NAME, WLAN_PARAM_String,
hdd_config_t, wowlPattern,
VAR_FLAGS_OPTIONAL,
(void *)CFG_WOWL_PATTERN_DEFAULT ),
REG_VARIABLE( CFG_QOS_IMPLICIT_SETUP_ENABLED_NAME , WLAN_PARAM_Integer,
hdd_config_t, bImplicitQosEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_IMPLICIT_SETUP_ENABLED_DEFAULT,
CFG_QOS_IMPLICIT_SETUP_ENABLED_MIN,
CFG_QOS_IMPLICIT_SETUP_ENABLED_MAX ),
REG_VARIABLE( CFG_BTC_EXECUTION_MODE_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcExecutionMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_EXECUTION_MODE_DEFAULT,
CFG_BTC_EXECUTION_MODE_MIN,
CFG_BTC_EXECUTION_MODE_MAX ),
REG_VARIABLE( CFG_BTC_DHCP_PROTECTION_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcConsBtSlotsToBlockDuringDhcp,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DHCP_PROTECTION_DEFAULT,
CFG_BTC_DHCP_PROTECTION_MIN,
CFG_BTC_DHCP_PROTECTION_MAX ),
REG_VARIABLE( CFG_BTC_A2DP_DHCP_PROTECTION_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcA2DPBtSubIntervalsDuringDhcp,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_A2DP_DHCP_PROTECTION_DEFAULT,
CFG_BTC_A2DP_DHCP_PROTECTION_MIN,
CFG_BTC_A2DP_DHCP_PROTECTION_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_INQ_BT_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenInqBt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_INQ_BT_DEFAULT,
CFG_BTC_STATIC_LEN_INQ_BT_MIN,
CFG_BTC_STATIC_LEN_INQ_BT_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_PAGE_BT_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenPageBt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_PAGE_BT_DEFAULT,
CFG_BTC_STATIC_LEN_PAGE_BT_MIN,
CFG_BTC_STATIC_LEN_PAGE_BT_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_CONN_BT_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenConnBt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_CONN_BT_DEFAULT,
CFG_BTC_STATIC_LEN_CONN_BT_MIN,
CFG_BTC_STATIC_LEN_CONN_BT_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_LE_BT_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenLeBt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_LE_BT_DEFAULT,
CFG_BTC_STATIC_LEN_LE_BT_MIN,
CFG_BTC_STATIC_LEN_LE_BT_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_INQ_WLAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenInqWlan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_INQ_WLAN_DEFAULT,
CFG_BTC_STATIC_LEN_INQ_WLAN_MIN,
CFG_BTC_STATIC_LEN_INQ_WLAN_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_PAGE_WLAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenPageWlan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_PAGE_WLAN_DEFAULT,
CFG_BTC_STATIC_LEN_PAGE_WLAN_MIN,
CFG_BTC_STATIC_LEN_PAGE_WLAN_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_CONN_WLAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenConnWlan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_CONN_WLAN_DEFAULT,
CFG_BTC_STATIC_LEN_CONN_WLAN_MIN,
CFG_BTC_STATIC_LEN_CONN_WLAN_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_LEN_LE_WLAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcStaticLenLeWlan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_LEN_LE_WLAN_DEFAULT,
CFG_BTC_STATIC_LEN_LE_WLAN_MIN,
CFG_BTC_STATIC_LEN_LE_WLAN_MAX ),
REG_VARIABLE( CFG_BTC_DYN_MAX_LEN_BT_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcDynMaxLenBt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DYN_MAX_LEN_BT_DEFAULT,
CFG_BTC_DYN_MAX_LEN_BT_MIN,
CFG_BTC_DYN_MAX_LEN_BT_MAX ),
REG_VARIABLE( CFG_BTC_DYN_MAX_LEN_WLAN_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcDynMaxLenWlan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DYN_MAX_LEN_WLAN_DEFAULT,
CFG_BTC_DYN_MAX_LEN_WLAN_MIN,
CFG_BTC_DYN_MAX_LEN_WLAN_MAX ),
REG_VARIABLE( CFG_BTC_MAX_SCO_BLOCK_PERC_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcMaxScoBlockPerc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_MAX_SCO_BLOCK_PERC_DEFAULT,
CFG_BTC_MAX_SCO_BLOCK_PERC_MIN,
CFG_BTC_MAX_SCO_BLOCK_PERC_MAX ),
REG_VARIABLE( CFG_BTC_DHCP_PROT_ON_A2DP_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcDhcpProtOnA2dp,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DHCP_PROT_ON_A2DP_DEFAULT,
CFG_BTC_DHCP_PROT_ON_A2DP_MIN,
CFG_BTC_DHCP_PROT_ON_A2DP_MAX ),
REG_VARIABLE( CFG_BTC_DHCP_PROT_ON_SCO_NAME , WLAN_PARAM_Integer,
hdd_config_t, btcDhcpProtOnSco,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DHCP_PROT_ON_SCO_DEFAULT,
CFG_BTC_DHCP_PROT_ON_SCO_MIN,
CFG_BTC_DHCP_PROT_ON_SCO_MAX ),
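/* MWS coexistence victim table: ten victims, each with a WAN
   frequency, a WLAN frequency and two config words, all sharing the
   CFG_MWS_COEX_VX_* bounds */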
REG_VARIABLE( CFG_MWS_COEX_V1_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[0],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V1_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[0],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V1_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[0],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V1_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[0],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V2_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[1],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V2_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[1],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V2_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[1],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V2_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[1],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V3_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[2],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V3_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[2],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V3_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[2],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V3_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[2],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V4_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[3],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V4_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[3],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V4_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[3],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V4_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[3],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V5_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[4],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V5_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[4],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V5_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[4],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V5_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[4],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V6_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[5],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V6_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[5],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V6_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[5],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V6_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[5],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V7_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[6],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V7_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[6],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V7_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[6],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V7_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[6],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V8_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[7],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V8_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[7],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V8_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[7],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V8_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[7],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V9_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[8],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V9_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[8],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V9_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[8],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V9_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[8],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V10_WAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWANFreq[9],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WAN_FREQ_MIN,
CFG_MWS_COEX_VX_WAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V10_WLAN_FREQ_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimWLANFreq[9],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_DEFAULT,
CFG_MWS_COEX_VX_WLAN_FREQ_MIN,
CFG_MWS_COEX_VX_WLAN_FREQ_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V10_CONFIG_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig[9],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_V10_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexVictimConfig2[9],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_DEFAULT,
CFG_MWS_COEX_VX_CONFIG_MIN,
CFG_MWS_COEX_VX_CONFIG_MAX ),
REG_VARIABLE( CFG_MWS_COEX_MODEM_BACKOFF_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexModemBackoff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_MODEM_BACKOFF_DEFAULT,
CFG_MWS_COEX_MODEM_BACKOFF_MIN,
CFG_MWS_COEX_MODEM_BACKOFF_MAX ),
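/* Six generic MWS coexistence config words, sharing the
   CFG_MWS_COEX_CONFIGX_* bounds */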
REG_VARIABLE( CFG_MWS_COEX_CONFIG1_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[0],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_MWS_COEX_CONFIG2_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[1],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_MWS_COEX_CONFIG3_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[2],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_MWS_COEX_CONFIG4_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[3],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_MWS_COEX_CONFIG5_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[4],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_MWS_COEX_CONFIG6_NAME , WLAN_PARAM_Integer,
hdd_config_t, mwsCoexConfig[5],
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MWS_COEX_CONFIGX_DEFAULT,
CFG_MWS_COEX_CONFIGX_MIN,
CFG_MWS_COEX_CONFIGX_MAX ),
REG_VARIABLE( CFG_SAR_POWER_BACKOFF_NAME , WLAN_PARAM_Integer,
hdd_config_t, SARPowerBackoff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAR_POWER_BACKOFF_DEFAULT,
CFG_SAR_POWER_BACKOFF_MIN,
CFG_SAR_POWER_BACKOFF_MAX ),
REG_VARIABLE( CFG_AP_LISTEN_MODE_NAME , WLAN_PARAM_Integer,
hdd_config_t, nEnableListenMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_LISTEN_MODE_DEFAULT,
CFG_AP_LISTEN_MODE_MIN,
CFG_AP_LISTEN_MODE_MAX ),
REG_VARIABLE( CFG_AP_AUTO_SHUT_OFF , WLAN_PARAM_Integer,
hdd_config_t, nAPAutoShutOff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_AUTO_SHUT_OFF_DEFAULT,
CFG_AP_AUTO_SHUT_OFF_MIN,
CFG_AP_AUTO_SHUT_OFF_MAX ),
#if defined WLAN_FEATURE_VOWIFI
REG_VARIABLE( CFG_RRM_ENABLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, fRrmEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RRM_ENABLE_DEFAULT,
CFG_RRM_ENABLE_MIN,
CFG_RRM_ENABLE_MAX ),
REG_VARIABLE( CFG_RRM_OPERATING_CHAN_MAX_DURATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, nInChanMeasMaxDuration,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RRM_OPERATING_CHAN_MAX_DURATION_DEFAULT,
CFG_RRM_OPERATING_CHAN_MAX_DURATION_MIN,
CFG_RRM_OPERATING_CHAN_MAX_DURATION_MAX ),
REG_VARIABLE( CFG_RRM_NON_OPERATING_CHAN_MAX_DURATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, nOutChanMeasMaxDuration,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RRM_NON_OPERATING_CHAN_MAX_DURATION_DEFAULT,
CFG_RRM_NON_OPERATING_CHAN_MAX_DURATION_MIN,
CFG_RRM_NON_OPERATING_CHAN_MAX_DURATION_MAX ),
REG_VARIABLE( CFG_RRM_MEAS_RANDOMIZATION_INTVL_NAME, WLAN_PARAM_Integer,
hdd_config_t, nRrmRandnIntvl,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RRM_MEAS_RANDOMIZATION_INTVL_DEFAULT,
CFG_RRM_MEAS_RANDOMIZATION_INTVL_MIN,
CFG_RRM_MEAS_RANDOMIZATION_INTVL_MAX ),
#endif
#ifdef WLAN_FEATURE_VOWIFI_11R
REG_VARIABLE( CFG_FT_RESOURCE_REQ_NAME, WLAN_PARAM_Integer,
hdd_config_t, fFTResourceReqSupported,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_FT_RESOURCE_REQ_DEFAULT,
CFG_FT_RESOURCE_REQ_MIN,
CFG_FT_RESOURCE_REQ_MAX ),
#endif
REG_VARIABLE( CFG_ENABLE_ROAM_DELAY_STATS, WLAN_PARAM_Integer,
hdd_config_t, gEnableRoamDelayStats,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_ROAM_DELAY_STATS_DEFAULT,
CFG_ENABLE_ROAM_DELAY_STATS_MIN,
CFG_ENABLE_ROAM_DELAY_STATS_MAX ),
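/* Neighbor-roaming scan tuning; the REG_DYNAMIC_VARIABLE entries carry
   cbNotify* callbacks, presumably so changes take effect at runtime */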
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
REG_DYNAMIC_VARIABLE( CFG_NEIGHBOR_SCAN_TIMER_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborScanPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_SCAN_TIMER_PERIOD_DEFAULT,
CFG_NEIGHBOR_SCAN_TIMER_PERIOD_MIN,
CFG_NEIGHBOR_SCAN_TIMER_PERIOD_MAX,
cbNotifySetNeighborScanPeriod, 0 ),
REG_VARIABLE( CFG_NEIGHBOR_REASSOC_RSSI_THRESHOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborReassocRssiThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_REASSOC_RSSI_THRESHOLD_DEFAULT,
CFG_NEIGHBOR_REASSOC_RSSI_THRESHOLD_MIN,
CFG_NEIGHBOR_REASSOC_RSSI_THRESHOLD_MAX ),
REG_DYNAMIC_VARIABLE( CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborLookupRssiThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_DEFAULT,
CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_MIN,
CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_MAX,
cbNotifySetNeighborLookupRssiThreshold, 0 ),
REG_VARIABLE_STRING( CFG_NEIGHBOR_SCAN_CHAN_LIST_NAME, WLAN_PARAM_String,
hdd_config_t, neighborScanChanList,
VAR_FLAGS_OPTIONAL,
(void *)CFG_NEIGHBOR_SCAN_CHAN_LIST_DEFAULT ),
REG_DYNAMIC_VARIABLE( CFG_NEIGHBOR_SCAN_MIN_CHAN_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborScanMinChanTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_SCAN_MIN_CHAN_TIME_DEFAULT,
CFG_NEIGHBOR_SCAN_MIN_CHAN_TIME_MIN,
CFG_NEIGHBOR_SCAN_MIN_CHAN_TIME_MAX,
cbNotifySetNeighborScanMinChanTime, 0 ),
REG_DYNAMIC_VARIABLE( CFG_NEIGHBOR_SCAN_MAX_CHAN_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborScanMaxChanTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_SCAN_MAX_CHAN_TIME_DEFAULT,
CFG_NEIGHBOR_SCAN_MAX_CHAN_TIME_MIN,
CFG_NEIGHBOR_SCAN_MAX_CHAN_TIME_MAX,
cbNotifySetNeighborScanMaxChanTime, 0 ),
REG_VARIABLE( CFG_11R_NEIGHBOR_REQ_MAX_TRIES_NAME, WLAN_PARAM_Integer,
hdd_config_t, nMaxNeighborReqTries,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_11R_NEIGHBOR_REQ_MAX_TRIES_DEFAULT,
CFG_11R_NEIGHBOR_REQ_MAX_TRIES_MIN,
CFG_11R_NEIGHBOR_REQ_MAX_TRIES_MAX),
REG_DYNAMIC_VARIABLE( CFG_NEIGHBOR_SCAN_RESULTS_REFRESH_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborResultsRefreshPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_SCAN_RESULTS_REFRESH_PERIOD_DEFAULT,
CFG_NEIGHBOR_SCAN_RESULTS_REFRESH_PERIOD_MIN,
CFG_NEIGHBOR_SCAN_RESULTS_REFRESH_PERIOD_MAX,
cbNotifySetNeighborResultsRefreshPeriod, 0 ),
REG_DYNAMIC_VARIABLE( CFG_EMPTY_SCAN_REFRESH_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, nEmptyScanRefreshPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_EMPTY_SCAN_REFRESH_PERIOD_DEFAULT,
CFG_EMPTY_SCAN_REFRESH_PERIOD_MIN,
CFG_EMPTY_SCAN_REFRESH_PERIOD_MAX,
cbNotifySetEmptyScanRefreshPeriod, 0 ),
REG_VARIABLE( CFG_NEIGHBOR_INITIAL_FORCED_ROAM_TO_5GH_ENABLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNeighborInitialForcedRoamTo5GhEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NEIGHBOR_INITIAL_FORCED_ROAM_TO_5GH_ENABLE_DEFAULT,
CFG_NEIGHBOR_INITIAL_FORCED_ROAM_TO_5GH_ENABLE_MIN,
CFG_NEIGHBOR_INITIAL_FORCED_ROAM_TO_5GH_ENABLE_MAX),
#endif /* WLAN_FEATURE_NEIGHBOR_ROAMING */
REG_VARIABLE( CFG_QOS_WMM_BURST_SIZE_DEFN_NAME , WLAN_PARAM_Integer,
hdd_config_t, burstSizeDefinition,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_BURST_SIZE_DEFN_DEFAULT,
CFG_QOS_WMM_BURST_SIZE_DEFN_MIN,
CFG_QOS_WMM_BURST_SIZE_DEFN_MAX ),
REG_VARIABLE( CFG_MCAST_BCAST_FILTER_SETTING_NAME, WLAN_PARAM_Integer,
hdd_config_t, mcastBcastFilterSetting,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MCAST_BCAST_FILTER_SETTING_DEFAULT,
CFG_MCAST_BCAST_FILTER_SETTING_MIN,
CFG_MCAST_BCAST_FILTER_SETTING_MAX ),
REG_VARIABLE( CFG_ENABLE_HOST_ARPOFFLOAD_NAME, WLAN_PARAM_Integer,
hdd_config_t, fhostArpOffload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_HOST_ARPOFFLOAD_DEFAULT,
CFG_ENABLE_HOST_ARPOFFLOAD_MIN,
CFG_ENABLE_HOST_ARPOFFLOAD_MAX ),
REG_VARIABLE( CFG_ENABLE_HOST_NSOFFLOAD_NAME, WLAN_PARAM_Integer,
hdd_config_t, fhostNSOffload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_HOST_NSOFFLOAD_DEFAULT,
CFG_ENABLE_HOST_NSOFFLOAD_MIN,
CFG_ENABLE_HOST_NSOFFLOAD_MAX ),
REG_VARIABLE( CFG_QOS_WMM_TS_INFO_ACK_POLICY_NAME , WLAN_PARAM_HexInteger,
hdd_config_t, tsInfoAckPolicy,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_WMM_TS_INFO_ACK_POLICY_DEFAULT,
CFG_QOS_WMM_TS_INFO_ACK_POLICY_MIN,
CFG_QOS_WMM_TS_INFO_ACK_POLICY_MAX ),
REG_VARIABLE( CFG_SINGLE_TID_RC_NAME, WLAN_PARAM_Integer,
hdd_config_t, bSingleTidRc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SINGLE_TID_RC_DEFAULT,
CFG_SINGLE_TID_RC_MIN,
CFG_SINGLE_TID_RC_MAX),
REG_VARIABLE( CFG_DYNAMIC_PSPOLL_VALUE_NAME, WLAN_PARAM_Integer,
hdd_config_t, dynamicPsPollValue,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DYNAMIC_PSPOLL_VALUE_DEFAULT,
CFG_DYNAMIC_PSPOLL_VALUE_MIN,
CFG_DYNAMIC_PSPOLL_VALUE_MAX ),
REG_VARIABLE( CFG_TELE_BCN_WAKEUP_EN_NAME, WLAN_PARAM_Integer,
hdd_config_t, teleBcnWakeupEn,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TELE_BCN_WAKEUP_EN_DEFAULT,
CFG_TELE_BCN_WAKEUP_EN_MIN,
CFG_TELE_BCN_WAKEUP_EN_MAX ),
REG_VARIABLE( CFG_INFRA_STA_KEEP_ALIVE_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, infraStaKeepAlivePeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_INFRA_STA_KEEP_ALIVE_PERIOD_DEFAULT,
CFG_INFRA_STA_KEEP_ALIVE_PERIOD_MIN,
CFG_INFRA_STA_KEEP_ALIVE_PERIOD_MAX),
REG_VARIABLE( CFG_QOS_ADDTS_WHEN_ACM_IS_OFF_NAME , WLAN_PARAM_Integer,
hdd_config_t, AddTSWhenACMIsOff,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_QOS_ADDTS_WHEN_ACM_IS_OFF_DEFAULT,
CFG_QOS_ADDTS_WHEN_ACM_IS_OFF_MIN,
CFG_QOS_ADDTS_WHEN_ACM_IS_OFF_MAX ),
REG_VARIABLE( CFG_VALIDATE_SCAN_LIST_NAME , WLAN_PARAM_Integer,
hdd_config_t, fValidateScanList,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VALIDATE_SCAN_LIST_DEFAULT,
CFG_VALIDATE_SCAN_LIST_MIN,
CFG_VALIDATE_SCAN_LIST_MAX ),
REG_VARIABLE( CFG_NULLDATA_AP_RESP_TIMEOUT_NAME, WLAN_PARAM_Integer,
hdd_config_t, nNullDataApRespTimeout,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_NULLDATA_AP_RESP_TIMEOUT_DEFAULT,
CFG_NULLDATA_AP_RESP_TIMEOUT_MIN,
CFG_NULLDATA_AP_RESP_TIMEOUT_MAX ),
REG_VARIABLE( CFG_AP_DATA_AVAIL_POLL_PERIOD_NAME, WLAN_PARAM_Integer,
hdd_config_t, apDataAvailPollPeriodInMs,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AP_DATA_AVAIL_POLL_PERIOD_DEFAULT,
CFG_AP_DATA_AVAIL_POLL_PERIOD_MIN,
CFG_AP_DATA_AVAIL_POLL_PERIOD_MAX ),
REG_VARIABLE( CFG_ENABLE_BTAMP_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableBtAmp,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_BTAMP_DEFAULT,
CFG_ENABLE_BTAMP_MIN,
CFG_ENABLE_BTAMP_MAX ),
#ifdef WLAN_BTAMP_FEATURE
REG_VARIABLE( CFG_BT_AMP_PREFERRED_CHANNEL_NAME, WLAN_PARAM_Integer,
hdd_config_t, preferredChannel,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BT_AMP_PREFERRED_CHANNEL_DEFAULT,
CFG_BT_AMP_PREFERRED_CHANNEL_MIN,
CFG_BT_AMP_PREFERRED_CHANNEL_MAX ),
#endif // WLAN_BTAMP_FEATURE
REG_VARIABLE( CFG_BAND_CAPABILITY_NAME, WLAN_PARAM_Integer,
hdd_config_t, nBandCapability,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BAND_CAPABILITY_DEFAULT,
CFG_BAND_CAPABILITY_MIN,
CFG_BAND_CAPABILITY_MAX ),
REG_VARIABLE( CFG_ENABLE_BEACON_EARLY_TERMINATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableBeaconEarlyTermination,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_BEACON_EARLY_TERMINATION_DEFAULT,
CFG_ENABLE_BEACON_EARLY_TERMINATION_MIN,
CFG_ENABLE_BEACON_EARLY_TERMINATION_MAX ),
/* CFG_VOS_TRACE_ENABLE Parameters */
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_BAP_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableBAP,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_TL_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableTL,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_WDI_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableWDI,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_HDD_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableHDD,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_SME_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableSME,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_PE_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnablePE,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_PMC_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnablePMC,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_WDA_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableWDA,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_SYS_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableSYS,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_VOSS_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableVOSS,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_SAP_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableSAP,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_VOS_TRACE_ENABLE_HDD_SAP_NAME, WLAN_PARAM_Integer,
hdd_config_t, vosTraceEnableHDDSAP,
VAR_FLAGS_OPTIONAL,
CFG_VOS_TRACE_ENABLE_DEFAULT,
CFG_VOS_TRACE_ENABLE_MIN,
CFG_VOS_TRACE_ENABLE_MAX ),
/* Note that since the default value is out of range, we cannot enable
   range checking; otherwise we would get a system log message. */
REG_VARIABLE( CFG_WDI_TRACE_ENABLE_DAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, wdiTraceEnableDAL,
VAR_FLAGS_OPTIONAL,
CFG_WDI_TRACE_ENABLE_DEFAULT,
CFG_WDI_TRACE_ENABLE_MIN,
CFG_WDI_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_WDI_TRACE_ENABLE_CTL_NAME, WLAN_PARAM_Integer,
hdd_config_t, wdiTraceEnableCTL,
VAR_FLAGS_OPTIONAL,
CFG_WDI_TRACE_ENABLE_DEFAULT,
CFG_WDI_TRACE_ENABLE_MIN,
CFG_WDI_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_WDI_TRACE_ENABLE_DAT_NAME, WLAN_PARAM_Integer,
hdd_config_t, wdiTraceEnableDAT,
VAR_FLAGS_OPTIONAL,
CFG_WDI_TRACE_ENABLE_DEFAULT,
CFG_WDI_TRACE_ENABLE_MIN,
CFG_WDI_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_WDI_TRACE_ENABLE_PAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, wdiTraceEnablePAL,
VAR_FLAGS_OPTIONAL,
CFG_WDI_TRACE_ENABLE_DEFAULT,
CFG_WDI_TRACE_ENABLE_MIN,
CFG_WDI_TRACE_ENABLE_MAX ),
REG_VARIABLE( CFG_TELE_BCN_TRANS_LI_NAME, WLAN_PARAM_Integer,
hdd_config_t, nTeleBcnTransListenInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TELE_BCN_TRANS_LI_DEFAULT,
CFG_TELE_BCN_TRANS_LI_MIN,
CFG_TELE_BCN_TRANS_LI_MAX ),
REG_VARIABLE( CFG_TELE_BCN_TRANS_LI_NUM_IDLE_BCNS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nTeleBcnTransLiNumIdleBeacons,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TELE_BCN_TRANS_LI_NUM_IDLE_BCNS_DEFAULT,
CFG_TELE_BCN_TRANS_LI_NUM_IDLE_BCNS_MIN,
CFG_TELE_BCN_TRANS_LI_NUM_IDLE_BCNS_MAX ),
REG_VARIABLE( CFG_TELE_BCN_MAX_LI_NAME, WLAN_PARAM_Integer,
hdd_config_t, nTeleBcnMaxListenInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TELE_BCN_MAX_LI_DEFAULT,
CFG_TELE_BCN_MAX_LI_MIN,
CFG_TELE_BCN_MAX_LI_MAX ),
REG_VARIABLE( CFG_TELE_BCN_MAX_LI_NUM_IDLE_BCNS_NAME, WLAN_PARAM_Integer,
hdd_config_t, nTeleBcnMaxLiNumIdleBeacons,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TELE_BCN_MAX_LI_NUM_IDLE_BCNS_DEFAULT,
CFG_TELE_BCN_MAX_LI_NUM_IDLE_BCNS_MIN,
CFG_TELE_BCN_MAX_LI_NUM_IDLE_BCNS_MAX ),
REG_VARIABLE( CFG_BCN_EARLY_TERM_WAKE_NAME, WLAN_PARAM_Integer,
hdd_config_t, bcnEarlyTermWakeInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BCN_EARLY_TERM_WAKE_DEFAULT,
CFG_BCN_EARLY_TERM_WAKE_MIN,
CFG_BCN_EARLY_TERM_WAKE_MAX ),
REG_VARIABLE( CFG_ENABLE_CLOSE_LOOP_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableCloseLoop,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_CLOSE_LOOP_DEFAULT,
CFG_ENABLE_CLOSE_LOOP_MIN,
CFG_ENABLE_CLOSE_LOOP_MAX ),
REG_VARIABLE( CFG_ENABLE_BYPASS_11D_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableBypass11d,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_BYPASS_11D_DEFAULT,
CFG_ENABLE_BYPASS_11D_MIN,
CFG_ENABLE_BYPASS_11D_MAX ),
REG_VARIABLE( CFG_ENABLE_DFS_CHNL_SCAN_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableDFSChnlScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DFS_CHNL_SCAN_DEFAULT,
CFG_ENABLE_DFS_CHNL_SCAN_MIN,
CFG_ENABLE_DFS_CHNL_SCAN_MAX ),
REG_VARIABLE( CFG_ENABLE_DFS_PNO_CHNL_SCAN_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableDFSPnoChnlScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DFS_PNO_CHNL_SCAN_DEFAULT,
CFG_ENABLE_DFS_PNO_CHNL_SCAN_MIN,
CFG_ENABLE_DFS_PNO_CHNL_SCAN_MAX ),
REG_VARIABLE( CFG_ENABLE_DYNAMIC_DTIM_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableDynamicDTIM,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DYNAMIC_DTIM_DEFAULT,
CFG_ENABLE_DYNAMIC_DTIM_MIN,
CFG_ENABLE_DYNAMIC_DTIM_MAX ),
REG_VARIABLE( CFG_ENABLE_AUTOMATIC_TX_POWER_CONTROL_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableAutomaticTxPowerControl,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_AUTOMATIC_TX_POWER_CONTROL_DEFAULT,
CFG_ENABLE_AUTOMATIC_TX_POWER_CONTROL_MIN,
CFG_ENABLE_AUTOMATIC_TX_POWER_CONTROL_MAX ),
REG_VARIABLE( CFG_SHORT_GI_40MHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, ShortGI40MhzEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SHORT_GI_40MHZ_DEFAULT,
CFG_SHORT_GI_40MHZ_MIN,
CFG_SHORT_GI_40MHZ_MAX ),
REG_DYNAMIC_VARIABLE( CFG_REPORT_MAX_LINK_SPEED, WLAN_PARAM_Integer,
hdd_config_t, reportMaxLinkSpeed,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_REPORT_MAX_LINK_SPEED_DEFAULT,
CFG_REPORT_MAX_LINK_SPEED_MIN,
CFG_REPORT_MAX_LINK_SPEED_MAX,
NULL, 0 ),
REG_DYNAMIC_VARIABLE( CFG_LINK_SPEED_RSSI_HIGH, WLAN_PARAM_SignedInteger,
hdd_config_t, linkSpeedRssiHigh,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_SPEED_RSSI_HIGH_DEFAULT,
CFG_LINK_SPEED_RSSI_HIGH_MIN,
CFG_LINK_SPEED_RSSI_HIGH_MAX,
NULL, 0 ),
REG_DYNAMIC_VARIABLE( CFG_LINK_SPEED_RSSI_MID, WLAN_PARAM_SignedInteger,
hdd_config_t, linkSpeedRssiMid,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_SPEED_RSSI_MID_DEFAULT,
CFG_LINK_SPEED_RSSI_MID_MIN,
CFG_LINK_SPEED_RSSI_MID_MAX,
NULL, 0 ),
REG_DYNAMIC_VARIABLE( CFG_LINK_SPEED_RSSI_LOW, WLAN_PARAM_SignedInteger,
hdd_config_t, linkSpeedRssiLow,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_SPEED_RSSI_LOW_DEFAULT,
CFG_LINK_SPEED_RSSI_LOW_MIN,
CFG_LINK_SPEED_RSSI_LOW_MAX,
NULL, 0 ),
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
REG_DYNAMIC_VARIABLE( CFG_ROAM_PREFER_5GHZ, WLAN_PARAM_Integer,
hdd_config_t, nRoamPrefer5GHz,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_PREFER_5GHZ_DEFAULT,
CFG_ROAM_PREFER_5GHZ_MIN,
CFG_ROAM_PREFER_5GHZ_MAX,
cbNotifySetRoamPrefer5GHz, 0 ),
REG_DYNAMIC_VARIABLE( CFG_ROAM_INTRA_BAND, WLAN_PARAM_Integer,
hdd_config_t, nRoamIntraBand,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_INTRA_BAND_DEFAULT,
CFG_ROAM_INTRA_BAND_MIN,
CFG_ROAM_INTRA_BAND_MAX,
cbNotifySetRoamIntraBand, 0 ),
REG_DYNAMIC_VARIABLE( CFG_ROAM_SCAN_N_PROBES, WLAN_PARAM_Integer,
hdd_config_t, nProbes,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_SCAN_N_PROBES_DEFAULT,
CFG_ROAM_SCAN_N_PROBES_MIN,
CFG_ROAM_SCAN_N_PROBES_MAX,
cbNotifySetRoamScanNProbes, 0 ),
REG_DYNAMIC_VARIABLE( CFG_ROAM_SCAN_HOME_AWAY_TIME, WLAN_PARAM_Integer,
hdd_config_t, nRoamScanHomeAwayTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAM_SCAN_HOME_AWAY_TIME_DEFAULT,
CFG_ROAM_SCAN_HOME_AWAY_TIME_MIN,
CFG_ROAM_SCAN_HOME_AWAY_TIME_MAX,
cbNotifySetRoamScanHomeAwayTime, 0 ),
#endif
REG_VARIABLE( CFG_P2P_DEVICE_ADDRESS_ADMINISTRATED_NAME, WLAN_PARAM_Integer,
hdd_config_t, isP2pDeviceAddrAdministrated,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_P2P_DEVICE_ADDRESS_ADMINISTRATED_DEFAULT,
CFG_P2P_DEVICE_ADDRESS_ADMINISTRATED_MIN,
CFG_P2P_DEVICE_ADDRESS_ADMINISTRATED_MAX ),
REG_VARIABLE( CFG_ENABLE_MCC_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableMCC,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MCC_ENABLED_DEFAULT,
CFG_ENABLE_MCC_ENABLED_MIN,
CFG_ENABLE_MCC_ENABLED_MAX ),
REG_VARIABLE( CFG_ALLOW_MCC_GO_DIFF_BI_NAME, WLAN_PARAM_Integer,
hdd_config_t, allowMCCGODiffBI,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ALLOW_MCC_GO_DIFF_BI_DEFAULT,
CFG_ALLOW_MCC_GO_DIFF_BI_MIN,
CFG_ALLOW_MCC_GO_DIFF_BI_MAX ),
REG_VARIABLE( CFG_THERMAL_MIGRATION_ENABLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, thermalMitigationEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_THERMAL_MIGRATION_ENABLE_DEFAULT,
CFG_THERMAL_MIGRATION_ENABLE_MIN,
CFG_THERMAL_MIGRATION_ENABLE_MAX ),
REG_VARIABLE( CFG_ENABLE_MODULATED_DTIM_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableModulatedDTIM,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MODULATED_DTIM_DEFAULT,
CFG_ENABLE_MODULATED_DTIM_MIN,
CFG_ENABLE_MODULATED_DTIM_MAX ),
REG_VARIABLE( CFG_MC_ADDR_LIST_ENABLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableMCAddrList,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MC_ADDR_LIST_ENABLE_DEFAULT,
CFG_MC_ADDR_LIST_ENABLE_MIN,
CFG_MC_ADDR_LIST_ENABLE_MAX ),
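/* 802.11ac (VHT) capabilities; note the first three use plain
   VAR_FLAGS_RANGE_CHECK rather than RANGE_CHECK_ASSUME_DEFAULT, so
   out-of-range values are presumably rejected instead of being
   replaced with the default */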
#ifdef WLAN_FEATURE_11AC
REG_VARIABLE( CFG_VHT_CHANNEL_WIDTH, WLAN_PARAM_Integer,
hdd_config_t, vhtChannelWidth,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_VHT_CHANNEL_WIDTH_DEFAULT,
CFG_VHT_CHANNEL_WIDTH_MIN,
CFG_VHT_CHANNEL_WIDTH_MAX),
REG_VARIABLE( CFG_VHT_ENABLE_RX_MCS_8_9, WLAN_PARAM_Integer,
hdd_config_t, vhtRxMCS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_VHT_ENABLE_RX_MCS_8_9_DEFAULT,
CFG_VHT_ENABLE_RX_MCS_8_9_MIN,
CFG_VHT_ENABLE_RX_MCS_8_9_MAX),
REG_VARIABLE( CFG_VHT_ENABLE_TX_MCS_8_9, WLAN_PARAM_Integer,
hdd_config_t, vhtTxMCS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_VHT_ENABLE_TX_MCS_8_9_DEFAULT,
CFG_VHT_ENABLE_TX_MCS_8_9_MIN,
CFG_VHT_ENABLE_TX_MCS_8_9_MAX),
REG_VARIABLE( CFG_VHT_AMPDU_LEN_EXP_NAME, WLAN_PARAM_Integer,
hdd_config_t, gVhtMaxAmpduLenExp,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VHT_AMPDU_LEN_EXP_DEFAULT,
CFG_VHT_AMPDU_LEN_EXP_MIN,
CFG_VHT_AMPDU_LEN_EXP_MAX ),
#endif
REG_VARIABLE( CFG_ENABLE_FIRST_SCAN_2G_ONLY_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableFirstScan2GOnly,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_FIRST_SCAN_2G_ONLY_DEFAULT,
CFG_ENABLE_FIRST_SCAN_2G_ONLY_MIN,
CFG_ENABLE_FIRST_SCAN_2G_ONLY_MAX ),
REG_VARIABLE( CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH_NAME, WLAN_PARAM_Integer,
hdd_config_t, skipDfsChnlInP2pSearch,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH_DEFAULT,
CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH_MIN,
CFG_ENABLE_SKIP_DFS_IN_P2P_SEARCH_MAX ),
REG_VARIABLE( CFG_IGNORE_DYNAMIC_DTIM_IN_P2P_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, ignoreDynamicDtimInP2pMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IGNORE_DYNAMIC_DTIM_IN_P2P_MODE_DEFAULT,
CFG_IGNORE_DYNAMIC_DTIM_IN_P2P_MODE_MIN,
CFG_IGNORE_DYNAMIC_DTIM_IN_P2P_MODE_MAX ),
REG_VARIABLE( CFG_NUM_BUFF_ADVERT_NAME, WLAN_PARAM_Integer,
hdd_config_t, numBuffAdvert,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_NUM_BUFF_ADVERT_DEFAULT,
CFG_NUM_BUFF_ADVERT_MIN,
CFG_NUM_BUFF_ADVERT_MAX ),
REG_VARIABLE( CFG_MCC_CONFIG_PARAM_NAME, WLAN_PARAM_Integer,
hdd_config_t, configMccParam,
VAR_FLAGS_OPTIONAL,
CFG_MCC_CONFIG_PARAM_DEFAULT,
CFG_MCC_CONFIG_PARAM_MIN,
CFG_MCC_CONFIG_PARAM_MAX ),
REG_VARIABLE( CFG_ENABLE_RX_STBC, WLAN_PARAM_Integer,
hdd_config_t, enableRxSTBC,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_RX_STBC_DEFAULT,
CFG_ENABLE_RX_STBC_MIN,
CFG_ENABLE_RX_STBC_MAX ),
#ifdef FEATURE_WLAN_TDLS
REG_VARIABLE( CFG_TDLS_SUPPORT_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSSupport,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_SUPPORT_ENABLE_DEFAULT,
CFG_TDLS_SUPPORT_ENABLE_MIN,
CFG_TDLS_SUPPORT_ENABLE_MAX ),
REG_VARIABLE( CFG_TDLS_IMPLICIT_TRIGGER, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSImplicitTrigger,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_IMPLICIT_TRIGGER_DEFAULT,
CFG_TDLS_IMPLICIT_TRIGGER_MIN,
CFG_TDLS_IMPLICIT_TRIGGER_MAX ),
REG_VARIABLE( CFG_TDLS_TX_STATS_PERIOD, WLAN_PARAM_Integer,
hdd_config_t, fTDLSTxStatsPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_TX_STATS_PERIOD_DEFAULT,
CFG_TDLS_TX_STATS_PERIOD_MIN,
CFG_TDLS_TX_STATS_PERIOD_MAX ),
REG_VARIABLE( CFG_TDLS_TX_PACKET_THRESHOLD, WLAN_PARAM_Integer,
hdd_config_t, fTDLSTxPacketThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_TX_PACKET_THRESHOLD_DEFAULT,
CFG_TDLS_TX_PACKET_THRESHOLD_MIN,
CFG_TDLS_TX_PACKET_THRESHOLD_MAX ),
REG_VARIABLE( CFG_TDLS_DISCOVERY_PERIOD, WLAN_PARAM_Integer,
hdd_config_t, fTDLSDiscoveryPeriod,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_DISCOVERY_PERIOD_DEFAULT,
CFG_TDLS_DISCOVERY_PERIOD_MIN,
CFG_TDLS_DISCOVERY_PERIOD_MAX ),
REG_VARIABLE( CFG_TDLS_MAX_DISCOVERY_ATTEMPT, WLAN_PARAM_Integer,
hdd_config_t, fTDLSMaxDiscoveryAttempt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_MAX_DISCOVERY_ATTEMPT_DEFAULT,
CFG_TDLS_MAX_DISCOVERY_ATTEMPT_MIN,
CFG_TDLS_MAX_DISCOVERY_ATTEMPT_MAX ),
REG_VARIABLE( CFG_TDLS_IDLE_TIMEOUT, WLAN_PARAM_Integer,
hdd_config_t, fTDLSIdleTimeout,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_IDLE_TIMEOUT_DEFAULT,
CFG_TDLS_IDLE_TIMEOUT_MIN,
CFG_TDLS_IDLE_TIMEOUT_MAX ),
REG_VARIABLE( CFG_TDLS_IDLE_PACKET_THRESHOLD, WLAN_PARAM_Integer,
hdd_config_t, fTDLSIdlePacketThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_IDLE_PACKET_THRESHOLD_DEFAULT,
CFG_TDLS_IDLE_PACKET_THRESHOLD_MIN,
CFG_TDLS_IDLE_PACKET_THRESHOLD_MAX ),
REG_VARIABLE( CFG_TDLS_RSSI_HYSTERESIS, WLAN_PARAM_Integer,
hdd_config_t, fTDLSRSSIHysteresis,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_RSSI_HYSTERESIS_DEFAULT,
CFG_TDLS_RSSI_HYSTERESIS_MIN,
CFG_TDLS_RSSI_HYSTERESIS_MAX ),
REG_VARIABLE( CFG_TDLS_RSSI_TRIGGER_THRESHOLD, WLAN_PARAM_SignedInteger,
hdd_config_t, fTDLSRSSITriggerThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_RSSI_TRIGGER_THRESHOLD_DEFAULT,
CFG_TDLS_RSSI_TRIGGER_THRESHOLD_MIN,
CFG_TDLS_RSSI_TRIGGER_THRESHOLD_MAX ),
REG_VARIABLE( CFG_TDLS_RSSI_TEARDOWN_THRESHOLD, WLAN_PARAM_SignedInteger,
hdd_config_t, fTDLSRSSITeardownThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_RSSI_TEARDOWN_THRESHOLD_DEFAULT,
CFG_TDLS_RSSI_TEARDOWN_THRESHOLD_MIN,
CFG_TDLS_RSSI_TEARDOWN_THRESHOLD_MAX ),
REG_VARIABLE( CFG_TDLS_QOS_WMM_UAPSD_MASK_NAME, WLAN_PARAM_HexInteger,
hdd_config_t, fTDLSUapsdMask,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_QOS_WMM_UAPSD_MASK_DEFAULT,
CFG_TDLS_QOS_WMM_UAPSD_MASK_MIN,
CFG_TDLS_QOS_WMM_UAPSD_MASK_MAX ),
REG_VARIABLE( CFG_TDLS_BUFFER_STA_SUPPORT_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSBufferSta,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_BUFFER_STA_SUPPORT_ENABLE_DEFAULT,
CFG_TDLS_BUFFER_STA_SUPPORT_ENABLE_MIN,
CFG_TDLS_BUFFER_STA_SUPPORT_ENABLE_MAX ),
REG_VARIABLE( CFG_TDLS_OFF_CHANNEL_SUPPORT_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSOffChannel,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_OFF_CHANNEL_SUPPORT_ENABLE_DEFAULT,
CFG_TDLS_OFF_CHANNEL_SUPPORT_ENABLE_MIN,
CFG_TDLS_OFF_CHANNEL_SUPPORT_ENABLE_MAX ),
REG_VARIABLE( CFG_TDLS_PUAPSD_INACTIVITY_TIME, WLAN_PARAM_Integer,
hdd_config_t, fTDLSPuapsdInactivityTimer,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_PUAPSD_INACTIVITY_TIME_DEFAULT,
CFG_TDLS_PUAPSD_INACTIVITY_TIME_MIN,
CFG_TDLS_PUAPSD_INACTIVITY_TIME_MAX ),
REG_VARIABLE( CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD, WLAN_PARAM_Integer,
hdd_config_t, fTDLSRxFrameThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD_DEFAULT,
CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD_MIN,
CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD_MAX ),
REG_VARIABLE( CFG_TDLS_EXTERNAL_CONTROL, WLAN_PARAM_Integer,
hdd_config_t, fTDLSExternalControl,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_EXTERNAL_CONTROL_DEFAULT,
CFG_TDLS_EXTERNAL_CONTROL_MIN,
CFG_TDLS_EXTERNAL_CONTROL_MAX ),
REG_VARIABLE( CFG_TDLS_WMM_MODE_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSWmmMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_WMM_MODE_ENABLE_DEFAULT,
CFG_TDLS_WMM_MODE_ENABLE_MIN,
CFG_TDLS_WMM_MODE_ENABLE_MAX ),
REG_VARIABLE( CFG_TDLS_SCAN_COEX_SUPPORT_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSScanCoexSupport,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_SCAN_COEX_SUPPORT_ENABLE_DEFAULT,
CFG_TDLS_SCAN_COEX_SUPPORT_ENABLE_MIN,
CFG_TDLS_SCAN_COEX_SUPPORT_ENABLE_MAX ),
REG_VARIABLE( CFG_TDLS_SCAN_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableTDLSScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TDLS_SCAN_ENABLE_DEFAULT,
CFG_TDLS_SCAN_ENABLE_MIN,
CFG_TDLS_SCAN_ENABLE_MAX ),
#endif
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
REG_VARIABLE( CFG_LINK_LAYER_STATS_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableLLStats,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_LAYER_STATS_ENABLE_DEFAULT,
CFG_LINK_LAYER_STATS_ENABLE_MIN,
CFG_LINK_LAYER_STATS_ENABLE_MAX ),
#endif
#ifdef WLAN_FEATURE_EXTSCAN
REG_VARIABLE( CFG_EXTSCAN_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableEXTScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_EXTSCAN_ENABLE_DEFAULT,
CFG_EXTSCAN_ENABLE_MIN,
CFG_EXTSCAN_ENABLE_MAX ),
#endif
#ifdef WLAN_SOFTAP_VSTA_FEATURE
REG_VARIABLE( CFG_VSTA_SUPPORT_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableVSTASupport,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VSTA_SUPPORT_ENABLE_DEFAULT,
CFG_VSTA_SUPPORT_ENABLE_MIN,
CFG_VSTA_SUPPORT_ENABLE_MAX ),
#endif
REG_VARIABLE( CFG_ENABLE_LPWR_IMG_TRANSITION_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableLpwrImgTransition,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_LPWR_IMG_TRANSITION_DEFAULT,
CFG_ENABLE_LPWR_IMG_TRANSITION_MIN,
CFG_ENABLE_LPWR_IMG_TRANSITION_MAX ),
#ifdef WLAN_ACTIVEMODE_OFFLOAD_FEATURE
REG_VARIABLE( CFG_ACTIVEMODE_OFFLOAD_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, fEnableActiveModeOffload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ACTIVEMODE_OFFLOAD_ENABLE_DEFAULT,
CFG_ACTIVEMODE_OFFLOAD_ENABLE_MIN,
CFG_ACTIVEMODE_OFFLOAD_ENABLE_MAX ),
#endif
REG_VARIABLE( CFG_SCAN_AGING_PARAM_NAME, WLAN_PARAM_Integer,
hdd_config_t, scanAgingTimeout,
VAR_FLAGS_OPTIONAL,
CFG_SCAN_AGING_PARAM_DEFAULT,
CFG_SCAN_AGING_PARAM_MIN,
CFG_SCAN_AGING_PARAM_MAX ),
REG_VARIABLE( CFG_TX_LDPC_ENABLE_FEATURE, WLAN_PARAM_Integer,
hdd_config_t, enableTxLdpc,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TX_LDPC_ENABLE_FEATURE_DEFAULT,
CFG_TX_LDPC_ENABLE_FEATURE_MIN,
CFG_TX_LDPC_ENABLE_FEATURE_MAX ),
REG_VARIABLE( CFG_ENABLE_MCC_ADATIVE_SCHEDULER_ENABLED_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableMCCAdaptiveScheduler,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MCC_ADATIVE_SCHEDULER_ENABLED_DEFAULT,
CFG_ENABLE_MCC_ADATIVE_SCHEDULER_ENABLED_MIN,
CFG_ENABLE_MCC_ADATIVE_SCHEDULER_ENABLED_MAX ),
REG_VARIABLE( CFG_ANDRIOD_POWER_SAVE_NAME, WLAN_PARAM_Integer,
hdd_config_t, isAndroidPsEn,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ANDRIOD_POWER_SAVE_DEFAULT,
CFG_ANDRIOD_POWER_SAVE_MIN,
CFG_ANDRIOD_POWER_SAVE_MAX),
REG_VARIABLE( CFG_IBSS_ADHOC_CHANNEL_5GHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, AdHocChannel5G,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IBSS_ADHOC_CHANNEL_5GHZ_DEFAULT,
CFG_IBSS_ADHOC_CHANNEL_5GHZ_MIN,
CFG_IBSS_ADHOC_CHANNEL_5GHZ_MAX),
REG_VARIABLE( CFG_IBSS_ADHOC_CHANNEL_24GHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, AdHocChannel24G,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IBSS_ADHOC_CHANNEL_24GHZ_DEFAULT,
CFG_IBSS_ADHOC_CHANNEL_24GHZ_MIN,
CFG_IBSS_ADHOC_CHANNEL_24GHZ_MAX),
#ifdef WLAN_FEATURE_11AC
REG_VARIABLE( CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE, WLAN_PARAM_Integer,
hdd_config_t, enableTxBF,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_DEFAULT,
CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_MIN,
CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_MAX ),
REG_VARIABLE( CFG_VHT_CSN_BEAMFORMEE_ANT_SUPPORTED, WLAN_PARAM_Integer,
hdd_config_t, txBFCsnValue,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VHT_CSN_BEAMFORMEE_ANT_SUPPORTED_DEFAULT,
CFG_VHT_CSN_BEAMFORMEE_ANT_SUPPORTED_MIN,
CFG_VHT_CSN_BEAMFORMEE_ANT_SUPPORTED_MAX ),
REG_VARIABLE( CFG_VHT_ENABLE_MU_BFORMEE_CAP_FEATURE, WLAN_PARAM_Integer,
hdd_config_t, enableMuBformee,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_VHT_ENABLE_MU_BFORMEE_CAP_FEATURE_DEFAULT,
CFG_VHT_ENABLE_MU_BFORMEE_CAP_FEATURE_MIN,
CFG_VHT_ENABLE_MU_BFORMEE_CAP_FEATURE_MAX ),
#endif
REG_VARIABLE( CFG_SAP_ALLOW_ALL_CHANNEL_PARAM_NAME, WLAN_PARAM_Integer,
hdd_config_t, sapAllowAllChannel,
VAR_FLAGS_OPTIONAL,
CFG_SAP_ALLOW_ALL_CHANNEL_PARAM_DEFAULT,
CFG_SAP_ALLOW_ALL_CHANNEL_PARAM_MIN,
CFG_SAP_ALLOW_ALL_CHANNEL_PARAM_MAX ),
#ifdef WLAN_FEATURE_11AC
REG_VARIABLE( CFG_DISABLE_LDPC_WITH_TXBF_AP, WLAN_PARAM_Integer,
hdd_config_t, disableLDPCWithTxbfAP,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DISABLE_LDPC_WITH_TXBF_AP_DEFAULT,
CFG_DISABLE_LDPC_WITH_TXBF_AP_MIN,
CFG_DISABLE_LDPC_WITH_TXBF_AP_MAX ),
#endif
REG_VARIABLE_STRING( CFG_LIST_OF_NON_DFS_COUNTRY_CODE, WLAN_PARAM_String,
hdd_config_t, listOfNonDfsCountryCode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
(void *)CFG_LIST_OF_NON_DFS_COUNTRY_CODE_DEFAULT),
REG_DYNAMIC_VARIABLE( CFG_ENABLE_SSR, WLAN_PARAM_Integer,
hdd_config_t, enableSSR,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_SSR_DEFAULT,
CFG_ENABLE_SSR_MIN,
CFG_ENABLE_SSR_MAX,
cbNotifySetEnableSSR, 0 ),
REG_VARIABLE_STRING( CFG_LIST_OF_NON_11AC_COUNTRY_CODE, WLAN_PARAM_String,
hdd_config_t, listOfNon11acCountryCode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
(void *)CFG_LIST_OF_NON_11AC_COUNTRY_CODE_DEFAULT),
REG_VARIABLE(CFG_MAX_MEDIUM_TIME, WLAN_PARAM_Integer,
hdd_config_t, cfgMaxMediumTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_MEDIUM_TIME_STADEFAULT,
CFG_MAX_MEDIUM_TIME_STAMIN,
CFG_MAX_MEDIUM_TIME_STAMAX ),
REG_VARIABLE( CFG_ENABLE_TRAFFIC_MONITOR, WLAN_PARAM_Integer,
hdd_config_t, enableTrafficMonitor,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_TRAFFIC_MONITOR_DEFAULT,
CFG_ENABLE_TRAFFIC_MONITOR_MIN,
CFG_ENABLE_TRAFFIC_MONITOR_MAX),
REG_VARIABLE( CFG_TRAFFIC_IDLE_TIMEOUT, WLAN_PARAM_Integer,
hdd_config_t, trafficIdleTimeout,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TRAFFIC_IDLE_TIMEOUT_DEFAULT,
CFG_TRAFFIC_IDLE_TIMEOUT_MIN,
CFG_TRAFFIC_IDLE_TIMEOUT_MAX),
#ifdef WLAN_FEATURE_11AC
REG_VARIABLE( CFG_ENABLE_VHT_FOR_24GHZ_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableVhtFor24GHzBand,
VAR_FLAGS_OPTIONAL,
CFG_ENABLE_VHT_FOR_24GHZ_DEFAULT,
CFG_ENABLE_VHT_FOR_24GHZ_MIN,
CFG_ENABLE_VHT_FOR_24GHZ_MAX),
#endif
REG_VARIABLE( CFG_SCAN_OFFLOAD_NAME, WLAN_PARAM_Integer,
hdd_config_t, fScanOffload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SCAN_OFFLOAD_DEFAULT,
CFG_SCAN_OFFLOAD_DISABLE,
CFG_SCAN_OFFLOAD_ENABLE ),
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
REG_DYNAMIC_VARIABLE( CFG_ENABLE_FAST_ROAM_IN_CONCURRENCY, WLAN_PARAM_Integer,
hdd_config_t, bFastRoamInConIniFeatureEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_FAST_ROAM_IN_CONCURRENCY_DEFAULT,
CFG_ENABLE_FAST_ROAM_IN_CONCURRENCY_MIN,
CFG_ENABLE_FAST_ROAM_IN_CONCURRENCY_MAX,
cbNotifySetEnableFastRoamInConcurrency, 0 ),
#endif
REG_VARIABLE( CFG_ENABLE_ADAPT_RX_DRAIN_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableAdaptRxDrain,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_ENABLE_ADAPT_RX_DRAIN_DEFAULT,
CFG_ENABLE_ADAPT_RX_DRAIN_MIN,
CFG_ENABLE_ADAPT_RX_DRAIN_MAX),
REG_VARIABLE( CFG_DYNAMIC_SPLIT_SCAN_NAME, WLAN_PARAM_Integer,
hdd_config_t, dynSplitscan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DYNAMIC_SPLIT_SCAN_DEFAULT,
CFG_DYNAMIC_SPLIT_SCAN_MIN,
CFG_DYNAMIC_SPLIT_SCAN_MAX ),
REG_VARIABLE( CFG_SPLIT_SCAN_TRAFFIC_MONITOR_THRSHLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, txRxThresholdForSplitScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_THRSHLD_DEFAULT,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_THRSHLD_MIN,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_THRSHLD_MAX ),
REG_VARIABLE( CFG_SPLIT_SCAN_TRAFFIC_MONITOR_TIMER_NAME, WLAN_PARAM_Integer,
hdd_config_t, trafficMntrTmrForSplitScan,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_TIMER_DEFAULT,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_TIMER_MIN,
CFG_SPLIT_SCAN_TRAFFIC_MONITOR_TIMER_MAX ),
REG_VARIABLE( CFG_FLEX_CONNECT_POWER_FACTOR_NAME, WLAN_PARAM_Integer,
hdd_config_t, flexConnectPowerFactor,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_MINMAX,
CFG_FLEX_CONNECT_POWER_FACTOR_DEFAULT,
CFG_FLEX_CONNECT_POWER_FACTOR_MIN,
CFG_FLEX_CONNECT_POWER_FACTOR_MAX ),
REG_VARIABLE( CFG_ENABLE_HEART_BEAT_OFFLOAD, WLAN_PARAM_Integer,
hdd_config_t, enableIbssHeartBeatOffload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_HEART_BEAT_OFFLOAD_DEFAULT,
CFG_ENABLE_HEART_BEAT_OFFLOAD_MIN,
CFG_ENABLE_HEART_BEAT_OFFLOAD_MAX),
REG_VARIABLE( CFG_ANTENNA_DIVERSITY_PARAM_NAME, WLAN_PARAM_Integer,
hdd_config_t, antennaDiversity,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ANTENNA_DIVERSITY_PARAM_DEFAULT,
CFG_ANTENNA_DIVERSITY_PARAM_MIN,
CFG_ANTENNA_DIVERSITY_PARAM_MAX),
REG_VARIABLE( CFG_ENABLE_SNR_MONITORING_NAME, WLAN_PARAM_Integer,
hdd_config_t, fEnableSNRMonitoring,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_ENABLE_SNR_MONITORING_DEFAULT,
CFG_ENABLE_SNR_MONITORING_MIN,
CFG_ENABLE_SNR_MONITORING_MAX),
#ifdef FEATURE_WLAN_SCAN_PNO
REG_VARIABLE( CFG_PNO_SCAN_SUPPORT, WLAN_PARAM_Integer,
hdd_config_t, configPNOScanSupport,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PNO_SCAN_SUPPORT_DEFAULT,
CFG_PNO_SCAN_SUPPORT_DISABLE,
CFG_PNO_SCAN_SUPPORT_ENABLE),
REG_VARIABLE( CFG_PNO_SCAN_TIMER_REPEAT_VALUE, WLAN_PARAM_Integer,
hdd_config_t, configPNOScanTimerRepeatValue,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PNO_SCAN_TIMER_REPEAT_VALUE_DEFAULT,
CFG_PNO_SCAN_TIMER_REPEAT_VALUE_MIN,
CFG_PNO_SCAN_TIMER_REPEAT_VALUE_MAX),
#endif
REG_VARIABLE( CFG_AMSDU_SUPPORT_IN_AMPDU_NAME, WLAN_PARAM_Integer,
hdd_config_t, isAmsduSupportInAMPDU,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_AMSDU_SUPPORT_IN_AMPDU_DEFAULT,
CFG_AMSDU_SUPPORT_IN_AMPDU_MIN,
CFG_AMSDU_SUPPORT_IN_AMPDU_MAX ),
REG_VARIABLE( CFG_STRICT_5GHZ_PREF_BY_MARGIN, WLAN_PARAM_Integer,
hdd_config_t, nSelect5GHzMargin,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_STRICT_5GHZ_PREF_BY_MARGIN_DEFAULT,
CFG_STRICT_5GHZ_PREF_BY_MARGIN_MIN,
CFG_STRICT_5GHZ_PREF_BY_MARGIN_MAX ),
REG_VARIABLE( CFG_COALESING_IN_IBSS_NAME, WLAN_PARAM_Integer,
hdd_config_t, isCoalesingInIBSSAllowed,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_COALESING_IN_IBSS_DEFAULT,
CFG_COALESING_IN_IBSS_MIN,
CFG_COALESING_IN_IBSS_MAX ),
REG_VARIABLE( CFG_DISABLE_ATH_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgAthDisable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DISABLE_ATH_DEFAULT,
CFG_DISABLE_ATH_MIN,
CFG_DISABLE_ATH_MAX ),
REG_VARIABLE(CFG_BTC_ACTIVE_WLAN_LEN_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcActiveWlanLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_ACTIVE_WLAN_LEN_DEFAULT,
CFG_BTC_ACTIVE_WLAN_LEN_MIN,
CFG_BTC_ACTIVE_WLAN_LEN_MAX ),
REG_VARIABLE(CFG_BTC_ACTIVE_BT_LEN_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcActiveBtLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_ACTIVE_BT_LEN_DEFAULT,
CFG_BTC_ACTIVE_BT_LEN_MIN,
CFG_BTC_ACTIVE_BT_LEN_MAX ),
REG_VARIABLE(CFG_BTC_SAP_ACTIVE_WLAN_LEN_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcSapActiveWlanLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_SAP_ACTIVE_WLAN_LEN_DEFAULT,
CFG_BTC_SAP_ACTIVE_WLAN_LEN_MIN,
CFG_BTC_SAP_ACTIVE_WLAN_LEN_MAX ),
REG_VARIABLE(CFG_BTC_SAP_ACTIVE_BT_LEN_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcSapActiveBtLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_SAP_ACTIVE_BT_LEN_DEFAULT,
CFG_BTC_SAP_ACTIVE_BT_LEN_MIN,
CFG_BTC_SAP_ACTIVE_BT_LEN_MAX ),
#ifdef MEMORY_DEBUG
REG_VARIABLE(CFG_ENABLE_MEMORY_DEBUG_NAME, WLAN_PARAM_Integer,
hdd_config_t, IsMemoryDebugSupportEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MEMORY_DEBUG_DEFAULT,
CFG_ENABLE_MEMORY_DEBUG_MIN,
CFG_ENABLE_MEMORY_DEBUG_MAX ),
#endif
REG_VARIABLE_STRING( CFG_OVERRIDE_COUNTRY_CODE, WLAN_PARAM_String,
hdd_config_t, overrideCountryCode,
VAR_FLAGS_OPTIONAL,
(void *)CFG_OVERRIDE_COUNTRY_CODE_DEFAULT),
REG_VARIABLE( CFG_ASD_PROBE_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, gAsdProbeInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ASD_PROBE_INTERVAL_DEFAULT,
CFG_ASD_PROBE_INTERVAL_MIN,
CFG_ASD_PROBE_INTERVAL_MAX),
REG_VARIABLE( CFG_ASD_TRIGGER_THRESHOLD_NAME, WLAN_PARAM_SignedInteger,
hdd_config_t, gAsdTriggerThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ASD_TRIGGER_THRESHOLD_DEFAULT,
CFG_ASD_TRIGGER_THRESHOLD_MIN,
CFG_ASD_TRIGGER_THRESHOLD_MAX),
REG_VARIABLE( CFG_ASD_RTT_RSSI_HYST_THRESHOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, gAsdRTTRssiHystThreshold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ASD_RTT_RSSI_HYST_THRESHOLD_DEFAULT,
CFG_ASD_RTT_RSSI_HYST_THRESHOLD_MIN,
CFG_ASD_RTT_RSSI_HYST_THRESHOLD_MAX),
REG_VARIABLE( CFG_DEBUG_P2P_REMAIN_ON_CHANNEL_NAME, WLAN_PARAM_Integer,
hdd_config_t, debugP2pRemainOnChannel,
VAR_FLAGS_OPTIONAL,
CFG_DEBUG_P2P_REMAIN_ON_CHANNEL_DEFAULT,
CFG_DEBUG_P2P_REMAIN_ON_CHANNEL_MIN,
CFG_DEBUG_P2P_REMAIN_ON_CHANNEL_MAX ),
REG_VARIABLE(CFG_CTS2S_DURING_BTC_SCO_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcCTS2SduringSCO,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_CTS2S_DURING_BTC_SCO_DEFAULT,
CFG_CTS2S_DURING_BTC_SCO_MIN,
CFG_CTS2S_DURING_BTC_SCO_MAX ),
REG_VARIABLE( CFG_ENABLE_DEBUG_CONNECT_ISSUE, WLAN_PARAM_Integer,
hdd_config_t, gEnableDebugLog,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DEBUG_CONNECT_ISSUE_DEFAULT,
CFG_ENABLE_DEBUG_CONNECT_ISSUE_MIN,
CFG_ENABLE_DEBUG_CONNECT_ISSUE_MAX),
REG_VARIABLE(CFG_OBSS_HT40_SCAN_ACTIVE_DWELL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nOBSSScanActiveDwellTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OBSS_HT40_SCAN_ACTIVE_DWELL_TIME_DEFAULT,
CFG_OBSS_HT40_SCAN_ACTIVE_DWELL_TIME_MIN,
CFG_OBSS_HT40_SCAN_ACTIVE_DWELL_TIME_MAX ),
REG_VARIABLE(CFG_OBSS_HT40_SCAN_PASSIVE_DWELL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nOBSSScanPassiveDwellTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OBSS_HT40_SCAN_PASSIVE_DWELL_TIME_DEFAULT,
CFG_OBSS_HT40_SCAN_PASSIVE_DWELL_TIME_MIN,
CFG_OBSS_HT40_SCAN_PASSIVE_DWELL_TIME_MAX ),
REG_VARIABLE(CFG_OBSS_HT40_SCAN_WIDTH_TRIGGER_INTERVAL_NAME,
WLAN_PARAM_Integer,
hdd_config_t, nOBSSScanWidthTriggerInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OBSS_HT40_SCAN_WIDTH_TRIGGER_INTERVAL_DEFAULT,
CFG_OBSS_HT40_SCAN_WIDTH_TRIGGER_INTERVAL_MIN,
CFG_OBSS_HT40_SCAN_WIDTH_TRIGGER_INTERVAL_MAX ),
REG_VARIABLE( CFG_ENABLE_STRICT_REGULATORY_FOR_FCC_NAME, WLAN_PARAM_Integer,
hdd_config_t, gEnableStrictRegulatoryForFCC,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_STRICT_REGULATORY_FOR_FCC_DEFAULT,
CFG_ENABLE_STRICT_REGULATORY_FOR_FCC_MIN,
CFG_ENABLE_STRICT_REGULATORY_FOR_FCC_MAX ),
REG_VARIABLE( CFG_ADVERTISE_CONCURRENT_OPERATION_NAME, WLAN_PARAM_Integer,
hdd_config_t, advertiseConcurrentOperation,
VAR_FLAGS_OPTIONAL,
CFG_ADVERTISE_CONCURRENT_OPERATION_DEFAULT,
CFG_ADVERTISE_CONCURRENT_OPERATION_MIN,
CFG_ADVERTISE_CONCURRENT_OPERATION_MAX ),
REG_VARIABLE( CFG_DEFAULT_RATE_INDEX_24GH, WLAN_PARAM_Integer,
hdd_config_t, defaultRateIndex24Ghz,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DEFAULT_RATE_INDEX_24GH_DEFAULT,
CFG_DEFAULT_RATE_INDEX_24GH_MIN,
CFG_DEFAULT_RATE_INDEX_24GH_MAX ),
REG_VARIABLE( CFG_SAP_ENABLE_11AC_NAME, WLAN_PARAM_Integer,
hdd_config_t, sapEnable11AC,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SAP_ENABLE_11AC_DEFAULT,
CFG_SAP_ENABLE_11AC_MIN,
CFG_SAP_ENABLE_11AC_MAX ),
REG_VARIABLE(CFG_RA_FILTER_ENABLE_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgRAFilterEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RA_FILTER_ENABLE_DEFAULT,
CFG_RA_FILTER_ENABLE_MIN,
CFG_RA_FILTER_ENABLE_MAX ),
REG_VARIABLE(CFG_RA_RATE_LIMIT_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgRARateLimitInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RA_RATE_LIMIT_INTERVAL_DEFAULT,
CFG_RA_RATE_LIMIT_INTERVAL_MIN,
CFG_RA_RATE_LIMIT_INTERVAL_MAX ),
REG_VARIABLE( CFG_ROAMING_DFS_CHANNEL_NAME, WLAN_PARAM_Integer,
hdd_config_t, allowDFSChannelRoam,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ROAMING_DFS_CHANNEL_DEFAULT,
CFG_ROAMING_DFS_CHANNEL_MIN,
CFG_ROAMING_DFS_CHANNEL_MAX ),
#ifdef WLAN_LOGGING_SOCK_SVC_ENABLE
REG_VARIABLE( CFG_WLAN_LOGGING_SUPPORT_NAME, WLAN_PARAM_Integer,
hdd_config_t, wlanLoggingEnable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_WLAN_LOGGING_SUPPORT_DEFAULT,
CFG_WLAN_LOGGING_SUPPORT_DISABLE,
CFG_WLAN_LOGGING_SUPPORT_ENABLE ),
REG_VARIABLE( CFG_WLAN_LOGGING_FE_CONSOLE_SUPPORT_NAME, WLAN_PARAM_Integer,
hdd_config_t, wlanLoggingFEToConsole,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_WLAN_LOGGING_FE_CONSOLE_SUPPORT_DEFAULT,
CFG_WLAN_LOGGING_FE_CONSOLE_SUPPORT_DISABLE,
CFG_WLAN_LOGGING_FE_CONSOLE_SUPPORT_ENABLE ),
REG_VARIABLE( CFG_WLAN_LOGGING_NUM_BUF_NAME, WLAN_PARAM_Integer,
hdd_config_t, wlanLoggingNumBuf,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_WLAN_LOGGING_NUM_BUF_DEFAULT,
CFG_WLAN_LOGGING_NUM_BUF_MIN,
CFG_WLAN_LOGGING_NUM_BUF_MAX ),
#endif //WLAN_LOGGING_SOCK_SVC_ENABLE
REG_VARIABLE( CFG_IGNORE_PEER_ERP_INFO_NAME, WLAN_PARAM_Integer,
hdd_config_t, ignorePeerErpInfo,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IGNORE_PEER_ERP_INFO_DEFAULT,
CFG_IGNORE_PEER_ERP_INFO_MIN,
CFG_IGNORE_PEER_ERP_INFO_MAX ),
REG_VARIABLE( CFG_IGNORE_PEER_HT_MODE_NAME, WLAN_PARAM_Integer,
hdd_config_t, ignorePeerHTopMode,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_IGNORE_PEER_HT_MODE_DEFAULT,
CFG_IGNORE_PEER_HT_MODE_MIN,
CFG_IGNORE_PEER_HT_MODE_MAX ),
REG_VARIABLE(CFG_INITIAL_DWELL_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, nInitialDwellTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_INITIAL_DWELL_TIME_DEFAULT,
CFG_INITIAL_DWELL_TIME_MIN,
CFG_INITIAL_DWELL_TIME_MAX ),
REG_VARIABLE(CFG_INITIAL_SCAN_SKIP_DFS_CH_NAME, WLAN_PARAM_Integer,
hdd_config_t, initialScanSkipDFSCh,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_INITIAL_SCAN_SKIP_DFS_CH_DEFAULT,
CFG_INITIAL_SCAN_SKIP_DFS_CH_MIN,
CFG_INITIAL_SCAN_SKIP_DFS_CH_MAX),
REG_VARIABLE(CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcFatalHidnSniffBlkGuidance,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE_DEFAULT,
CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE_MIN,
CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE_MAX ),
REG_VARIABLE(CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcCriticalHidnSniffBlkGuidance,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE_DEFAULT,
CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE_MIN,
CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE_MAX ),
REG_VARIABLE(CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcA2dpTxQueueThold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD_DEFAULT,
CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD_MIN,
CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD_MAX ),
REG_VARIABLE(CFG_BTC_DYN_OPP_TX_QUEUE_THOLD_NAME, WLAN_PARAM_Integer,
hdd_config_t, cfgBtcOppTxQueueThold,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_DYN_OPP_TX_QUEUE_THOLD_DEFAULT,
CFG_BTC_DYN_OPP_TX_QUEUE_THOLD_MIN,
CFG_BTC_DYN_OPP_TX_QUEUE_THOLD_MAX ),
#ifdef WLAN_FEATURE_11W
REG_VARIABLE(CFG_PMF_SA_QUERY_MAX_RETRIES_NAME, WLAN_PARAM_Integer,
hdd_config_t, pmfSaQueryMaxRetries,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PMF_SA_QUERY_MAX_RETRIES_DEFAULT,
CFG_PMF_SA_QUERY_MAX_RETRIES_MIN,
CFG_PMF_SA_QUERY_MAX_RETRIES_MAX ),
REG_VARIABLE(CFG_PMF_SA_QUERY_RETRY_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, pmfSaQueryRetryInterval,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_PMF_SA_QUERY_RETRY_INTERVAL_DEFAULT,
CFG_PMF_SA_QUERY_RETRY_INTERVAL_MIN,
CFG_PMF_SA_QUERY_RETRY_INTERVAL_MAX ),
#endif
REG_VARIABLE(CFG_DEFER_IMPS_FOR_TIME_NAME, WLAN_PARAM_Integer,
hdd_config_t, deferImpsTime,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DEFER_IMPS_FOR_TIME_DEFAULT,
CFG_DEFER_IMPS_FOR_TIME_MIN,
CFG_DEFER_IMPS_FOR_TIME_MAX),
REG_VARIABLE(CFG_ENABLE_DEAUTH_BEFORE_CONNECTION, WLAN_PARAM_Integer,
hdd_config_t, sendDeauthBeforeCon,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DEAUTH_BEFORE_CONNECTION_DEFAULT,
CFG_ENABLE_DEAUTH_BEFORE_CONNECTION_MIN,
CFG_ENABLE_DEAUTH_BEFORE_CONNECTION_MAX),
REG_VARIABLE(CFG_ENABLE_MAC_ADDR_SPOOFING, WLAN_PARAM_Integer,
hdd_config_t, enableMacSpoofing,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MAC_ADDR_SPOOFING_DEFAULT,
CFG_ENABLE_MAC_ADDR_SPOOFING_MIN,
CFG_ENABLE_MAC_ADDR_SPOOFING_MAX),
REG_VARIABLE(CFG_DISABLE_P2P_MAC_ADDR_SPOOFING, WLAN_PARAM_Integer,
hdd_config_t, disableP2PMacSpoofing,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DISABLE_P2P_MAC_ADDR_SPOOFING_DEFAULT,
CFG_DISABLE_P2P_MAC_ADDR_SPOOFING_MIN,
CFG_DISABLE_P2P_MAC_ADDR_SPOOFING_MAX),
REG_VARIABLE(CFG_ENABLE_MGMT_LOGGING, WLAN_PARAM_Integer,
hdd_config_t, enableMgmtLogging,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_MGMT_LOGGING_DEFAULT,
CFG_ENABLE_MGMT_LOGGING_MIN,
CFG_ENABLE_MGMT_LOGGING_MAX),
REG_VARIABLE(CFG_ENABLE_BMUHW_TRACING, WLAN_PARAM_Integer,
hdd_config_t, enableBMUHWtracing,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_BMUHW_TRACING_DEFAULT,
CFG_ENABLE_BMUHW_TRACING_MIN,
CFG_ENABLE_BMUHW_TRACING_MAX),
REG_VARIABLE(CFG_ENABLE_FW_LOGGING, WLAN_PARAM_Integer,
hdd_config_t, enableFWLogging,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_FW_LOGGING_DEFAULT,
CFG_ENABLE_FW_LOGGING_MIN,
CFG_ENABLE_FW_LOGGING_MAX),
REG_VARIABLE(CFG_ENABLE_FW_CONTINIOUS_LOGGING, WLAN_PARAM_Integer,
hdd_config_t, enableContFWLogging,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_FW_CONTINIOUS_LOGGING_DEFAULT,
CFG_ENABLE_FW_CONTINIOUS_LOGGING_MIN,
CFG_ENABLE_FW_CONTINIOUS_LOGGING_MAX),
REG_VARIABLE(CFG_MIN_LOGGING_BUFFER_SIZE, WLAN_PARAM_Integer,
hdd_config_t, minLoggingBufferSize,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MIN_LOGGING_BUFFER_SIZE_DEFAULT,
CFG_MIN_LOGGING_BUFFER_SIZE_MIN,
CFG_MIN_LOGGING_BUFFER_SIZE_MAX),
REG_VARIABLE(CFG_MAX_LOGGING_BUFFER_SIZE, WLAN_PARAM_Integer,
hdd_config_t, maxLoggingBufferSize,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_LOGGING_BUFFER_SIZE_DEFAULT,
CFG_MAX_LOGGING_BUFFER_SIZE_MIN,
CFG_MAX_LOGGING_BUFFER_SIZE_MAX),
REG_VARIABLE(CFG_ENABLE_CH_AVOID, WLAN_PARAM_Integer,
hdd_config_t, fenableCHAvoidance,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_CH_AVOID_DEFAULT,
CFG_ENABLE_CH_AVOID_MIN,
CFG_ENABLE_CH_AVOID_MAX ),
REG_VARIABLE(CFG_MAX_CONCURRENT_CONNECTIONS_NAME, WLAN_PARAM_Integer,
hdd_config_t, gMaxConcurrentActiveSessions,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_CONCURRENT_CONNECTIONS_DEFAULT,
CFG_MAX_CONCURRENT_CONNECTIONS_MIN,
CFG_MAX_CONCURRENT_CONNECTIONS_MAX ),
#ifdef WLAN_FEATURE_AP_HT40_24G
REG_VARIABLE(CFG_ENABLE_HT_2040_COEX, WLAN_PARAM_Integer,
hdd_config_t, apHT40_24GEnabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_HT_2040_COEX_DEFAULT,
CFG_ENABLE_HT_2040_COEX_MIN,
CFG_ENABLE_HT_2040_COEX_MAX ),
#endif
REG_VARIABLE( CFG_ENABLE_DYNAMIC_WMMPS_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableDynamicWMMPS,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DYNAMIC_WMM_PS_DEFAULT,
CFG_ENABLE_DYNAMIC_WMM_PS_MIN,
CFG_ENABLE_DYNAMIC_WMM_PS_MAX ),
REG_VARIABLE( CFG_MAX_UAPSD_CONSEC_SP_NAME, WLAN_PARAM_Integer,
hdd_config_t, maxUapsdConsecSP,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_UAPSD_CONSEC_SP_DEFAULT,
CFG_MAX_UAPSD_CONSEC_SP_MIN,
CFG_MAX_UAPSD_CONSEC_SP_MAX ),
REG_VARIABLE( CFG_MAX_UAPSD_CONSEC_RX_CNT_NAME, WLAN_PARAM_Integer,
hdd_config_t, maxUapsdConsecRxCnt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_UAPSD_CONSEC_RX_CNT_DEFAULT,
CFG_MAX_UAPSD_CONSEC_RX_CNT_MIN,
CFG_MAX_UAPSD_CONSEC_RX_CNT_MAX ),
REG_VARIABLE( CFG_MAX_UAPSD_CONSEC_TX_CNT_NAME, WLAN_PARAM_Integer,
hdd_config_t, maxUapsdConsecTxCnt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_UAPSD_CONSEC_TX_CNT_DEFAULT,
CFG_MAX_UAPSD_CONSEC_TX_CNT_MIN,
CFG_MAX_UAPSD_CONSEC_TX_CNT_MAX ),
REG_VARIABLE( CFG_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW_NAME, WLAN_PARAM_Integer,
hdd_config_t, uapsdConsecRxCntMeasWindow,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW_DEFAULT,
CFG_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW_MIN,
CFG_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW_MAX ),
REG_VARIABLE( CFG_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW_NAME, WLAN_PARAM_Integer,
hdd_config_t, uapsdConsecTxCntMeasWindow,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW_DEFAULT,
CFG_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW_MIN,
CFG_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW_MAX ),
REG_VARIABLE( CFG_UAPSD_PSPOLL_NAME, WLAN_PARAM_Integer,
hdd_config_t, maxPsPollInWmmUapsdMode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_UAPSD_PSPOLL_DEFAULT,
CFG_UAPSD_PSPOLL_MIN,
CFG_UAPSD_PSPOLL_MAX ),
REG_VARIABLE( CFG_MAX_UAPSD_INACT_INTVL_NAME, WLAN_PARAM_Integer,
hdd_config_t, maxUapsdInactivityIntervals,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MAX_UAPSD_INACT_INTVL_DEFAULT,
CFG_MAX_UAPSD_INACT_INTVL_MIN,
CFG_MAX_UAPSD_INACT_INTVL_MAX ),
REG_VARIABLE( CFG_BURST_MODE_BE_TXOP_VALUE, WLAN_PARAM_Integer,
hdd_config_t, burstModeTXOPValue,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BURST_MODE_BE_TXOP_VALUE_DEFAULT,
CFG_BURST_MODE_BE_TXOP_VALUE_MIN,
CFG_BURST_MODE_BE_TXOP_VALUE_MAX ),
REG_VARIABLE( CFG_SAP_SCAN_BAND_PREFERENCE, WLAN_PARAM_Integer,
hdd_config_t, acsScanBandPreference,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK,
CFG_SAP_SCAN_BAND_PREFERENCE_DEFAULT,
CFG_SAP_SCAN_BAND_PREFERENCE_MIN,
CFG_SAP_SCAN_BAND_PREFERENCE_MAX ),
REG_VARIABLE( CFG_ENABLE_DYNAMIC_RA_START_RATE_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableDynamicRAStartRate,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_DYNAMIC_RA_START_RATE_DEFAULT,
CFG_ENABLE_DYNAMIC_RA_START_RATE_MIN,
CFG_ENABLE_DYNAMIC_RA_START_RATE_MAX),
REG_VARIABLE( CFG_P2P_LISTEN_DEFER_INTERVAL_NAME, WLAN_PARAM_Integer,
hdd_config_t, gP2PListenDeferInterval,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_P2P_LISTEN_DEFER_INTERVAL_DEFAULT,
CFG_P2P_LISTEN_DEFER_INTERVAL_MIN,
CFG_P2P_LISTEN_DEFER_INTERVAL_MAX),
REG_VARIABLE( CFG_BTC_ENABLE_IND_TIMER_VALUE, WLAN_PARAM_Integer,
hdd_config_t, btcEnableIndTimerVal,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_ENABLE_IND_TIMER_VALUE_DEFAULT,
CFG_BTC_ENABLE_IND_TIMER_VALUE_MIN,
CFG_BTC_ENABLE_IND_TIMER_VALUE_MAX),
REG_VARIABLE( CFG_BTC_FAST_WLAN_CONN_PREF, WLAN_PARAM_Integer,
hdd_config_t, btcFastWlanConnPref,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_FAST_WLAN_CONN_PREF_DEFAULT,
CFG_BTC_FAST_WLAN_CONN_PREF_MIN,
CFG_BTC_FAST_WLAN_CONN_PREF_MAX ),
REG_VARIABLE( CFG_ENABLE_RTSCTS_HTVHT_NAME, WLAN_PARAM_Integer,
hdd_config_t, enableRtsCtsHtVht,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_RTSCTS_HTVHT_DEFAULT,
CFG_ENABLE_RTSCTS_HTVHT_MIN,
CFG_ENABLE_RTSCTS_HTVHT_MAX),
REG_VARIABLE( CFG_DXE_REPLENISH_RX_TIMER_VALUE, WLAN_PARAM_Integer,
hdd_config_t, dxeReplenishRXTimerVal,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DXE_REPLENISH_RX_TIMER_VALUE_DEFAULT,
CFG_DXE_REPLENISH_RX_TIMER_VALUE_MIN,
CFG_DXE_REPLENISH_RX_TIMER_VALUE_MAX ),
REG_VARIABLE( CFG_DXE_SSR_ENABLE, WLAN_PARAM_Integer,
hdd_config_t, dxeSSREnable,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_DXE_SSR_ENABLE_DEFAULT,
CFG_DXE_SSR_ENABLE_MIN,
CFG_DXE_SSR_ENABLE_MAX ),
REG_VARIABLE( CFG_MULTICAST_HOST_FW_MSGS, WLAN_PARAM_Integer,
hdd_config_t, multicast_host_msgs,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_MULTICAST_HOST_FW_MSGS_DEFAULT,
CFG_MULTICAST_HOST_FW_MSGS_MIN,
CFG_MULTICAST_HOST_FW_MSGS_MAX),
REG_VARIABLE( CFG_TOGGLE_ARP_BDRATES_NAME, WLAN_PARAM_Integer,
hdd_config_t, toggleArpBDRates,
VAR_FLAGS_OPTIONAL |
VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_TOGGLE_ARP_BDRATES_DEFAULT,
CFG_TOGGLE_ARP_BDRATES_MIN,
CFG_TOGGLE_ARP_BDRATES_MAX),
REG_VARIABLE( CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, WLAN_PARAM_Integer,
hdd_config_t, btcStaticOppWlanIdleWlanLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN_DEFAULT,
CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN_MIN,
CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN_MAX ),
REG_VARIABLE( CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, WLAN_PARAM_Integer,
hdd_config_t, btcStaticOppWlanIdleBtLen,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN_DEFAULT,
CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN_MIN,
CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN_MAX ),
REG_VARIABLE( CFG_LINK_FAIL_TIMEOUT_NAME, WLAN_PARAM_Integer,
hdd_config_t, linkFailTimeout,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_FAIL_TIMEOUT_DEF,
CFG_LINK_FAIL_TIMEOUT_MIN,
CFG_LINK_FAIL_TIMEOUT_MAX ),
REG_VARIABLE( CFG_LINK_FAIL_TX_CNT_NAME, WLAN_PARAM_Integer,
hdd_config_t, linkFailTxCnt,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_LINK_FAIL_TX_CNT_DEF,
CFG_LINK_FAIL_TX_CNT_MIN,
CFG_LINK_FAIL_TX_CNT_MAX ),
REG_VARIABLE( CFG_OPTIMIZE_CA_EVENT_NAME, WLAN_PARAM_Integer,
hdd_config_t, gOptimizeCAevent,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_OPTIMIZE_CA_EVENT_DEFAULT,
CFG_OPTIMIZE_CA_EVENT_DISABLE,
CFG_OPTIMIZE_CA_EVENT_ENABLE ),
REG_VARIABLE( CFG_ENABLE_CRASH_INJECT, WLAN_PARAM_Integer,
hdd_config_t, crash_inject_enabled,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_ENABLE_CRASH_INJECT_DEFAULT,
CFG_ENABLE_CRASH_INJECT_MIN,
CFG_ENABLE_CRASH_INJECT_MAX),
REG_VARIABLE( CFG_RPS_CPU_MAP_NAME, WLAN_PARAM_HexInteger,
hdd_config_t, rps_mask,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_RPS_CPU_MAP_DEFAULT,
CFG_RPS_CPU_MAP_MIN,
CFG_RPS_CPU_MAP_MAX),
REG_VARIABLE(CFG_SAR_BOFFSET_SET_CORRECTION_NAME, WLAN_PARAM_Integer,
hdd_config_t, boffset_correction_enable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
CFG_SAR_BOFFSET_SET_CORRECTION_DEFAULT,
CFG_SAR_BOFFSET_SET_CORRECTION_MIN,
CFG_SAR_BOFFSET_SET_CORRECTION_MAX),
};
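/*
* Each REG_VARIABLE row above binds one ini key to a field of hdd_config_t:
* the *_NAME macro supplies the ini key, the WLAN_PARAM_* value selects the
* parser (integer, hex integer, signed integer or string), the struct/field
* pair locates the destination, the VAR_FLAGS_* mask selects the range-check
* policy, and the trailing arguments give the default, minimum and maximum.
* The sketch below is illustrative only -- apply_int_entry_example is a
* hypothetical helper, not the driver's hdd_apply_cfg_ini -- and assumes the
* destination field is a v_U32_t. It shows the ASSUME_DEFAULT policy, under
* which an out-of-range value appears to fall back to the default rather
* than being clamped to min/max (clamping is what ASSUME_MINMAX rows ask for).
*/
static inline void apply_int_entry_example(hdd_config_t *cfg,
size_t field_offset, v_U32_t value,
v_U32_t min_val, v_U32_t max_val, v_U32_t def_val)
{
/* Locate the destination field by its byte offset within the struct. */
v_U32_t *field = (v_U32_t *)(((char *)cfg) + field_offset);
if (value < min_val || value > max_val)
value = def_val; /* out of range: assume the default */
*field = value;
}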
/*
* This function returns a pointer to the character that follows the first
* occurrence of a new line character ('\n' or '\r'). It does not modify the
* string passed in; the caller is responsible for replacing the delimiter
* with a null character if it wants to terminate the current line.
* The function returns NULL if no new line character is found before the
* end of the string is reached, and a pointer beyond str_end if the search
* runs past the end of the buffer.
*/
static char* get_next_line(char* str, char *str_end)
{
char c;
if (str == NULL || *str == '\0') {
return NULL;
}
c = *str;
while (c != '\n' && c != '\r' && c != '\0') {
str = str + 1;
if (str > str_end) {
return str;
}
c = *str;
}
if (c == '\0') {
return NULL;
}
return (str + 1);
}
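/*
* Usage sketch (illustrative only; walk_lines_example is not part of the
* driver): iterate over a buffer line by line. Since get_next_line() never
* writes to the buffer, the caller terminates each line itself, exactly as
* hdd_parse_config_ini() does further below. buf_end points at the last
* valid byte of the buffer.
*/
static inline void walk_lines_example(char *buf, char *buf_end)
{
char *line;
while (buf != NULL) {
line = get_next_line(buf, buf_end);
if (line != NULL && line > buf_end) {
/* ran past the buffer without finding '\n', '\r' or '\0' */
break;
}
if (line != NULL)
*(line - 1) = '\0'; /* terminate the current line in place */
/* ... consume the NUL-terminated line starting at buf ... */
buf = line;
}
}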
// Look for whitespace. The ASCII values to check are:
// 0x09 == horizontal tab
// 0x0a == new line ("\n")
// 0x0b == vertical tab
// 0x0c == form feed (new page)
// 0x0d == carriage return ("\r")
// Null ('\0') must not be considered a space.
#define i_isspace(ch) (((ch) >= 0x09 && (ch) <= 0x0d) || (ch) == ' ')
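/* For example, i_isspace(' '), i_isspace('\t') and i_isspace('\r') are all
* true, while i_isspace('\0') and i_isspace('a') are false. */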
/*
* This function trims any leading and trailing white spaces
*/
static char *i_trim(char *str)
{
char *ptr;
if(*str == '\0') return str;
/* Find the first non white-space*/
for (ptr = str; i_isspace(*ptr); ptr++);
if (*ptr == '\0')
return str;
/* This is the new start of the string*/
str = ptr;
/* Find the last non white-space */
ptr += strlen(ptr) - 1;
for (; ptr != str && i_isspace(*ptr); ptr--);
/* Null terminate one past the last non-whitespace character */
ptr[1] = '\0';
return str;
}
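/*
* Example (illustrative sketch, not used by the driver): i_trim() trims in
* place and returns a pointer into the caller's own storage, so the argument
* must be writable -- never a string literal.
*/
static inline void i_trim_example(void)
{
char raw[] = " gEnableSSR=1 \t";
char *clean = i_trim(raw); /* clean now points at "gEnableSSR=1" */
(void)clean;
}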
// Structure to store each entry of the qcom_cfg.ini file
typedef struct
{
char *name;
char *value;
} tCfgIniEntry;
static VOS_STATUS hdd_apply_cfg_ini( hdd_context_t * pHddCtx,
tCfgIniEntry* iniTable, unsigned long entries);
#ifdef WLAN_CFG_DEBUG
void dump_cfg_ini (tCfgIniEntry* iniTable, unsigned long entries)
{
unsigned long i;
for (i = 0; i < entries; i++) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "%s entry Name=[%s] value=[%s]",
WLAN_INI_FILE, iniTable[i].name, iniTable[i].value);
}
}
#endif
/*
* This function reads the qcom_cfg.ini file and
* parses each 'Name=Value' pair in the ini file
*/
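/*
* Expected file format -- an illustrative sample, not an exhaustive one (the
* real keys come from the registry table above):
*
* # lines starting with '#' are comments and are skipped
* gEnableSSR=1
* gVhtChannelWidth=2
* END <- parsing stops at a line that begins with "END"
*/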
VOS_STATUS hdd_parse_config_ini(hdd_context_t* pHddCtx)
{
int status, i=0;
/** Pointer for firmware image data */
const struct firmware *fw = NULL;
char *buffer, *line, *pTemp = NULL;
size_t size;
char *name, *value;
/* cfgIniTable is static to avoid excess stack usage */
static tCfgIniEntry cfgIniTable[MAX_CFG_INI_ITEMS];
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
memset(cfgIniTable, 0, sizeof(cfgIniTable));
status = request_firmware(&fw, WLAN_INI_FILE, pHddCtx->parent_dev);
if(status)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: request_firmware failed %d",__func__, status);
vos_status = VOS_STATUS_E_FAILURE;
goto config_exit;
}
if(!fw || !fw->data || !fw->size)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: %s download failed",
__func__, WLAN_INI_FILE);
vos_status = VOS_STATUS_E_FAILURE;
goto config_exit;
}
hddLog(VOS_TRACE_LEVEL_INFO, "%s: qcom_cfg.ini Size %zu", __func__, fw->size);
buffer = (char*)vos_mem_vmalloc(fw->size);
if (NULL == buffer) {
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: vos_mem_vmalloc failure", __func__);
release_firmware(fw);
return VOS_STATUS_E_FAILURE;
}
pTemp = buffer;
vos_mem_copy((void*)buffer,(void *)fw->data, fw->size);
size = fw->size;
while (buffer != NULL)
{
/*
* get_next_line() previously replaced the '\n' and '\r' delimiters
* with '\0' before returning, without checking whether it had run
* past the end of the source buffer. The function was therefore
* changed so that it no longer modifies the buffer passed to it,
* leaving the caller to validate the returned pointer and to
* terminate the current line. So here we verify that the returned
* pointer does not exceed the end of the buffer before writing the
* null terminator.
*/
line = get_next_line(buffer, (pTemp + (fw->size-1)));
if(line >= (pTemp + fw->size)) {
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: INI file seems to be corrupted",
__func__);
vos_status = VOS_STATUS_E_FAILURE;
goto config_exit;
}
else if(line) {
*(line - 1) = '\0';
}
buffer = i_trim(buffer);
hddLog(LOG1, "%s: item [%s]", __func__, buffer);
if(strlen((char*)buffer) == 0 || *buffer == '#') {
buffer = line;
continue;
}
else if(strncmp(buffer, "END", 3) == 0 ) {
break;
}
else
{
name = buffer;
while(*buffer != '=' && *buffer != '\0')
buffer++;
if(*buffer != '\0') {
*buffer++ = '\0';
i_trim(name);
if(strlen (name) != 0) {
buffer = i_trim(buffer);
if(strlen(buffer)>0) {
value = buffer;
while(!i_isspace(*buffer) && *buffer != '\0')
buffer++;
*buffer = '\0';
cfgIniTable[i].name = name;
cfgIniTable[i++].value = value;
if(i >= MAX_CFG_INI_ITEMS) {
hddLog(LOGE,"%s: Number of items in %s > %d",
__func__, WLAN_INI_FILE, MAX_CFG_INI_ITEMS);
break;
}
}
}
}
}
buffer = line;
}
//Loop through the registry table and apply all these configs
vos_status = hdd_apply_cfg_ini(pHddCtx, cfgIniTable, i);
config_exit:
release_firmware(fw);
vos_mem_vfree(pTemp);
return vos_status;
}
static void print_hdd_cfg(hdd_context_t *pHddCtx)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "*********Config values in HDD Adapter*******");
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [RTSThreshold] Value = %u",pHddCtx->cfg_ini->RTSThreshold) ;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [OperatingChannel] Value = [%u]",pHddCtx->cfg_ini->OperatingChannel);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [PowerUsageControl] Value = [%s]",pHddCtx->cfg_ini->PowerUsageControl);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [fIsImpsEnabled] Value = [%u]",pHddCtx->cfg_ini->fIsImpsEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [AutoBmpsTimerEnabled] Value = [%u]",pHddCtx->cfg_ini->fIsAutoBmpsTimerEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nAutoBmpsTimerValue] Value = [%u]",pHddCtx->cfg_ini->nAutoBmpsTimerValue);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nVccRssiTrigger] Value = [%u]",pHddCtx->cfg_ini->nVccRssiTrigger);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [gIbssBssid] Value =["MAC_ADDRESS_STR"]",
MAC_ADDR_ARRAY(pHddCtx->cfg_ini->IbssBssid.bytes));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [Intf0MacAddress] Value =["MAC_ADDRESS_STR"]",
MAC_ADDR_ARRAY(pHddCtx->cfg_ini->intfMacAddr[0].bytes));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [Intf1MacAddress] Value =["MAC_ADDRESS_STR"]",
MAC_ADDR_ARRAY(pHddCtx->cfg_ini->intfMacAddr[1].bytes));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [Intf2MacAddress] Value =["MAC_ADDRESS_STR"]",
MAC_ADDR_ARRAY(pHddCtx->cfg_ini->intfMacAddr[2].bytes));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [Intf3MacAddress] Value =["MAC_ADDRESS_STR"]",
MAC_ADDR_ARRAY(pHddCtx->cfg_ini->intfMacAddr[3].bytes));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApEnableUapsd] value = [%u]",pHddCtx->cfg_ini->apUapsdEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAPCntryCode] Value =[%c%c%c]",
pHddCtx->cfg_ini->apCntryCode[0],pHddCtx->cfg_ini->apCntryCode[1],
pHddCtx->cfg_ini->apCntryCode[2]);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableApProt] value = [%u]", pHddCtx->cfg_ini->apProtEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAPAutoShutOff] Value = [%u]", pHddCtx->cfg_ini->nAPAutoShutOff);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableListenMode] Value = [%u]", pHddCtx->cfg_ini->nEnableListenMode);
VOS_TRACE (VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApProtection] value = [%u]",pHddCtx->cfg_ini->apProtection);
VOS_TRACE (VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableApOBSSProt] value = [%u]",pHddCtx->cfg_ini->apOBSSProtEnabled);
VOS_TRACE (VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApAutoChannelSelection] value = [%u]",pHddCtx->cfg_ini->apAutoChannelSelection);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ChannelBondingMode] Value = [%u]",pHddCtx->cfg_ini->nChannelBondingMode24GHz);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ChannelBondingMode] Value = [%u]",pHddCtx->cfg_ini->nChannelBondingMode5GHz);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [dot11Mode] Value = [%u]",pHddCtx->cfg_ini->dot11Mode);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [WmmMode] Value = [%u] ",pHddCtx->cfg_ini->WmmMode);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [UapsdMask] Value = [0x%x] ",pHddCtx->cfg_ini->UapsdMask);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [PktClassificationBasis] Value = [%u] ",pHddCtx->cfg_ini->PktClassificationBasis);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ImplicitQosIsEnabled] Value = [%u]",(int)pHddCtx->cfg_ini->bImplicitQosEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdVoSrvIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdVoSrvIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdVoSuspIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdVoSuspIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdViSrvIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdViSrvIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdViSuspIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdViSuspIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdBeSrvIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdBeSrvIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdBeSuspIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdBeSuspIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdBkSrvIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdBkSrvIntv);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraUapsdBkSuspIntv] Value = [%u] ",pHddCtx->cfg_ini->InfraUapsdBkSuspIntv);
#ifdef FEATURE_WLAN_ESE
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraInactivityInterval] Value = [%u] ",pHddCtx->cfg_ini->InfraInactivityInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [EseEnabled] Value = [%u] ",pHddCtx->cfg_ini->isEseIniFeatureEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [FastTransitionEnabled] Value = [%u] ",pHddCtx->cfg_ini->isFastTransitionEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gTxPowerCap] Value = [%u] dBm ",pHddCtx->cfg_ini->nTxPowerCap);
#endif
#ifdef FEATURE_WLAN_LFR
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [FastRoamEnabled] Value = [%u] ",pHddCtx->cfg_ini->isFastRoamIniFeatureEnabled);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [MAWCEnabled] Value = [%u] ",pHddCtx->cfg_ini->MAWCEnabled);
#endif
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [RoamRssiDiff] Value = [%u] ",pHddCtx->cfg_ini->RoamRssiDiff);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ImmediateRoamRssiDiff] Value = [%u] ",pHddCtx->cfg_ini->nImmediateRoamRssiDiff);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [isWESModeEnabled] Value = [%u] ",pHddCtx->cfg_ini->isWESModeEnabled);
#endif
#ifdef FEATURE_WLAN_OKC
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [OkcEnabled] Value = [%u] ",pHddCtx->cfg_ini->isOkcIniFeatureEnabled);
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraDirAcVo] Value = [%u] ",pHddCtx->cfg_ini->InfraDirAcVo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraNomMsduSizeAcVo] Value = [0x%x] ",pHddCtx->cfg_ini->InfraNomMsduSizeAcVo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMeanDataRateAcVo] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMeanDataRateAcVo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMinPhyRateAcVo] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMinPhyRateAcVo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraSbaAcVo] Value = [0x%x] ",pHddCtx->cfg_ini->InfraSbaAcVo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraDirAcVi] Value = [%u] ",pHddCtx->cfg_ini->InfraDirAcVi);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraNomMsduSizeAcVi] Value = [0x%x] ",pHddCtx->cfg_ini->InfraNomMsduSizeAcVi);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMeanDataRateAcVi] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMeanDataRateAcVi);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMinPhyRateAcVi] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMinPhyRateAcVi);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraSbaAcVi] Value = [0x%x] ",pHddCtx->cfg_ini->InfraSbaAcVi);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraDirAcBe] Value = [%u] ",pHddCtx->cfg_ini->InfraDirAcBe);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraNomMsduSizeAcBe] Value = [0x%x] ",pHddCtx->cfg_ini->InfraNomMsduSizeAcBe);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMeanDataRateAcBe] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMeanDataRateAcBe);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMinPhyRateAcBe] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMinPhyRateAcBe);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraSbaAcBe] Value = [0x%x] ",pHddCtx->cfg_ini->InfraSbaAcBe);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraDirAcBk] Value = [%u] ",pHddCtx->cfg_ini->InfraDirAcBk);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraNomMsduSizeAcBk] Value = [0x%x] ",pHddCtx->cfg_ini->InfraNomMsduSizeAcBk);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMeanDataRateAcBk] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMeanDataRateAcBk);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraMinPhyRateAcBk] Value = [0x%x] ",pHddCtx->cfg_ini->InfraMinPhyRateAcBk);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [InfraSbaAcBk] Value = [0x%x] ",pHddCtx->cfg_ini->InfraSbaAcBk);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [WfqBkWeight] Value = [%u] ",pHddCtx->cfg_ini->WfqBkWeight);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [WfqBeWeight] Value = [%u] ",pHddCtx->cfg_ini->WfqBeWeight);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [WfqViWeight] Value = [%u] ",pHddCtx->cfg_ini->WfqViWeight);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [WfqVoWeight] Value = [%u] ",pHddCtx->cfg_ini->WfqVoWeight);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [DelayedTriggerFrmInt] Value = [%u] ",pHddCtx->cfg_ini->DelayedTriggerFrmInt);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [BkReorderAgingTime] Value = [%u] ",pHddCtx->cfg_ini->BkReorderAgingTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [BeReorderAgingTime] Value = [%u] ",pHddCtx->cfg_ini->BeReorderAgingTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ViReorderAgingTime] Value = [%u] ",pHddCtx->cfg_ini->ViReorderAgingTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [VoReorderAgingTime] Value = [%u] ",pHddCtx->cfg_ini->VoReorderAgingTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [mcastBcastFilterSetting] Value = [%u] ",pHddCtx->cfg_ini->mcastBcastFilterSetting);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [fhostArpOffload] Value = [%u] ",pHddCtx->cfg_ini->fhostArpOffload);
#ifdef WLAN_FEATURE_VOWIFI_11R
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [fFTResourceReqSupported] Value = [%u] ",pHddCtx->cfg_ini->fFTResourceReqSupported);
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableRoamDelayStats] Value = [%u] ",pHddCtx->cfg_ini->gEnableRoamDelayStats);
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborReassocRssiThreshold] Value = [%u] ",pHddCtx->cfg_ini->nNeighborReassocRssiThreshold);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborLookupRssiThreshold] Value = [%u] ",pHddCtx->cfg_ini->nNeighborLookupRssiThreshold);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborScanMinChanTime] Value = [%u] ",pHddCtx->cfg_ini->nNeighborScanMinChanTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborScanMaxChanTime] Value = [%u] ",pHddCtx->cfg_ini->nNeighborScanMaxChanTime);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nMaxNeighborRetries] Value = [%u] ",pHddCtx->cfg_ini->nMaxNeighborReqTries);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborScanPeriod] Value = [%u] ",pHddCtx->cfg_ini->nNeighborScanPeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborScanResultsRefreshPeriod] Value = [%u] ",pHddCtx->cfg_ini->nNeighborResultsRefreshPeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nEmptyScanRefreshPeriod] Value = [%u] ",pHddCtx->cfg_ini->nEmptyScanRefreshPeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nNeighborInitialForcedRoamTo5GhEnable] Value = [%u] ",pHddCtx->cfg_ini->nNeighborInitialForcedRoamTo5GhEnable);
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [burstSizeDefinition] Value = [0x%x] ",pHddCtx->cfg_ini->burstSizeDefinition);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [tsInfoAckPolicy] Value = [0x%x] ",pHddCtx->cfg_ini->tsInfoAckPolicy);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [rfSettlingTimeUs] Value = [%u] ",pHddCtx->cfg_ini->rfSettlingTimeUs);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [bSingleTidRc] Value = [%u] ",pHddCtx->cfg_ini->bSingleTidRc);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gDynamicPSPollvalue] Value = [%u] ",pHddCtx->cfg_ini->dynamicPsPollValue);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAddTSWhenACMIsOff] Value = [%u] ",pHddCtx->cfg_ini->AddTSWhenACMIsOff);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gValidateScanList] Value = [%u] ",pHddCtx->cfg_ini->fValidateScanList);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gStaKeepAlivePeriod] Value = [%u] ",pHddCtx->cfg_ini->infraStaKeepAlivePeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApDataAvailPollInterVal] Value = [%u] ",pHddCtx->cfg_ini->apDataAvailPollPeriodInMs);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableBtAmp] Value = [%u] ",pHddCtx->cfg_ini->enableBtAmp);
#ifdef WLAN_BTAMP_FEATURE
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [BtAmpPreferredChannel] Value = [%u] ",pHddCtx->cfg_ini->preferredChannel);
#endif //WLAN_BTAMP_FEATURE
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [BandCapability] Value = [%u] ",pHddCtx->cfg_ini->nBandCapability);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [fEnableBeaconEarlyTermination] Value = [%u] ",pHddCtx->cfg_ini->fEnableBeaconEarlyTermination);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [teleBcnWakeupEnable] Value = [%u] ",pHddCtx->cfg_ini->teleBcnWakeupEn);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [transListenInterval] Value = [%u] ",pHddCtx->cfg_ini->nTeleBcnTransListenInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [transLiNumIdleBeacons] Value = [%u] ",pHddCtx->cfg_ini->nTeleBcnTransLiNumIdleBeacons);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [maxListenInterval] Value = [%u] ",pHddCtx->cfg_ini->nTeleBcnMaxListenInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [maxLiNumIdleBeacons] Value = [%u] ",pHddCtx->cfg_ini->nTeleBcnMaxLiNumIdleBeacons);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [bcnEarlyTermWakeInterval] Value = [%u] ",pHddCtx->cfg_ini->bcnEarlyTermWakeInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApDataAvailPollInterVal] Value = [%u] ",pHddCtx->cfg_ini->apDataAvailPollPeriodInMs);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableBypass11d] Value = [%u] ",pHddCtx->cfg_ini->enableBypass11d);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableDFSChnlScan] Value = [%u] ",pHddCtx->cfg_ini->enableDFSChnlScan);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableDFSPnoChnlScan] Value = [%u] ",pHddCtx->cfg_ini->enableDFSPnoChnlScan);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gReportMaxLinkSpeed] Value = [%u] ",pHddCtx->cfg_ini->reportMaxLinkSpeed);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [thermalMitigationEnable] Value = [%u] ",pHddCtx->cfg_ini->thermalMitigationEnable);
#ifdef WLAN_FEATURE_11AC
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gVhtChannelWidth] value = [%u]",pHddCtx->cfg_ini->vhtChannelWidth);
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [enableFirstScan2GOnly] Value = [%u] ",pHddCtx->cfg_ini->enableFirstScan2GOnly);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [skipDfsChnlInP2pSearch] Value = [%u] ",pHddCtx->cfg_ini->skipDfsChnlInP2pSearch);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [ignoreDynamicDtimInP2pMode] Value = [%u] ",pHddCtx->cfg_ini->ignoreDynamicDtimInP2pMode);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [enableRxSTBC] Value = [%u] ",pHddCtx->cfg_ini->enableRxSTBC);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableLpwrImgTransition] Value = [%u] ",pHddCtx->cfg_ini->enableLpwrImgTransition);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableSSR] Value = [%u] ",pHddCtx->cfg_ini->enableSSR);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableVhtFor24GHzBand] Value = [%u] ",pHddCtx->cfg_ini->enableVhtFor24GHzBand);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableTrafficMonitor] Value = [%u] ", pHddCtx->cfg_ini->enableTrafficMonitor);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gTrafficIdleTimeout] Value = [%u] ", pHddCtx->cfg_ini->trafficIdleTimeout);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gFlexConnectPowerFactor] Value = [%u] ", pHddCtx->cfg_ini->flexConnectPowerFactor);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableIbssHeartBeatOffload] Value = [%u] ", pHddCtx->cfg_ini->enableIbssHeartBeatOffload);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAntennaDiversity] Value = [%u] ", pHddCtx->cfg_ini->antennaDiversity);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gGoLinkMonitorPeriod] Value = [%u]",pHddCtx->cfg_ini->goLinkMonitorPeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApLinkMonitorPeriod] Value = [%u]",pHddCtx->cfg_ini->apLinkMonitorPeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gGoKeepAlivePeriod] Value = [%u]",pHddCtx->cfg_ini->goKeepAlivePeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApKeepAlivePeriod]Value = [%u]",pHddCtx->cfg_ini->apKeepAlivePeriod);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAmsduSupportInAMPDU] Value = [%u] ",pHddCtx->cfg_ini->isAmsduSupportInAMPDU);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [nSelect5GHzMargin] Value = [%u] ",pHddCtx->cfg_ini->nSelect5GHzMargin);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gCoalesingInIBSS] Value = [%u] ",pHddCtx->cfg_ini->isCoalesingInIBSSAllowed);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [overrideCountryCode] Value = [%s] ",pHddCtx->cfg_ini->overrideCountryCode);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAsdProbeInterval] Value = [%u]",pHddCtx->cfg_ini->gAsdProbeInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAsdTriggerThreshold] Value = [%hhd]",pHddCtx->cfg_ini->gAsdTriggerThreshold);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAsdRTTRssiHystThreshold]Value = [%u]",pHddCtx->cfg_ini->gAsdRTTRssiHystThreshold);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [gIgnorePeerErpInfo] Value = [%u] ",
pHddCtx->cfg_ini->ignorePeerErpInfo);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"Name = [disableP2PMacSpoofing] Value = [%u] ",
pHddCtx->cfg_ini->disableP2PMacSpoofing);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gRoamtoDFSChannel] Value = [%u] ",pHddCtx->cfg_ini->allowDFSChannelRoam);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gMaxConcurrentActiveSessions] Value = [%u] ", pHddCtx->cfg_ini->gMaxConcurrentActiveSessions);
#ifdef WLAN_FEATURE_AP_HT40_24G
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gApHT4024G] Value = [%u]", pHddCtx->cfg_ini->apHT40_24GEnabled);
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gAcsScanBandPreference] Value = [%u] ",pHddCtx->cfg_ini->acsScanBandPreference);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gACSBandSwitchThreshold] value = [%u]",pHddCtx->cfg_ini->acsBandSwitchThreshold);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gDeferScanTimeInterval] value = [%u]",pHddCtx->cfg_ini->nDeferScanTimeInterval);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableTDLSScan] value = [%u]\n",pHddCtx->cfg_ini->fEnableTDLSScan);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gDxeReplenishRXTimerVal] Value = [%u] ", pHddCtx->cfg_ini->dxeReplenishRXTimerVal);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gDxeSSREnable] Value = [%u] ", pHddCtx->cfg_ini->dxeSSREnable);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [toggleArpBDRates] Value = [%u] ", pHddCtx->cfg_ini->toggleArpBDRates);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Name = [gEnableForceTargetAssert] Value = [%u] ", pHddCtx->cfg_ini->crash_inject_enabled);
}
#define CFG_VALUE_MAX_LEN 256
#define CFG_ENTRY_MAX_LEN (32+CFG_VALUE_MAX_LEN)
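/* Walk the registry table and render every config item as "Name=[value]",
   formatting integers, strings, and MAC addresses according to RegType.
   Items explicitly set in the ini file get a trailing '*'. When
   RETURN_IN_BUFFER is defined the output goes into pBuf (returning
   VOS_STATUS_E_RESOURCES if it fills up); otherwise each entry is sent to
   the system log. */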
VOS_STATUS hdd_cfg_get_config(hdd_context_t *pHddCtx, char *pBuf, int buflen)
{
unsigned int idx;
REG_TABLE_ENTRY *pRegEntry = g_registry_table;
unsigned long cRegTableEntries = sizeof(g_registry_table) / sizeof( g_registry_table[ 0 ]);
v_U32_t value;
char valueStr[CFG_VALUE_MAX_LEN];
char configStr[CFG_ENTRY_MAX_LEN];
char *fmt;
void *pField;
v_MACADDR_t *pMacAddr;
char *pCur = pBuf;
int curlen;
// start with an empty string
*pCur = '\0';
for ( idx = 0; idx < cRegTableEntries; idx++, pRegEntry++ )
{
pField = ( (v_U8_t *)pHddCtx->cfg_ini) + pRegEntry->VarOffset;
if ( ( WLAN_PARAM_Integer == pRegEntry->RegType ) ||
( WLAN_PARAM_SignedInteger == pRegEntry->RegType ) ||
( WLAN_PARAM_HexInteger == pRegEntry->RegType ) )
{
value = 0;
memcpy( &value, pField, pRegEntry->VarSize );
if ( WLAN_PARAM_HexInteger == pRegEntry->RegType )
{
fmt = "%x";
}
else if ( WLAN_PARAM_SignedInteger == pRegEntry->RegType )
{
fmt = "%d";
}
else
{
fmt = "%u";
}
snprintf(valueStr, CFG_VALUE_MAX_LEN, fmt, value);
}
else if ( WLAN_PARAM_String == pRegEntry->RegType )
{
snprintf(valueStr, CFG_VALUE_MAX_LEN, "%s", (char *)pField);
}
else if ( WLAN_PARAM_MacAddr == pRegEntry->RegType )
{
pMacAddr = (v_MACADDR_t *) pField;
snprintf(valueStr, CFG_VALUE_MAX_LEN,
"%02x:%02x:%02x:%02x:%02x:%02x",
pMacAddr->bytes[0],
pMacAddr->bytes[1],
pMacAddr->bytes[2],
pMacAddr->bytes[3],
pMacAddr->bytes[4],
pMacAddr->bytes[5]);
}
else
{
snprintf(valueStr, CFG_VALUE_MAX_LEN, "(unhandled)");
}
curlen = scnprintf(configStr, CFG_ENTRY_MAX_LEN,
"%s=[%s]%s\n",
pRegEntry->RegName,
valueStr,
test_bit(idx, (void *)&pHddCtx->cfg_ini->bExplicitCfg) ?
"*" : "");
// Ideally we would return the config to the application, but the config
// is too big, so for now we just printk() it.
#ifdef RETURN_IN_BUFFER
if (curlen <= buflen)
{
// copy string + '\0'
memcpy(pCur, configStr, curlen+1);
// advance past the copied string (the NUL is not counted)
pCur += curlen;
buflen -= curlen;
}
else
{
// buffer space exhausted, return what we have
return VOS_STATUS_E_RESOURCES;
}
#else
printk(KERN_CRIT "%s", configStr);
#endif // RETURN_IN_BUFFER
}
#ifndef RETURN_IN_BUFFER
// notify application that output is in system log
snprintf(pCur, buflen, "WLAN configuration written to system log");
#endif // RETURN_IN_BUFFER
return VOS_STATUS_SUCCESS;
}
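/* Linear search of the parsed ini table for an entry whose name matches
   @name; on a hit, *value is pointed at the stored value string and
   VOS_STATUS_SUCCESS is returned, otherwise VOS_STATUS_E_FAILURE. */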
static VOS_STATUS find_cfg_item (tCfgIniEntry* iniTable, unsigned long entries,
char *name, char** value)
{
VOS_STATUS status = VOS_STATUS_E_FAILURE;
unsigned long i;
for (i = 0; i < entries; i++) {
if (strcmp(iniTable[i].name, name) == 0) {
*value = iniTable[i].value;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "Found %s entry for Name=[%s] Value=[%s] ",
WLAN_INI_FILE, name, *value);
return VOS_STATUS_SUCCESS;
}
}
return status;
}
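/* Convert a single hex character to its numeric value, e.g. 'b' -> 11.
   Non-hex characters quietly map to 0 rather than reporting an error. */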
static int parseHexDigit(char c)
{
if (c >= '0' && c <= '9')
return c-'0';
if (c >= 'a' && c <= 'f')
return c-'a'+10;
if (c >= 'A' && c <= 'F')
return c-'A'+10;
return 0;
}
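/* Apply the parsed ini table to the driver config structure: for each
   registry table entry, look up the ini value, convert it per its RegType
   (unsigned, hex, signed, string, or MAC address), range-check it against
   VarMin/VarMax, and fall back to VarDefault when the value is missing or
   invalid. Explicitly configured items are recorded in the bExplicitCfg
   bitmap so print_hdd_cfg()/hdd_cfg_get_config() can flag them. */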
static VOS_STATUS hdd_apply_cfg_ini( hdd_context_t *pHddCtx, tCfgIniEntry* iniTable, unsigned long entries)
{
VOS_STATUS match_status = VOS_STATUS_E_FAILURE;
VOS_STATUS ret_status = VOS_STATUS_SUCCESS;
unsigned int idx;
void *pField;
char *value_str = NULL;
unsigned long len_value_str;
char *candidate;
v_U32_t value;
v_S31_t svalue;
void *pStructBase = pHddCtx->cfg_ini;
REG_TABLE_ENTRY *pRegEntry = g_registry_table;
unsigned long cRegTableEntries = sizeof(g_registry_table) / sizeof( g_registry_table[ 0 ]);
v_U32_t cbOutString;
int i;
int rv;
// sanity test
if (MAX_CFG_INI_ITEMS < cRegTableEntries)
{
hddLog(LOGE, "%s: MAX_CFG_INI_ITEMS too small, must be at least %ld",
__func__, cRegTableEntries);
}
for ( idx = 0; idx < cRegTableEntries; idx++, pRegEntry++ )
{
//Calculate the address of the destination field in the structure.
pField = ( (v_U8_t *)pStructBase )+ pRegEntry->VarOffset;
match_status = find_cfg_item(iniTable, entries, pRegEntry->RegName, &value_str);
if( (match_status != VOS_STATUS_SUCCESS) && ( pRegEntry->Flags & VAR_FLAGS_REQUIRED ) )
{
// If we could not read the cfg item and it is required, this is an error.
hddLog(LOGE, "%s: Failed to read required config parameter %s",
__func__, pRegEntry->RegName);
ret_status = VOS_STATUS_E_FAILURE;
break;
}
if ( ( WLAN_PARAM_Integer == pRegEntry->RegType ) ||
( WLAN_PARAM_HexInteger == pRegEntry->RegType ) )
{
// If successfully read from the registry, use the value read.
// If not, use the default value.
if ( match_status == VOS_STATUS_SUCCESS && (WLAN_PARAM_Integer == pRegEntry->RegType)) {
rv = kstrtou32(value_str, 10, &value);
if (rv < 0) {
hddLog(LOGE, "%s: Reg Parameter %s invalid. Enforcing default",
__func__, pRegEntry->RegName);
value = pRegEntry->VarDefault;
}
}
else if ( match_status == VOS_STATUS_SUCCESS && (WLAN_PARAM_HexInteger == pRegEntry->RegType)) {
rv = kstrtou32(value_str, 16, &value);
if (rv < 0) {
hddLog(LOGE, "%s: Reg paramter %s invalid. Enforcing default",
__func__, pRegEntry->RegName);
value = pRegEntry->VarDefault;
}
}
else {
value = pRegEntry->VarDefault;
}
// If this parameter needs range checking, do it here.
if ( pRegEntry->Flags & VAR_FLAGS_RANGE_CHECK )
{
if ( value > pRegEntry->VarMax )
{
hddLog(LOGE, "%s: Reg Parameter %s > allowed Maximum [%u > %lu]. Enforcing Maximum",
__func__, pRegEntry->RegName, value, pRegEntry->VarMax );
value = pRegEntry->VarMax;
}
if ( value < pRegEntry->VarMin )
{
hddLog(LOGE, "%s: Reg Parameter %s < allowed Minimum [%u < %lu]. Enforcing Minimum",
__func__, pRegEntry->RegName, value, pRegEntry->VarMin);
value = pRegEntry->VarMin;
}
}
// If this parameter needs range checking, do it here.
else if ( pRegEntry->Flags & VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT )
{
if ( value > pRegEntry->VarMax )
{
hddLog(LOGE, "%s: Reg Parameter %s > allowed Maximum [%u > %lu]. Enforcing Default= %lu",
__func__, pRegEntry->RegName, value, pRegEntry->VarMax, pRegEntry->VarDefault );
value = pRegEntry->VarDefault;
}
if ( value < pRegEntry->VarMin )
{
hddLog(LOGE, "%s: Reg Parameter %s < allowed Minimum [%u < %lu]. Enforcing Default= %lu",
__func__, pRegEntry->RegName, value, pRegEntry->VarMin, pRegEntry->VarDefault );
value = pRegEntry->VarDefault;
}
}
// Move the variable into the output field.
memcpy( pField, &value, pRegEntry->VarSize );
}
else if ( WLAN_PARAM_SignedInteger == pRegEntry->RegType )
{
// If successfully read from the registry, use the value read.
// If not, use the default value.
if (VOS_STATUS_SUCCESS == match_status)
{
rv = kstrtos32(value_str, 10, &svalue);
if (rv < 0) {
hddLog(VOS_TRACE_LEVEL_WARN, "%s: Reg Parameter %s invalid. Enforcing Default",
__func__, pRegEntry->RegName);
svalue = (v_S31_t)pRegEntry->VarDefault;
}
}
else
{
svalue = (v_S31_t)pRegEntry->VarDefault;
}
// If this parameter needs range checking, do it here.
if ( pRegEntry->Flags & VAR_FLAGS_RANGE_CHECK )
{
if ( svalue > (v_S31_t)pRegEntry->VarMax )
{
hddLog(LOGE, "%s: Reg Parameter %s > allowed Maximum "
"[%d > %d]. Enforcing Maximum", __func__,
pRegEntry->RegName, svalue, (int)pRegEntry->VarMax );
svalue = (v_S31_t)pRegEntry->VarMax;
}
if ( svalue < (v_S31_t)pRegEntry->VarMin )
{
hddLog(LOGE, "%s: Reg Parameter %s < allowed Minimum "
"[%d < %d]. Enforcing Minimum", __func__,
pRegEntry->RegName, svalue, (int)pRegEntry->VarMin);
svalue = (v_S31_t)pRegEntry->VarMin;
}
}
// If this parameter needs range checking, do it here.
else if ( pRegEntry->Flags & VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT )
{
if ( svalue > (v_S31_t)pRegEntry->VarMax )
{
hddLog(LOGE, "%s: Reg Parameter %s > allowed Maximum "
"[%d > %d]. Enforcing Default= %d",
__func__, pRegEntry->RegName, svalue,
(int)pRegEntry->VarMax,
(int)pRegEntry->VarDefault );
svalue = (v_S31_t)pRegEntry->VarDefault;
}
if ( svalue < (v_S31_t)pRegEntry->VarMin )
{
hddLog(LOGE, "%s: Reg Parameter %s < allowed Minimum "
"[%d < %d]. Enforcing Default= %d",
__func__, pRegEntry->RegName, svalue,
(int)pRegEntry->VarMin,
(int)pRegEntry->VarDefault);
svalue = (v_S31_t)pRegEntry->VarDefault;
}
}
// Move the variable into the output field.
memcpy( pField, &svalue, pRegEntry->VarSize );
}
// Handle string parameters
else if ( WLAN_PARAM_String == pRegEntry->RegType )
{
#ifdef WLAN_CFG_DEBUG
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH, "RegName = %s, VarOffset %u VarSize %u VarDefault %s",
pRegEntry->RegName, pRegEntry->VarOffset, pRegEntry->VarSize, (char*)pRegEntry->VarDefault);
#endif
if ( match_status == VOS_STATUS_SUCCESS)
{
len_value_str = strlen(value_str);
if(len_value_str > (pRegEntry->VarSize - 1)) {
hddLog(LOGE, "%s: Invalid Value=[%s] specified for Name=[%s] in %s",
__func__, value_str, pRegEntry->RegName, WLAN_INI_FILE);
cbOutString = utilMin( strlen( (char *)pRegEntry->VarDefault ), pRegEntry->VarSize - 1 );
memcpy( pField, (void *)(pRegEntry->VarDefault), cbOutString );
( (v_U8_t *)pField )[ cbOutString ] = '\0';
}
else
{
memcpy( pField, (void *)(value_str), len_value_str);
( (v_U8_t *)pField )[ len_value_str ] = '\0';
}
}
else
{
// Failed to read the string parameter from the registry. Use the default.
cbOutString = utilMin( strlen( (char *)pRegEntry->VarDefault ), pRegEntry->VarSize - 1 );
memcpy( pField, (void *)(pRegEntry->VarDefault), cbOutString );
( (v_U8_t *)pField )[ cbOutString ] = '\0';
}
}
else if ( WLAN_PARAM_MacAddr == pRegEntry->RegType )
{
if(pRegEntry->VarSize != VOS_MAC_ADDR_SIZE) {
hddLog(LOGE, "%s: Invalid VarSize %u for Name=[%s]",
__func__, pRegEntry->VarSize, pRegEntry->RegName);
continue;
}
candidate = (char*)pRegEntry->VarDefault;
if ( match_status == VOS_STATUS_SUCCESS) {
len_value_str = strlen(value_str);
if(len_value_str != (VOS_MAC_ADDR_SIZE*2)) {
hddLog(LOGE, "%s: Invalid MAC addr [%s] specified for Name=[%s] in %s",
__func__, value_str, pRegEntry->RegName, WLAN_INI_FILE);
}
else
candidate = value_str;
}
//parse the string and store it in the byte array
for(i=0; i<VOS_MAC_ADDR_SIZE; i++)
{
((char*)pField)[i] =
(char)(parseHexDigit(candidate[i*2])*16 + parseHexDigit(candidate[i*2+1]));
}
}
else
{
hddLog(LOGE, "%s: Unknown param type for name[%s] in registry table",
__func__, pRegEntry->RegName);
}
// did we successfully parse a cfg item for this parameter?
if( (match_status == VOS_STATUS_SUCCESS) &&
(idx < MAX_CFG_INI_ITEMS) )
{
set_bit(idx, (void *)&pHddCtx->cfg_ini->bExplicitCfg);
}
}
print_hdd_cfg(pHddCtx);
return( ret_status );
}
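/* Translate the HDD dot11 mode enum from the ini file into the
   corresponding CSR phy mode; unrecognized values fall back to 11n. */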
eCsrPhyMode hdd_cfg_xlate_to_csr_phy_mode( eHddDot11Mode dot11Mode )
{
switch (dot11Mode)
{
case (eHDD_DOT11_MODE_abg):
return eCSR_DOT11_MODE_abg;
case (eHDD_DOT11_MODE_11b):
return eCSR_DOT11_MODE_11b;
case (eHDD_DOT11_MODE_11g):
return eCSR_DOT11_MODE_11g;
default:
case (eHDD_DOT11_MODE_11n):
return eCSR_DOT11_MODE_11n;
case (eHDD_DOT11_MODE_11g_ONLY):
return eCSR_DOT11_MODE_11g_ONLY;
case (eHDD_DOT11_MODE_11n_ONLY):
return eCSR_DOT11_MODE_11n_ONLY;
case (eHDD_DOT11_MODE_11b_ONLY):
return eCSR_DOT11_MODE_11b_ONLY;
#ifdef WLAN_FEATURE_11AC
case (eHDD_DOT11_MODE_11ac_ONLY):
return eCSR_DOT11_MODE_11ac_ONLY;
case (eHDD_DOT11_MODE_11ac):
return eCSR_DOT11_MODE_11ac;
#endif
case (eHDD_DOT11_MODE_AUTO):
return eCSR_DOT11_MODE_AUTO;
}
}
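/* Push the BT coexistence parameters from the ini config into SME via a
   read-modify-write of the current BTC configuration. */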
static void hdd_set_btc_config(hdd_context_t *pHddCtx)
{
hdd_config_t *pConfig = pHddCtx->cfg_ini;
tSmeBtcConfig btcParams;
int i;
sme_BtcGetConfig(pHddCtx->hHal, &btcParams);
btcParams.btcExecutionMode = pConfig->btcExecutionMode;
btcParams.btcConsBtSlotsToBlockDuringDhcp = pConfig->btcConsBtSlotsToBlockDuringDhcp;
btcParams.btcA2DPBtSubIntervalsDuringDhcp = pConfig->btcA2DPBtSubIntervalsDuringDhcp;
btcParams.btcStaticLenInqBt = pConfig->btcStaticLenInqBt;
btcParams.btcStaticLenPageBt = pConfig->btcStaticLenPageBt;
btcParams.btcStaticLenConnBt = pConfig->btcStaticLenConnBt;
btcParams.btcStaticLenLeBt = pConfig->btcStaticLenLeBt;
btcParams.btcStaticLenInqWlan = pConfig->btcStaticLenInqWlan;
btcParams.btcStaticLenPageWlan = pConfig->btcStaticLenPageWlan;
btcParams.btcStaticLenConnWlan = pConfig->btcStaticLenConnWlan;
btcParams.btcStaticLenLeWlan = pConfig->btcStaticLenLeWlan;
btcParams.btcDynMaxLenBt = pConfig->btcDynMaxLenBt;
btcParams.btcDynMaxLenWlan = pConfig->btcDynMaxLenWlan;
btcParams.btcMaxScoBlockPerc = pConfig->btcMaxScoBlockPerc;
btcParams.btcDhcpProtOnA2dp = pConfig->btcDhcpProtOnA2dp;
btcParams.btcDhcpProtOnSco = pConfig->btcDhcpProtOnSco;
for (i = 0; i < 10; i++)
{
btcParams.mwsCoexVictimWANFreq[i] = pConfig->mwsCoexVictimWANFreq[i];
btcParams.mwsCoexVictimWLANFreq[i] = pConfig->mwsCoexVictimWLANFreq[i];
btcParams.mwsCoexVictimConfig[i] = pConfig->mwsCoexVictimConfig[i];
btcParams.mwsCoexVictimConfig2[i] = pConfig->mwsCoexVictimConfig2[i];
}
for (i = 0; i < 6; i++)
{
btcParams.mwsCoexConfig[i] = pConfig->mwsCoexConfig[i];
}
btcParams.mwsCoexModemBackoff = pConfig->mwsCoexModemBackoff;
btcParams.SARPowerBackoff = pConfig->SARPowerBackoff;
sme_BtcSetConfig(pHddCtx->hHal, &btcParams);
}
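/* Program the IMPS/BMPS power save parameters into SME based on the
   PowerUsageControl profile ("Min"/"Max"/"Mod") selected in the ini file,
   then enable or disable IMPS/BMPS and the auto-BMPS timer accordingly. */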
static void hdd_set_power_save_config(hdd_context_t *pHddCtx, tSmeConfigParams *smeConfig)
{
hdd_config_t *pConfig = pHddCtx->cfg_ini;
tPmcBmpsConfigParams bmpsParams;
sme_GetConfigPowerSave(pHddCtx->hHal, ePMC_BEACON_MODE_POWER_SAVE, &bmpsParams);
if (strcmp(pConfig->PowerUsageControl, "Min") == 0)
{
smeConfig->csrConfig.impsSleepTime = pConfig->nImpsMinSleepTime;
bmpsParams.bmpsPeriod = pConfig->nBmpsMinListenInterval;
bmpsParams.enableBeaconEarlyTermination = pConfig->fEnableBeaconEarlyTermination;
bmpsParams.bcnEarlyTermWakeInterval = pConfig->bcnEarlyTermWakeInterval;
}
if (strcmp(pConfig->PowerUsageControl, "Max") == 0)
{
smeConfig->csrConfig.impsSleepTime = pConfig->nImpsMaxSleepTime;
bmpsParams.bmpsPeriod = pConfig->nBmpsMaxListenInterval;
bmpsParams.enableBeaconEarlyTermination = pConfig->fEnableBeaconEarlyTermination;
bmpsParams.bcnEarlyTermWakeInterval = pConfig->bcnEarlyTermWakeInterval;
}
if (strcmp(pConfig->PowerUsageControl, "Mod") == 0)
{
smeConfig->csrConfig.impsSleepTime = pConfig->nImpsModSleepTime;
bmpsParams.bmpsPeriod = pConfig->nBmpsModListenInterval;
bmpsParams.enableBeaconEarlyTermination = pConfig->fEnableBeaconEarlyTermination;
bmpsParams.bcnEarlyTermWakeInterval = pConfig->bcnEarlyTermWakeInterval;
}
if (pConfig->fIsImpsEnabled)
{
sme_EnablePowerSave (pHddCtx->hHal, ePMC_IDLE_MODE_POWER_SAVE);
}
else
{
sme_DisablePowerSave (pHddCtx->hHal, ePMC_IDLE_MODE_POWER_SAVE);
}
/* If isAndroidPsEn is 1, the host driver and the layers above it control
   the power save mechanism; if it is 0, the driver/core stack controls
   power saving internally. */
sme_SetHostPowerSave (pHddCtx->hHal, pConfig->isAndroidPsEn);
if (pConfig->fIsBmpsEnabled)
{
sme_EnablePowerSave (pHddCtx->hHal, ePMC_BEACON_MODE_POWER_SAVE);
}
else
{
sme_DisablePowerSave (pHddCtx->hHal, ePMC_BEACON_MODE_POWER_SAVE);
}
bmpsParams.trafficMeasurePeriod = pConfig->nAutoBmpsTimerValue;
if (sme_SetConfigPowerSave(pHddCtx->hHal, ePMC_BEACON_MODE_POWER_SAVE, &bmpsParams)== eHAL_STATUS_FAILURE)
{
hddLog(LOGE, "SetConfigPowerSave failed to set BMPS params");
}
if(pConfig->fIsAutoBmpsTimerEnabled)
{
sme_StartAutoBmpsTimer(pHddCtx->hHal);
}
}
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
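/* Parse a comma-separated list of integers (e.g. "1,6,11") into a byte
   array, stopping at intArrayMaxLen elements; elements that fail to parse
   are skipped rather than aborting the scan. */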
static VOS_STATUS hdd_string_to_u8_array( char *str, tANI_U8 *intArray, tANI_U8 *len, tANI_U8 intArrayMaxLen )
{
char *s = str;
if( str == NULL || intArray == NULL || len == NULL )
{
return VOS_STATUS_E_INVAL;
}
*len = 0;
while ( (s != NULL) && (*len < intArrayMaxLen) )
{
int val;
//Increment length only if sscanf successfully extracted one element.
//Any other return value means error; ignore it.
if( sscanf(s, "%d", &val ) == 1 )
{
intArray[*len] = (tANI_U8) val;
*len += 1;
}
s = strpbrk( s, "," );
if( s )
s++;
}
return VOS_STATUS_SUCCESS;
}
#endif
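/* Propagate the ini-derived configuration into the WNI CFG store via CCM.
   Each ccmCfgSetInt/ccmCfgSetStr failure is logged and clears fStatus, but
   processing continues so that all remaining items are still attempted. */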
v_BOOL_t hdd_update_config_dat( hdd_context_t *pHddCtx )
{
v_BOOL_t fStatus = TRUE;
#ifdef WLAN_SOFTAP_VSTA_FEATURE
tANI_U32 val;
#endif
hdd_config_t *pConfig = pHddCtx->cfg_ini;
tSirMacHTCapabilityInfo *htCapInfo;
tANI_U32 temp32;
tANI_U16 temp16;
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_SHORT_GI_20MHZ,
pConfig->ShortGI20MhzEnable, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_SHORT_GI_20MHZ to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_CAL_CONTROL, pConfig->Calibration,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_CAL_CONTROL to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_CAL_PERIOD, pConfig->CalibrationPeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_CAL_PERIOD to CCM");
}
if ( 0 != pConfig->Cfg1Id )
{
if (ccmCfgSetInt(pHddCtx->hHal, pConfig->Cfg1Id, pConfig->Cfg1Value, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on Cfg1Id to CCM");
}
}
if ( 0 != pConfig->Cfg2Id )
{
if (ccmCfgSetInt(pHddCtx->hHal, pConfig->Cfg2Id, pConfig->Cfg2Value,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on Cfg2Id to CCM");
}
}
if ( 0 != pConfig->Cfg3Id )
{
if (ccmCfgSetInt(pHddCtx->hHal, pConfig->Cfg3Id, pConfig->Cfg3Value,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on Cfg3Id to CCM");
}
}
if ( 0 != pConfig->Cfg4Id )
{
if (ccmCfgSetInt(pHddCtx->hHal, pConfig->Cfg4Id, pConfig->Cfg4Value,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on Cfg4Id to CCM");
}
}
if ( 0 != pConfig->Cfg5Id )
{
if (ccmCfgSetInt(pHddCtx->hHal, pConfig->Cfg5Id, pConfig->Cfg5Value,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on Cfg5Id to CCM");
}
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BA_AUTO_SETUP, pConfig->BlockAckAutoSetup,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BA_AUTO_SETUP to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_FIXED_RATE, pConfig->TxRate,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_FIXED_RATE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_RX_AMPDU_FACTOR,
pConfig->MaxRxAmpduFactor, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Could not pass on WNI_CFG_HT_AMPDU_PARAMS_MAX_RX_AMPDU_FACTOR to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_SHORT_PREAMBLE, pConfig->fIsShortPreamble,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Could not pass on WNI_CFG_SHORT_PREAMBLE to CCM");
}
if (pConfig->fIsAutoIbssBssid)
{
if (ccmCfgSetStr(pHddCtx->hHal, WNI_CFG_BSSID, (v_U8_t *)"000000000000",
sizeof(v_BYTE_t) * VOS_MAC_ADDR_SIZE, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Could not pass on WNI_CFG_BSSID to CCM");
}
}
else
{
if ( VOS_FALSE == vos_is_macaddr_group( &pConfig->IbssBssid ))
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_MED,
"MAC Addr (IBSS BSSID) read from Registry is: " MAC_ADDRESS_STR,
MAC_ADDR_ARRAY(pConfig->IbssBssid.bytes));
if (ccmCfgSetStr(pHddCtx->hHal, WNI_CFG_BSSID, pConfig->IbssBssid.bytes,
sizeof(v_BYTE_t) * VOS_MAC_ADDR_SIZE, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Could not pass on WNI_CFG_BSSID to CCM");
}
}
else
{
fStatus = FALSE;
hddLog(LOGE,"Could not pass on WNI_CFG_BSSID to CCM");
}
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PASSIVE_MINIMUM_CHANNEL_TIME,
pConfig->nPassiveMinChnTime, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_PASSIVE_MINIMUM_CHANNEL_TIME"
" to CCM\n");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PASSIVE_MAXIMUM_CHANNEL_TIME,
pConfig->nPassiveMaxChnTime, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_PASSIVE_MAXIMUM_CHANNEL_TIME"
" to CCM\n");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BEACON_INTERVAL, pConfig->nBeaconInterval,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BEACON_INTERVAL to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_PS_POLL, pConfig->nMaxPsPoll,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_PS_POLL to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_CURRENT_RX_ANTENNA, pConfig->nRxAnt, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Failure: Could not pass on WNI_CFG_CURRENT_RX_ANTENNA configuration info to HAL" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_LOW_GAIN_OVERRIDE, pConfig->fIsLowGainOverride,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_LOW_GAIN_OVERRIDE to HAL");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RSSI_FILTER_PERIOD, pConfig->nRssiFilterPeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_RSSI_FILTER_PERIOD to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_IGNORE_DTIM, pConfig->fIgnoreDtim,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_IGNORE_DTIM configuration to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PS_ENABLE_HEART_BEAT, pConfig->fEnableFwHeartBeatMonitoring,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Failure: Could not pass on WNI_CFG_PS_HEART_BEAT configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PS_ENABLE_BCN_FILTER, pConfig->fEnableFwBeaconFiltering,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Failure: Could not pass on WNI_CFG_PS_BCN_FILTER configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PS_ENABLE_RSSI_MONITOR, pConfig->fEnableFwRssiMonitoring,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Failure: Could not pass on WNI_CFG_PS_RSSI_MONITOR configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PS_DATA_INACTIVITY_TIMEOUT, pConfig->nDataInactivityTimeout,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_PS_DATA_INACTIVITY_TIMEOUT configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_NTH_BEACON_FILTER, pConfig->nthBeaconFilter,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_NTH_BEACON_FILTER configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_LTE_COEX, pConfig->enableLTECoex,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_LTE_COEX to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_PHY_AGC_LISTEN_MODE, pConfig->nEnableListenMode,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_PHY_AGC_LISTEN_MODE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_AP_KEEP_ALIVE_TIMEOUT, pConfig->apKeepAlivePeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_AP_KEEP_ALIVE_TIMEOUT to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_GO_KEEP_ALIVE_TIMEOUT, pConfig->goKeepAlivePeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_GO_KEEP_ALIVE_TIMEOUT to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_AP_LINK_MONITOR_TIMEOUT, pConfig->apLinkMonitorPeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_AP_LINK_MONITOR_TIMEOUT to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_GO_LINK_MONITOR_TIMEOUT, pConfig->goLinkMonitorPeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_GO_LINK_MONITOR_TIMEOUT to CCM");
}
#if defined WLAN_FEATURE_VOWIFI
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RRM_ENABLED, pConfig->fRrmEnable,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_RRM_ENABLE configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RRM_OPERATING_CHAN_MAX, pConfig->nInChanMeasMaxDuration,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_RRM_OPERATING_CHAN_MAX configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RRM_NON_OPERATING_CHAN_MAX, pConfig->nOutChanMeasMaxDuration,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_RRM_OUT_CHAN_MAX configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MCAST_BCAST_FILTER_SETTING, pConfig->mcastBcastFilterSetting,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_MCAST_BCAST_FILTER_SETTING configuration info to CCM" );
}
#endif
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_SINGLE_TID_RC, pConfig->bSingleTidRc,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_SINGLE_TID_RC configuration info to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TELE_BCN_WAKEUP_EN, pConfig->teleBcnWakeupEn,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_TELE_BCN_WAKEUP_EN configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TELE_BCN_TRANS_LI, pConfig->nTeleBcnTransListenInterval,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_TELE_BCN_TRANS_LI configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TELE_BCN_MAX_LI, pConfig->nTeleBcnMaxListenInterval,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_TELE_BCN_MAX_LI configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS, pConfig->nTeleBcnTransLiNumIdleBeacons,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TELE_BCN_MAX_LI_IDLE_BCNS, pConfig->nTeleBcnMaxLiNumIdleBeacons,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_TELE_BCN_MAX_LI_IDLE_BCNS configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RF_SETTLING_TIME_CLK, pConfig->rfSettlingTimeUs,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_RF_SETTLING_TIME_CLK configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_INFRA_STA_KEEP_ALIVE_PERIOD, pConfig->infraStaKeepAlivePeriod,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_INFRA_STA_KEEP_ALIVE_PERIOD configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_DYNAMIC_PS_POLL_VALUE, pConfig->dynamicPsPollValue,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_DYNAMIC_PS_POLL_VALUE configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PS_NULLDATA_AP_RESP_TIMEOUT, pConfig->nNullDataApRespTimeout,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_PS_NULLDATA_DELAY_TIMEOUT configuration info to CCM" );
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_AP_DATA_AVAIL_POLL_PERIOD, pConfig->apDataAvailPollPeriodInMs,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_AP_DATA_AVAIL_POLL_PERIOD configuration info to CCM" );
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_FRAGMENTATION_THRESHOLD, pConfig->FragmentationThreshold,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_FRAGMENTATION_THRESHOLD configuration info to CCM" );
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RTS_THRESHOLD, pConfig->RTSThreshold,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_RTS_THRESHOLD configuration info to CCM" );
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_11D_ENABLED, pConfig->Is11dSupportEnabled,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_11D_ENABLED configuration info to CCM" );
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_HEART_BEAT_THRESHOLD, pConfig->HeartbeatThresh24,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_HEART_BEAT_THRESHOLD configuration info to CCM" );
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_CLOSE_LOOP,
pConfig->enableCloseLoop, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_CLOSE_LOOP to CCM");
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TX_PWR_CTRL_ENABLE,
pConfig->enableAutomaticTxPowerControl, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TX_PWR_CTRL_ENABLE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_SHORT_GI_40MHZ,
pConfig->ShortGI40MhzEnable, NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_SHORT_GI_40MHZ to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_MC_ADDR_LIST, pConfig->fEnableMCAddrList,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_MC_ADDR_LIST to CCM");
}
#ifdef WLAN_FEATURE_11AC
/* Based on cfg.ini, update the Basic MCS set, RX/TX MCS map in the cfg.dat */
/* valid values are 0(MCS0-7), 1(MCS0-8), 2(MCS0-9) */
/* we update only the least significant 2 bits in the corresponding fields */
if( (pConfig->dot11Mode == eHDD_DOT11_MODE_AUTO) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11ac_ONLY) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11ac) )
{
{
tANI_U32 temp = 0;
ccmCfgGetInt(pHddCtx->hHal, WNI_CFG_VHT_BASIC_MCS_SET, &temp);
temp = (temp & 0xFFFC) | pConfig->vhtRxMCS;
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_BASIC_MCS_SET,
temp, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_VHT_BASIC_MCS_SET to CCM");
}
ccmCfgGetInt(pHddCtx->hHal, WNI_CFG_VHT_RX_MCS_MAP, &temp);
temp = (temp & 0xFFFC) | pConfig->vhtRxMCS;
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_RX_MCS_MAP,
temp, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_VHT_RX_MCS_MAP to CCM");
}
ccmCfgGetInt(pHddCtx->hHal, WNI_CFG_VHT_TX_MCS_MAP, &temp);
temp = (temp & 0xFFFC) | pConfig->vhtTxMCS;
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_TX_MCS_MAP,
temp, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_VHT_TX_MCS_MAP to CCM");
}
/* Currently shortGI40Mhz is used for shortGI80Mhz */
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_SHORT_GI_80MHZ,
pConfig->ShortGI40MhzEnable, NULL, eANI_BOOLEAN_FALSE)
== eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass WNI_VHT_SHORT_GI_80MHZ to CCM\n");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_AMPDU_LEN_EXPONENT,
pConfig->gVhtMaxAmpduLenExp, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_VHT_AMPDU_LEN_EXPONENT to CCM");
}
}
}
#endif
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_NUM_BUFF_ADVERT,pConfig->numBuffAdvert,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_NUM_BUFF_ADVERT to CCM");
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_HT_RX_STBC,
pConfig->enableRxSTBC, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_HT_RX_STBC to CCM");
}
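/* Read-modify-write of the HT capability info field: only the rxSTBC bits
   are updated from the ini setting; the rest of the 16-bit field is
   preserved. */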
ccmCfgGetInt(pHddCtx->hHal, WNI_CFG_HT_CAP_INFO, &temp32);
temp16 = temp32 & 0xffff;
htCapInfo = (tSirMacHTCapabilityInfo *)&temp16;
htCapInfo->rxSTBC = pConfig->enableRxSTBC;
temp32 = temp16;
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_HT_CAP_INFO,
temp32, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_HT_CAP_INFO to CCM");
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_VHT_RXSTBC,
pConfig->enableRxSTBC, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_VHT_RXSTBC to CCM");
}
#ifdef WLAN_SOFTAP_VSTA_FEATURE
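/* With VSTA support enabled, raise the associated-station limit to the
   maximum unless a value above the default has already been configured. */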
if(pConfig->fEnableVSTASupport)
{
ccmCfgGetInt(pHddCtx->hHal, WNI_CFG_ASSOC_STA_LIMIT, &val);
if( val <= WNI_CFG_ASSOC_STA_LIMIT_STADEF)
val = WNI_CFG_ASSOC_STA_LIMIT_STAMAX;
}
else
{
val = WNI_CFG_ASSOC_STA_LIMIT_STADEF;
}
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ASSOC_STA_LIMIT, val,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,"Failure: Could not pass on WNI_CFG_ASSOC_STA_LIMIT configuration info to CCM" );
}
#endif
if(ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_LPWR_IMG_TRANSITION,
pConfig->enableLpwrImgTransition, NULL, eANI_BOOLEAN_FALSE)
==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_LPWR_IMG_TRANSITION to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED, pConfig->enableMCCAdaptiveScheduler,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_MCC_ADAPTIVE_SCHED to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_DISABLE_LDPC_WITH_TXBF_AP, pConfig->disableLDPCWithTxbfAP,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_DISABLE_LDPC_WITH_TXBF_AP to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_DYNAMIC_THRESHOLD_ZERO, pConfig->retryLimitZero,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_DYNAMIC_THRESHOLD_ZERO to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_DYNAMIC_THRESHOLD_ONE, pConfig->retryLimitOne,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_DYNAMIC_THRESHOLD_ONE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_DYNAMIC_THRESHOLD_TWO, pConfig->retryLimitTwo,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_DYNAMIC_THRESHOLD_TWO to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_MEDIUM_TIME, pConfig->cfgMaxMediumTime,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_MEDIUM_TIME to CCM");
}
#ifdef FEATURE_WLAN_TDLS
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_QOS_WMM_UAPSD_MASK,
pConfig->fTDLSUapsdMask, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_QOS_WMM_UAPSD_MASK to CCM");
}
if (TRUE == pConfig->fEnableTDLSScanCoexSupport)
{
/* TDLSScanCoexistance feature is supported when the DUT acts as only
* the Sleep STA and hence explicitly disable the BufferSta capability
* on the DUT. DUT's Buffer STA capability is explicitly disabled to
* ensure that the TDLS peer shall not go to TDLS power save mode.
*/
pConfig->fEnableTDLSBufferSta = FALSE;
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_BUF_STA_ENABLED,
pConfig->fEnableTDLSBufferSta, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_BUF_STA_ENABLED to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_PUAPSD_INACT_TIME,
pConfig->fTDLSPuapsdInactivityTimer, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_PUAPSD_INACT_TIME to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_RX_FRAME_THRESHOLD,
pConfig->fTDLSRxFrameThreshold, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_RX_FRAME_THRESHOLD to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_OFF_CHANNEL_ENABLED,
pConfig->fEnableTDLSOffChannel, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_BUF_STA_ENABLED to CCM\n");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TDLS_WMM_MODE_ENABLED,
pConfig->fEnableTDLSWmmMode, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_TDLS_WMM_MODE_ENABLED to CCM\n");
}
#endif
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_ADAPT_RX_DRAIN,
pConfig->fEnableAdaptRxDrain, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_ADAPT_RX_DRAIN to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_FLEX_CONNECT_POWER_FACTOR,
pConfig->flexConnectPowerFactor, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Failure: Could not pass on "
"WNI_CFG_FLEX_CONNECT_POWER_FACTOR to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ANTENNA_DIVESITY,
pConfig->antennaDiversity, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ANTENNA_DIVESITY to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ATH_DISABLE,
pConfig->cfgAthDisable, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ATH_DISABLE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_ACTIVE_WLAN_LEN,
pConfig->cfgBtcActiveWlanLen,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_BTC_ACTIVE_WLAN_LEN to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_ACTIVE_BT_LEN,
pConfig->cfgBtcActiveBtLen,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_BTC_ACTIVE_BT_LEN to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_SAP_ACTIVE_WLAN_LEN,
pConfig->cfgBtcSapActiveWlanLen,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_BTC_ACTIVE_WLAN_LEN to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_SAP_ACTIVE_BT_LEN,
pConfig->cfgBtcSapActiveBtLen,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_BTC_ACTIVE_BT_LEN to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ASD_PROBE_INTERVAL,
pConfig->gAsdProbeInterval, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ASD_PROBE_INTERVAL to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ASD_TRIGGER_THRESHOLD,
pConfig->gAsdTriggerThreshold, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ASD_TRIGGER_THRESHOLD to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ASD_RTT_RSSI_HYST_THRESHOLD,
pConfig->gAsdRTTRssiHystThreshold, NULL,
eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ASD_RSSI_HYST_THRESHOLD to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_DEBUG_P2P_REMAIN_ON_CHANNEL,
pConfig->debugP2pRemainOnChannel,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE,
"Could not pass on WNI_CFG_DEBUG_P2P_REMAIN_ON_CHANNEL to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_CTS2S_DURING_SCO,
pConfig->cfgBtcCTS2SduringSCO,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BTC_CTS2S_DURING_SCO to CCM");
}
if(ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_DEFAULT_RATE_INDEX_24GHZ,
defHddRateToDefCfgRate(pConfig->defaultRateIndex24Ghz),
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_DEFAULT_RATE_INDEX_24GHZ to"
" CCM\n");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RA_FILTER_ENABLE, pConfig->cfgRAFilterEnable,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_RA_FILTER_ENABLE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_RA_RATE_LIMIT_INTERVAL, pConfig->cfgRARateLimitInterval,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_RA_FILTER_ENABLE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE,
pConfig->cfgBtcFatalHidnSniffBlkGuidance,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_BTC_FATAL_HID_NSNIFF_BLK_GUIDANCE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE,
pConfig->cfgBtcCriticalHidnSniffBlkGuidance,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_BTC_CRITICAL_HID_NSNIFF_BLK_GUIDANCE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD,
pConfig->cfgBtcA2dpTxQueueThold,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal,
WNI_CFG_BTC_DYN_OPP_TX_QUEUE_THOLD,
pConfig->cfgBtcOppTxQueueThold,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_BTC_DYN_OPP_TX_QUEUE_THOLD to CCM");
}
#ifdef WLAN_FEATURE_11W
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PMF_SA_QUERY_MAX_RETRIES,
pConfig->pmfSaQueryMaxRetries, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_SA_QUERY_MAX_RETRIES to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_PMF_SA_QUERY_RETRY_INTERVAL,
pConfig->pmfSaQueryRetryInterval, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_SA_QUERY_RETRY_INTERVAL to CCM");
}
#endif
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_CONSEC_SP,
pConfig->maxUapsdConsecSP, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_CONSEC_SP");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_CONSEC_RX_CNT,
pConfig->maxUapsdConsecRxCnt, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_CONSEC_RX_CNT");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_CONSEC_TX_CNT,
pConfig->maxUapsdConsecTxCnt, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_CONSEC_TX_CNT");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW,
pConfig->uapsdConsecTxCntMeasWindow, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW,
pConfig->uapsdConsecRxCntMeasWindow, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE,
pConfig->maxPsPollInWmmUapsdMode, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_MAX_UAPSD_INACTIVITY_INTERVALS,
pConfig->maxUapsdInactivityIntervals, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_MAX_UAPSD_INACTIVITY_INTERVALS");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_DYNAMIC_WMMPS,
pConfig->enableDynamicWMMPS, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_DYNAMIC_WMMPS");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BURST_MODE_BE_TXOP_VALUE,
pConfig->burstModeTXOPValue, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BURST_MODE_BE_TXOP_VALUE ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_DYNAMIC_RA_START_RATE,
pConfig->enableDynamicRAStartRate,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_ENABLE_DYNAMIC_RA_START_RATE to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BTC_FAST_WLAN_CONN_PREF,
pConfig->btcFastWlanConnPref, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BTC_FAST_WLAN_CONN_PREF ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_RTSCTS_HTVHT,
pConfig->enableRtsCtsHtVht,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_ENABLE_RTSCTS_HTVHT to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_TOGGLE_ARP_BDRATES,
pConfig->toggleArpBDRates,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on"
"WNI_CFG_TOGGLE_ARP_BDRATES to CCM");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN,
pConfig->btcStaticOppWlanIdleWlanLen, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN,
pConfig->btcStaticOppWlanIdleBtLen, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_LINK_FAIL_TIMEOUT,
pConfig->linkFailTimeout, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_LINK_FAIL_TIMEOUT ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_LINK_FAIL_TX_CNT,
pConfig->linkFailTxCnt, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_LINK_FAIL_TX_CNT ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_OPTIMIZE_CA_EVENT,
pConfig->gOptimizeCAevent, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_OPTIMIZE_CA_EVENT ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_ENABLE_MAC_ADDR_SPOOFING,
pConfig->enableMacSpoofing, NULL,
eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_ENABLE_MAC_ADDR_SPOOFING ");
}
if (ccmCfgSetInt(pHddCtx->hHal, WNI_CFG_SAR_BOFFSET_SET_CORRECTION,
pConfig->boffset_correction_enable,
NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
{
fStatus = FALSE;
hddLog(LOGE, "Could not pass on WNI_CFG_SAR_BOFFSET_SET_CORRECTION to CCM");
}
return fStatus;
}
/**---------------------------------------------------------------------------
\brief hdd_set_sme_config() -
This function initializes the SME configuration parameters
\param - pHddCtx - Pointer to the HDD context.
\return - VOS_STATUS_SUCCESS for success, error status for failure
--------------------------------------------------------------------------*/
VOS_STATUS hdd_set_sme_config( hdd_context_t *pHddCtx )
{
VOS_STATUS status = VOS_STATUS_SUCCESS;
eHalStatus halStatus;
tpSmeConfigParams smeConfig;
hdd_config_t *pConfig = pHddCtx->cfg_ini;
smeConfig = vos_mem_malloc(sizeof(tSmeConfigParams));
if (NULL == smeConfig)
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s smeConfig allocation failed",__func__);
return VOS_STATUS_E_NOMEM;
}
vos_mem_zero( smeConfig, sizeof( tSmeConfigParams ) );
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"%s WmmMode=%d b80211eIsEnabled=%d dot11Mode=%d", __func__,
pConfig->WmmMode, pConfig->b80211eIsEnabled, pConfig->dot11Mode);
// Config params obtained from the registry
smeConfig->csrConfig.RTSThreshold = pConfig->RTSThreshold;
smeConfig->csrConfig.FragmentationThreshold = pConfig->FragmentationThreshold;
smeConfig->csrConfig.shortSlotTime = pConfig->ShortSlotTimeEnabled;
smeConfig->csrConfig.Is11dSupportEnabled = pConfig->Is11dSupportEnabled;
smeConfig->csrConfig.HeartbeatThresh24 = pConfig->HeartbeatThresh24;
smeConfig->csrConfig.phyMode = hdd_cfg_xlate_to_csr_phy_mode ( pConfig->dot11Mode );
if( (pConfig->dot11Mode == eHDD_DOT11_MODE_abg) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11b) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11g) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11b_ONLY) ||
(pConfig->dot11Mode == eHDD_DOT11_MODE_11g_ONLY))
{
smeConfig->csrConfig.channelBondingMode24GHz = 0;
smeConfig->csrConfig.channelBondingMode5GHz = 0;
}
else
{
smeConfig->csrConfig.channelBondingMode24GHz = pConfig->nChannelBondingMode24GHz;
smeConfig->csrConfig.channelBondingMode5GHz = pConfig->nChannelBondingMode5GHz;
}
smeConfig->csrConfig.TxRate = pConfig->TxRate;
smeConfig->csrConfig.nScanResultAgeCount = pConfig->ScanResultAgeCount;
smeConfig->csrConfig.scanAgeTimeNCNPS = pConfig->nScanAgeTimeNCNPS;
smeConfig->csrConfig.scanAgeTimeNCPS = pConfig->nScanAgeTimeNCPS;
smeConfig->csrConfig.scanAgeTimeCNPS = pConfig->nScanAgeTimeCNPS;
smeConfig->csrConfig.scanAgeTimeCPS = pConfig->nScanAgeTimeCPS;
smeConfig->csrConfig.AdHocChannel24 = pConfig->OperatingChannel;
smeConfig->csrConfig.fEnforce11dChannels = pConfig->fEnforce11dChannels;
smeConfig->csrConfig.fSupplicantCountryCodeHasPriority = pConfig->fSupplicantCountryCodeHasPriority;
smeConfig->csrConfig.fEnforceCountryCodeMatch = pConfig->fEnforceCountryCodeMatch;
smeConfig->csrConfig.fEnforceDefaultDomain = pConfig->fEnforceDefaultDomain;
smeConfig->csrConfig.bCatRssiOffset = pConfig->nRssiCatGap;
smeConfig->csrConfig.vccRssiThreshold = pConfig->nVccRssiTrigger;
smeConfig->csrConfig.vccUlMacLossThreshold = pConfig->nVccUlMacLossThreshold;
smeConfig->csrConfig.nRoamingTime = pConfig->nRoamingTime;
smeConfig->csrConfig.IsIdleScanEnabled = pConfig->nEnableIdleScan;
smeConfig->csrConfig.nInitialDwellTime = pConfig->nInitialDwellTime;
smeConfig->csrConfig.nActiveMaxChnTime = pConfig->nActiveMaxChnTime;
smeConfig->csrConfig.nActiveMinChnTime = pConfig->nActiveMinChnTime;
smeConfig->csrConfig.nPassiveMaxChnTime = pConfig->nPassiveMaxChnTime;
smeConfig->csrConfig.nPassiveMinChnTime = pConfig->nPassiveMinChnTime;
smeConfig->csrConfig.nActiveMaxChnTimeBtc = pConfig->nActiveMaxChnTimeBtc;
smeConfig->csrConfig.nActiveMinChnTimeBtc = pConfig->nActiveMinChnTimeBtc;
smeConfig->csrConfig.disableAggWithBtc = pConfig->disableAggWithBtc;
#ifdef WLAN_AP_STA_CONCURRENCY
smeConfig->csrConfig.nActiveMaxChnTimeConc = pConfig->nActiveMaxChnTimeConc;
smeConfig->csrConfig.nActiveMinChnTimeConc = pConfig->nActiveMinChnTimeConc;
smeConfig->csrConfig.nPassiveMaxChnTimeConc = pConfig->nPassiveMaxChnTimeConc;
smeConfig->csrConfig.nPassiveMinChnTimeConc = pConfig->nPassiveMinChnTimeConc;
smeConfig->csrConfig.nRestTimeConc = pConfig->nRestTimeConc;
smeConfig->csrConfig.nNumStaChanCombinedConc = pConfig->nNumStaChanCombinedConc;
smeConfig->csrConfig.nNumP2PChanCombinedConc = pConfig->nNumP2PChanCombinedConc;
#endif
smeConfig->csrConfig.Is11eSupportEnabled = pConfig->b80211eIsEnabled;
smeConfig->csrConfig.WMMSupportMode = pConfig->WmmMode;
#if defined WLAN_FEATURE_VOWIFI
smeConfig->rrmConfig.rrmEnabled = pConfig->fRrmEnable;
smeConfig->rrmConfig.maxRandnInterval = pConfig->nRrmRandnIntvl;
#endif
//Remaining config params not obtained from registry
// On RF EVB beacon using channel 1.
#ifdef WLAN_FEATURE_11AC
smeConfig->csrConfig.nVhtChannelWidth = pConfig->vhtChannelWidth;
smeConfig->csrConfig.enableTxBF = pConfig->enableTxBF;
smeConfig->csrConfig.txBFCsnValue = pConfig->txBFCsnValue;
smeConfig->csrConfig.enableVhtFor24GHz = pConfig->enableVhtFor24GHzBand;
/* Consider Mu-beamformee only if SU-beamformee is enabled */
if ( pConfig->enableTxBF )
smeConfig->csrConfig.enableMuBformee = pConfig->enableMuBformee;
else
smeConfig->csrConfig.enableMuBformee = 0;
#endif
smeConfig->csrConfig.AdHocChannel5G = pConfig->AdHocChannel5G;
smeConfig->csrConfig.AdHocChannel24 = pConfig->AdHocChannel24G;
smeConfig->csrConfig.ProprietaryRatesEnabled = 0;
smeConfig->csrConfig.HeartbeatThresh50 = 40;
smeConfig->csrConfig.bandCapability = pConfig->nBandCapability;
if (pConfig->nBandCapability == eCSR_BAND_24)
{
smeConfig->csrConfig.Is11hSupportEnabled = 0;
} else {
smeConfig->csrConfig.Is11hSupportEnabled = pConfig->Is11hSupportEnabled;
}
smeConfig->csrConfig.cbChoice = 0;
smeConfig->csrConfig.bgScanInterval = 0;
smeConfig->csrConfig.eBand = pConfig->nBandCapability;
smeConfig->csrConfig.nTxPowerCap = pConfig->nTxPowerCap;
smeConfig->csrConfig.fEnableBypass11d = pConfig->enableBypass11d;
smeConfig->csrConfig.fEnableDFSChnlScan = pConfig->enableDFSChnlScan;
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
smeConfig->csrConfig.nRoamPrefer5GHz = pConfig->nRoamPrefer5GHz;
smeConfig->csrConfig.nRoamIntraBand = pConfig->nRoamIntraBand;
smeConfig->csrConfig.nProbes = pConfig->nProbes;
smeConfig->csrConfig.nRoamScanHomeAwayTime = pConfig->nRoamScanHomeAwayTime;
#endif
smeConfig->csrConfig.fFirstScanOnly2GChnl = pConfig->enableFirstScan2GOnly;
//FIXME 11d config is hardcoded
if ( VOS_STA_SAP_MODE != hdd_get_conparam())
{
smeConfig->csrConfig.Csr11dinfo.Channels.numChannels = 0;
/* if there is a requirement that HDD will control the default
* channel list & country code (say from .ini file) we need to
* add some logic here. Otherwise the default 11d info should
* come from NV as per our current implementation */
}
else
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"AP country Code %s", pConfig->apCntryCode);
if (memcmp(pConfig->apCntryCode, CFG_AP_COUNTRY_CODE_DEFAULT, 3) != 0)
sme_setRegInfo(pHddCtx->hHal, pConfig->apCntryCode);
sme_set11dinfo(pHddCtx->hHal, smeConfig);
}
hdd_set_power_save_config(pHddCtx, smeConfig);
hdd_set_btc_config(pHddCtx);
#ifdef WLAN_FEATURE_VOWIFI_11R
smeConfig->csrConfig.csr11rConfig.IsFTResourceReqSupported = pConfig->fFTResourceReqSupported;
#endif
#ifdef FEATURE_WLAN_LFR
smeConfig->csrConfig.isFastRoamIniFeatureEnabled = pConfig->isFastRoamIniFeatureEnabled;
smeConfig->csrConfig.MAWCEnabled = pConfig->MAWCEnabled;
#endif
#ifdef FEATURE_WLAN_ESE
smeConfig->csrConfig.isEseIniFeatureEnabled = pConfig->isEseIniFeatureEnabled;
if( pConfig->isEseIniFeatureEnabled )
{
pConfig->isFastTransitionEnabled = TRUE;
}
#endif
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
smeConfig->csrConfig.isFastTransitionEnabled = pConfig->isFastTransitionEnabled;
smeConfig->csrConfig.RoamRssiDiff = pConfig->RoamRssiDiff;
smeConfig->csrConfig.nImmediateRoamRssiDiff = pConfig->nImmediateRoamRssiDiff;
smeConfig->csrConfig.isWESModeEnabled = pConfig->isWESModeEnabled;
#endif
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
smeConfig->csrConfig.isRoamOffloadScanEnabled = pConfig->isRoamOffloadScanEnabled;
smeConfig->csrConfig.bFastRoamInConIniFeatureEnabled = pConfig->bFastRoamInConIniFeatureEnabled;
if (0 == smeConfig->csrConfig.isRoamOffloadScanEnabled)
{
/* Disable roaming in concurrency if roam scan offload is disabled */
smeConfig->csrConfig.bFastRoamInConIniFeatureEnabled = 0;
}
#endif
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
smeConfig->csrConfig.neighborRoamConfig.nNeighborReassocRssiThreshold = pConfig->nNeighborReassocRssiThreshold;
smeConfig->csrConfig.neighborRoamConfig.nNeighborLookupRssiThreshold = pConfig->nNeighborLookupRssiThreshold;
smeConfig->csrConfig.neighborRoamConfig.nNeighborScanMaxChanTime = pConfig->nNeighborScanMaxChanTime;
smeConfig->csrConfig.neighborRoamConfig.nNeighborScanMinChanTime = pConfig->nNeighborScanMinChanTime;
smeConfig->csrConfig.neighborRoamConfig.nNeighborScanTimerPeriod = pConfig->nNeighborScanPeriod;
smeConfig->csrConfig.neighborRoamConfig.nMaxNeighborRetries = pConfig->nMaxNeighborReqTries;
smeConfig->csrConfig.neighborRoamConfig.nNeighborResultsRefreshPeriod = pConfig->nNeighborResultsRefreshPeriod;
smeConfig->csrConfig.neighborRoamConfig.nEmptyScanRefreshPeriod = pConfig->nEmptyScanRefreshPeriod;
// Forced 5G roaming is tightly coupled to gEnableFirstScan2GOnly=1.
// Also, if the HW does not support the 5G RF band, do not enable this
// feature even though it is enabled in the .ini.
if((pConfig->enableFirstScan2GOnly) && (pConfig->nBandCapability != eCSR_BAND_24))
{
smeConfig->csrConfig.neighborRoamConfig.nNeighborInitialForcedRoamTo5GhEnable
= pConfig->nNeighborInitialForcedRoamTo5GhEnable;
}
hdd_string_to_u8_array( pConfig->neighborScanChanList,
smeConfig->csrConfig.neighborRoamConfig.neighborScanChanList.channelList,
&smeConfig->csrConfig.neighborRoamConfig.neighborScanChanList.numChannels,
WNI_CFG_VALID_CHANNEL_LIST_LEN );
#endif
smeConfig->csrConfig.addTSWhenACMIsOff = pConfig->AddTSWhenACMIsOff;
smeConfig->csrConfig.fValidateList = pConfig->fValidateScanList;
smeConfig->csrConfig.allowDFSChannelRoam = pConfig->allowDFSChannelRoam;
//Enable/Disable MCC
smeConfig->csrConfig.fEnableMCCMode = pConfig->enableMCC;
smeConfig->csrConfig.fAllowMCCGODiffBI = pConfig->allowMCCGODiffBI;
//Scan Results Aging Time out value
smeConfig->csrConfig.scanCfgAgingTime = pConfig->scanAgingTimeout;
smeConfig->csrConfig.enableTxLdpc = pConfig->enableTxLdpc;
smeConfig->csrConfig.isAmsduSupportInAMPDU = pConfig->isAmsduSupportInAMPDU;
if(pConfig->nBandCapability != eCSR_BAND_24)
{
smeConfig->csrConfig.nSelect5GHzMargin = pConfig->nSelect5GHzMargin;
}
smeConfig->csrConfig.ignorePeerErpInfo = pConfig->ignorePeerErpInfo;
smeConfig->csrConfig.ignorePeerHTopMode = pConfig->ignorePeerHTopMode;
smeConfig->csrConfig.disableP2PMacSpoofing = pConfig->disableP2PMacSpoofing;
smeConfig->csrConfig.initialScanSkipDFSCh = pConfig->initialScanSkipDFSCh;
smeConfig->csrConfig.isCoalesingInIBSSAllowed =
pHddCtx->cfg_ini->isCoalesingInIBSSAllowed;
/* update SSR config */
sme_UpdateEnableSSR((tHalHandle)(pHddCtx->hHal), pHddCtx->cfg_ini->enableSSR);
/* Update the Directed scan offload setting */
smeConfig->fScanOffload = pHddCtx->cfg_ini->fScanOffload;
smeConfig->csrConfig.scanBandPreference =
pHddCtx->cfg_ini->acsScanBandPreference;
smeConfig->fEnableDebugLog = pHddCtx->cfg_ini->gEnableDebugLog;
smeConfig->csrConfig.sendDeauthBeforeCon = pConfig->sendDeauthBeforeCon;
smeConfig->csrConfig.nOBSSScanWidthTriggerInterval =
pConfig->nOBSSScanWidthTriggerInterval;
smeConfig->fDeferIMPSTime = pHddCtx->cfg_ini->deferImpsTime;
smeConfig->fBtcEnableIndTimerVal = pHddCtx->cfg_ini->btcEnableIndTimerVal;
smeConfig->csrConfig.roamDelayStatsEnabled = pHddCtx->cfg_ini->gEnableRoamDelayStats;
vos_set_multicast_logging(pHddCtx->cfg_ini->multicast_host_msgs);
halStatus = sme_UpdateConfig( pHddCtx->hHal, smeConfig);
if ( !HAL_STATUS_SUCCESS( halStatus ) )
{
status = VOS_STATUS_E_FAILURE;
hddLog(LOGE, "sme_UpdateConfig() return failure %d", halStatus);
}
vos_mem_free(smeConfig);
return status;
}
/**---------------------------------------------------------------------------
\brief hdd_execute_config_command() -
This function executes an arbitrary configuration set command
\param - pHddCtx - Pointer to the HDD context.
\param - command - a configuration command of the form:
<name>=<value>
\return - 0 for success, non zero for failure
--------------------------------------------------------------------------*/
VOS_STATUS hdd_execute_config_command(hdd_context_t *pHddCtx, char *command)
{
size_t tableSize = sizeof(g_registry_table)/sizeof(g_registry_table[0]);
REG_TABLE_ENTRY *pRegEntry;
char *clone;
char *pCmd;
void *pField;
char *name;
char *value_str;
v_U32_t value;
v_S31_t svalue;
size_t len_value_str;
unsigned int idx;
unsigned int i;
VOS_STATUS vstatus;
int rv;
// assume failure until proven otherwise
vstatus = VOS_STATUS_E_FAILURE;
// clone the command so that we can manipulate it
clone = kstrdup(command, GFP_ATOMIC);
if (NULL == clone)
{
hddLog(LOGE, "%s: memory allocation failure, unable to process [%s]",
__func__, command);
return vstatus;
}
// 'clone' will point to the beginning of the string so it can be freed
// 'pCmd' will be used to walk/parse the command
pCmd = clone;
// get rid of leading/trailing whitespace
pCmd = i_trim(pCmd);
if ('\0' == *pCmd)
{
// only whitespace
hddLog(LOGE, "%s: invalid command, only whitespace:[%s]",
__func__, command);
goto done;
}
// parse the <name> = <value>
name = pCmd;
while (('=' != *pCmd) && ('\0' != *pCmd))
{
pCmd++;
}
if ('\0' == *pCmd)
{
// did not find '='
hddLog(LOGE, "%s: invalid command, no '=':[%s]",
__func__, command);
goto done;
}
// replace '=' with NUL to terminate the <name>
*pCmd++ = '\0';
name = i_trim(name);
if ('\0' == *name)
{
// did not find a name
hddLog(LOGE, "%s: invalid command, no <name>:[%s]",
__func__, command);
goto done;
}
value_str = i_trim(pCmd);
if ('\0' == *value_str)
{
// did not find a value
hddLog(LOGE, "%s: invalid command, no <value>:[%s]",
__func__, command);
goto done;
}
// lookup the configuration item
for (idx = 0; idx < tableSize; idx++)
{
if (0 == strcmp(name, g_registry_table[idx].RegName))
{
// found a match
break;
}
}
if (tableSize == idx)
{
// did not match the name
hddLog(LOGE, "%s: invalid command, unknown configuration item:[%s]",
__func__, command);
goto done;
}
pRegEntry = &g_registry_table[idx];
if (!(pRegEntry->Flags & VAR_FLAGS_DYNAMIC_CFG))
{
// does not support dynamic configuration
hddLog(LOGE, "%s: invalid command, %s does not support "
"dynamic configuration", __func__, name);
goto done;
}
pField = ((v_U8_t *)pHddCtx->cfg_ini) + pRegEntry->VarOffset;
switch (pRegEntry->RegType)
{
case WLAN_PARAM_Integer:
rv = kstrtou32(value_str, 10, &value);
if (rv < 0)
goto done;
if (value < pRegEntry->VarMin)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %u < min value %lu",
__func__, value, pRegEntry->VarMin);
goto done;
}
if (value > pRegEntry->VarMax)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %u > max value %lu",
__func__, value, pRegEntry->VarMax);
goto done;
}
memcpy(pField, &value, pRegEntry->VarSize);
break;
case WLAN_PARAM_HexInteger:
rv = kstrtou32(value_str, 16, &value);
if (rv < 0)
goto done;
if (value < pRegEntry->VarMin)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %x < min value %lx",
__func__, value, pRegEntry->VarMin);
goto done;
}
if (value > pRegEntry->VarMax)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %x > max value %lx",
__func__, value, pRegEntry->VarMax);
goto done;
}
memcpy(pField, &value, pRegEntry->VarSize);
break;
case WLAN_PARAM_SignedInteger:
rv = kstrtos32(value_str, 10, &svalue);
if (rv < 0)
goto done;
if (svalue < (v_S31_t)pRegEntry->VarMin)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %d < min value %d",
__func__, svalue, (int)pRegEntry->VarMin);
goto done;
}
if (svalue > (v_S31_t)pRegEntry->VarMax)
{
// out of range
hddLog(LOGE, "%s: invalid command, value %d > max value %d",
__func__, svalue, (int)pRegEntry->VarMax);
goto done;
}
memcpy(pField, &svalue, pRegEntry->VarSize);
break;
case WLAN_PARAM_String:
len_value_str = strlen(value_str);
if (len_value_str > (pRegEntry->VarSize - 1))
{
// too big
hddLog(LOGE,
"%s: invalid command, string [%s] length "
"%zu exceeds maximum length %u",
__func__, value_str,
len_value_str, (pRegEntry->VarSize - 1));
goto done;
}
// copy string plus NUL
memcpy(pField, value_str, (len_value_str + 1));
break;
case WLAN_PARAM_MacAddr:
len_value_str = strlen(value_str);
if (len_value_str != (VOS_MAC_ADDR_SIZE * 2))
{
// out of range
hddLog(LOGE,
"%s: invalid command, MAC address [%s] length "
"%zu is not expected length %u",
__func__, value_str,
len_value_str, (VOS_MAC_ADDR_SIZE * 2));
goto done;
}
//parse the string and store it in the byte array
for (i = 0; i < VOS_MAC_ADDR_SIZE; i++)
{
((char*)pField)[i] = (char)
((parseHexDigit(value_str[(i * 2)]) * 16) +
parseHexDigit(value_str[(i * 2) + 1]));
}
break;
default:
goto done;
}
// if we get here, we had a successful modification
vstatus = VOS_STATUS_SUCCESS;
// config table has been modified, is there a notifier?
if (NULL != pRegEntry->pfnDynamicNotify)
{
(pRegEntry->pfnDynamicNotify)(pHddCtx, pRegEntry->NotifyId);
}
// note that this item was explicitly configured
if (idx < MAX_CFG_INI_ITEMS)
{
set_bit(idx, (void *)&pHddCtx->cfg_ini->bExplicitCfg);
}
done:
kfree(clone);
return vstatus;
}
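/*
 * Illustrative usage sketch (not part of the original source): the command
 * string must have the form "<name>=<value>", where <name> matches a
 * g_registry_table entry flagged with VAR_FLAGS_DYNAMIC_CFG. The item name
 * below is hypothetical and may not be dynamically configurable in every
 * build.
 */
#if 0 /* example only */
static void hdd_dynamic_cfg_example(hdd_context_t *pHddCtx)
{
        char cmd[] = "gEnableDebugLog=1"; /* hypothetical item/value pair */

        if (VOS_STATUS_SUCCESS != hdd_execute_config_command(pHddCtx, cmd))
                hddLog(LOGE, "%s: dynamic config of [%s] failed",
                       __func__, cmd);
}
#endif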
/**---------------------------------------------------------------------------
\brief hdd_is_okc_mode_enabled() -
This function returns whether OKC mode is enabled or not
\param - pHddCtx - Pointer to the HDD context.
\return - 1 for enabled, zero for disabled, -EINVAL if pHddCtx is NULL
--------------------------------------------------------------------------*/
tANI_BOOLEAN hdd_is_okc_mode_enabled(hdd_context_t *pHddCtx)
{
if (NULL == pHddCtx)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: pHddCtx is NULL", __func__);
return -EINVAL;
}
#ifdef FEATURE_WLAN_OKC
return pHddCtx->cfg_ini->isOkcIniFeatureEnabled;
#else
return eANI_BOOLEAN_FALSE;
#endif
}
| gpl-2.0 |
latlontude/linux | drivers/staging/comedi/drivers/me4000.c | 77 | 42452 | /*
comedi/drivers/me4000.c
Source code for the Meilhaus ME-4000 board family.
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
Driver: me4000
Description: Meilhaus ME-4000 series boards
Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i, ME-4680is
Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
Updated: Mon, 18 Mar 2002 15:34:01 -0800
Status: broken (no support for loading firmware)
Supports:
- Analog Input
- Analog Output
- Digital I/O
- Counter
Configuration Options: not applicable, uses PCI auto config
The firmware required by these boards is available in the
comedi_nonfree_firmware tarball available from
http://www.comedi.org. However, the driver's support for
loading the firmware through comedi_config is currently
broken.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "8253.h"
#include "plx9052.h"
#define ME4000_FIRMWARE "me4000_firmware.bin"
/*
* ME4000 Register map and bit defines
*/
#define ME4000_AO_CHAN(x) ((x) * 0x18)
#define ME4000_AO_CTRL_REG(x) (0x00 + ME4000_AO_CHAN(x))
#define ME4000_AO_CTRL_BIT_MODE_0 (1 << 0)
#define ME4000_AO_CTRL_BIT_MODE_1 (1 << 1)
#define ME4000_AO_CTRL_MASK_MODE (3 << 0)
#define ME4000_AO_CTRL_BIT_STOP (1 << 2)
#define ME4000_AO_CTRL_BIT_ENABLE_FIFO (1 << 3)
#define ME4000_AO_CTRL_BIT_ENABLE_EX_TRIG (1 << 4)
#define ME4000_AO_CTRL_BIT_EX_TRIG_EDGE (1 << 5)
#define ME4000_AO_CTRL_BIT_IMMEDIATE_STOP (1 << 7)
#define ME4000_AO_CTRL_BIT_ENABLE_DO (1 << 8)
#define ME4000_AO_CTRL_BIT_ENABLE_IRQ (1 << 9)
#define ME4000_AO_CTRL_BIT_RESET_IRQ (1 << 10)
#define ME4000_AO_STATUS_REG(x) (0x04 + ME4000_AO_CHAN(x))
#define ME4000_AO_STATUS_BIT_FSM (1 << 0)
#define ME4000_AO_STATUS_BIT_FF (1 << 1)
#define ME4000_AO_STATUS_BIT_HF (1 << 2)
#define ME4000_AO_STATUS_BIT_EF (1 << 3)
#define ME4000_AO_FIFO_REG(x) (0x08 + ME4000_AO_CHAN(x))
#define ME4000_AO_SINGLE_REG(x) (0x0c + ME4000_AO_CHAN(x))
#define ME4000_AO_TIMER_REG(x) (0x10 + ME4000_AO_CHAN(x))
#define ME4000_AI_CTRL_REG 0x74
#define ME4000_AI_STATUS_REG 0x74
#define ME4000_AI_CTRL_BIT_MODE_0 (1 << 0)
#define ME4000_AI_CTRL_BIT_MODE_1 (1 << 1)
#define ME4000_AI_CTRL_BIT_MODE_2 (1 << 2)
#define ME4000_AI_CTRL_BIT_SAMPLE_HOLD (1 << 3)
#define ME4000_AI_CTRL_BIT_IMMEDIATE_STOP (1 << 4)
#define ME4000_AI_CTRL_BIT_STOP (1 << 5)
#define ME4000_AI_CTRL_BIT_CHANNEL_FIFO (1 << 6)
#define ME4000_AI_CTRL_BIT_DATA_FIFO (1 << 7)
#define ME4000_AI_CTRL_BIT_FULLSCALE (1 << 8)
#define ME4000_AI_CTRL_BIT_OFFSET (1 << 9)
#define ME4000_AI_CTRL_BIT_EX_TRIG_ANALOG (1 << 10)
#define ME4000_AI_CTRL_BIT_EX_TRIG (1 << 11)
#define ME4000_AI_CTRL_BIT_EX_TRIG_FALLING (1 << 12)
#define ME4000_AI_CTRL_BIT_EX_IRQ (1 << 13)
#define ME4000_AI_CTRL_BIT_EX_IRQ_RESET (1 << 14)
#define ME4000_AI_CTRL_BIT_LE_IRQ (1 << 15)
#define ME4000_AI_CTRL_BIT_LE_IRQ_RESET (1 << 16)
#define ME4000_AI_CTRL_BIT_HF_IRQ (1 << 17)
#define ME4000_AI_CTRL_BIT_HF_IRQ_RESET (1 << 18)
#define ME4000_AI_CTRL_BIT_SC_IRQ (1 << 19)
#define ME4000_AI_CTRL_BIT_SC_IRQ_RESET (1 << 20)
#define ME4000_AI_CTRL_BIT_SC_RELOAD (1 << 21)
#define ME4000_AI_STATUS_BIT_EF_CHANNEL (1 << 22)
#define ME4000_AI_STATUS_BIT_HF_CHANNEL (1 << 23)
#define ME4000_AI_STATUS_BIT_FF_CHANNEL (1 << 24)
#define ME4000_AI_STATUS_BIT_EF_DATA (1 << 25)
#define ME4000_AI_STATUS_BIT_HF_DATA (1 << 26)
#define ME4000_AI_STATUS_BIT_FF_DATA (1 << 27)
#define ME4000_AI_STATUS_BIT_LE (1 << 28)
#define ME4000_AI_STATUS_BIT_FSM (1 << 29)
#define ME4000_AI_CTRL_BIT_EX_TRIG_BOTH (1 << 31)
#define ME4000_AI_CHANNEL_LIST_REG 0x78
#define ME4000_AI_LIST_INPUT_SINGLE_ENDED (0 << 5)
#define ME4000_AI_LIST_INPUT_DIFFERENTIAL (1 << 5)
#define ME4000_AI_LIST_RANGE_BIPOLAR_10 (0 << 6)
#define ME4000_AI_LIST_RANGE_BIPOLAR_2_5 (1 << 6)
#define ME4000_AI_LIST_RANGE_UNIPOLAR_10 (2 << 6)
#define ME4000_AI_LIST_RANGE_UNIPOLAR_2_5 (3 << 6)
#define ME4000_AI_LIST_LAST_ENTRY (1 << 8)
#define ME4000_AI_DATA_REG 0x7c
#define ME4000_AI_CHAN_TIMER_REG 0x80
#define ME4000_AI_CHAN_PRE_TIMER_REG 0x84
#define ME4000_AI_SCAN_TIMER_LOW_REG 0x88
#define ME4000_AI_SCAN_TIMER_HIGH_REG 0x8c
#define ME4000_AI_SCAN_PRE_TIMER_LOW_REG 0x90
#define ME4000_AI_SCAN_PRE_TIMER_HIGH_REG 0x94
#define ME4000_AI_START_REG 0x98
#define ME4000_IRQ_STATUS_REG 0x9c
#define ME4000_IRQ_STATUS_BIT_EX (1 << 0)
#define ME4000_IRQ_STATUS_BIT_LE (1 << 1)
#define ME4000_IRQ_STATUS_BIT_AI_HF (1 << 2)
#define ME4000_IRQ_STATUS_BIT_AO_0_HF (1 << 3)
#define ME4000_IRQ_STATUS_BIT_AO_1_HF (1 << 4)
#define ME4000_IRQ_STATUS_BIT_AO_2_HF (1 << 5)
#define ME4000_IRQ_STATUS_BIT_AO_3_HF (1 << 6)
#define ME4000_IRQ_STATUS_BIT_SC (1 << 7)
#define ME4000_DIO_PORT_0_REG 0xa0
#define ME4000_DIO_PORT_1_REG 0xa4
#define ME4000_DIO_PORT_2_REG 0xa8
#define ME4000_DIO_PORT_3_REG 0xac
#define ME4000_DIO_DIR_REG 0xb0
#define ME4000_AO_LOADSETREG_XX 0xb4
#define ME4000_DIO_CTRL_REG 0xb8
#define ME4000_DIO_CTRL_BIT_MODE_0 (1 << 0)
#define ME4000_DIO_CTRL_BIT_MODE_1 (1 << 1)
#define ME4000_DIO_CTRL_BIT_MODE_2 (1 << 2)
#define ME4000_DIO_CTRL_BIT_MODE_3 (1 << 3)
#define ME4000_DIO_CTRL_BIT_MODE_4 (1 << 4)
#define ME4000_DIO_CTRL_BIT_MODE_5 (1 << 5)
#define ME4000_DIO_CTRL_BIT_MODE_6 (1 << 6)
#define ME4000_DIO_CTRL_BIT_MODE_7 (1 << 7)
#define ME4000_DIO_CTRL_BIT_FUNCTION_0 (1 << 8)
#define ME4000_DIO_CTRL_BIT_FUNCTION_1 (1 << 9)
#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_0 (1 << 10)
#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_1 (1 << 11)
#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_2 (1 << 12)
#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_3 (1 << 13)
#define ME4000_AO_DEMUX_ADJUST_REG 0xbc
#define ME4000_AO_DEMUX_ADJUST_VALUE 0x4c
#define ME4000_AI_SAMPLE_COUNTER_REG 0xc0
#define ME4000_AI_FIFO_COUNT 2048
#define ME4000_AI_MIN_TICKS 66
#define ME4000_AI_MIN_SAMPLE_TIME 2000
#define ME4000_AI_CHANNEL_LIST_COUNT 1024
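/*
 * Worked example (illustrative, derived from the defines above): a
 * channel-list entry for channel 5, differential input, bipolar 10 V,
 * marked as the last entry, is built as
 *
 *      entry = 5                                       (channel number)
 *            | ME4000_AI_LIST_INPUT_DIFFERENTIAL       (1 << 5)
 *            | ME4000_AI_LIST_RANGE_BIPOLAR_10         (0 << 6)
 *            | ME4000_AI_LIST_LAST_ENTRY               (1 << 8)
 *
 * i.e. 0x125, written to ME4000_AI_CHANNEL_LIST_REG.
 */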
struct me4000_info {
unsigned long plx_regbase;
unsigned long timer_regbase;
};
enum me4000_boardid {
BOARD_ME4650,
BOARD_ME4660,
BOARD_ME4660I,
BOARD_ME4660S,
BOARD_ME4660IS,
BOARD_ME4670,
BOARD_ME4670I,
BOARD_ME4670S,
BOARD_ME4670IS,
BOARD_ME4680,
BOARD_ME4680I,
BOARD_ME4680S,
BOARD_ME4680IS,
};
struct me4000_board {
const char *name;
int ao_nchan;
int ao_fifo;
int ai_nchan;
int ai_diff_nchan;
int ai_sh_nchan;
int ex_trig_analog;
int dio_nchan;
int has_counter;
};
static const struct me4000_board me4000_boards[] = {
[BOARD_ME4650] = {
.name = "ME-4650",
.ai_nchan = 16,
.dio_nchan = 32,
},
[BOARD_ME4660] = {
.name = "ME-4660",
.ai_nchan = 32,
.ai_diff_nchan = 16,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4660I] = {
.name = "ME-4660i",
.ai_nchan = 32,
.ai_diff_nchan = 16,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4660S] = {
.name = "ME-4660s",
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4660IS] = {
.name = "ME-4660is",
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4670] = {
.name = "ME-4670",
.ao_nchan = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4670I] = {
.name = "ME-4670i",
.ao_nchan = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4670S] = {
.name = "ME-4670s",
.ao_nchan = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4670IS] = {
.name = "ME-4670is",
.ao_nchan = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4680] = {
.name = "ME-4680",
.ao_nchan = 4,
.ao_fifo = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4680I] = {
.name = "ME-4680i",
.ao_nchan = 4,
.ao_fifo = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4680S] = {
.name = "ME-4680s",
.ao_nchan = 4,
.ao_fifo = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
[BOARD_ME4680IS] = {
.name = "ME-4680is",
.ao_nchan = 4,
.ao_fifo = 4,
.ai_nchan = 32,
.ai_diff_nchan = 16,
.ai_sh_nchan = 8,
.ex_trig_analog = 1,
.dio_nchan = 32,
.has_counter = 1,
},
};
static const struct comedi_lrange me4000_ai_range = {
4, {
UNI_RANGE(2.5),
UNI_RANGE(10),
BIP_RANGE(2.5),
BIP_RANGE(10)
}
};
static int me4000_xilinx_download(struct comedi_device *dev,
const u8 *data, size_t size,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct me4000_info *info = dev->private;
unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
unsigned int file_length;
unsigned int val;
unsigned int i;
if (!xilinx_iobase)
return -ENODEV;
/*
* Set PLX local interrupt 2 polarity to high.
* The interrupt is raised by the /INIT pin of the Xilinx.
*/
outl(PLX9052_INTCSR_LI2POL, info->plx_regbase + PLX9052_INTCSR);
/* Set /CS and /WRITE of the Xilinx */
val = inl(info->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_UIO2_DATA;
outl(val, info->plx_regbase + PLX9052_CNTRL);
/* Init Xilinx with CS1 */
inb(xilinx_iobase + 0xC8);
/* Wait until /INIT pin is set */
udelay(20);
val = inl(info->plx_regbase + PLX9052_INTCSR);
if (!(val & PLX9052_INTCSR_LI2STAT)) {
dev_err(dev->class_dev, "Can't init Xilinx\n");
return -EIO;
}
/* Reset /CS and /WRITE of the Xilinx */
val = inl(info->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_UIO2_DATA;
outl(val, info->plx_regbase + PLX9052_CNTRL);
/* Download Xilinx firmware */
file_length = (((unsigned int)data[0] & 0xff) << 24) +
(((unsigned int)data[1] & 0xff) << 16) +
(((unsigned int)data[2] & 0xff) << 8) +
((unsigned int)data[3] & 0xff);
udelay(10);
for (i = 0; i < file_length; i++) {
outb(data[16 + i], xilinx_iobase);
udelay(10);
/* Check if BUSY flag is low */
val = inl(info->plx_regbase + PLX9052_CNTRL);
if (val & PLX9052_CNTRL_UIO1_DATA) {
dev_err(dev->class_dev,
"Xilinx is still busy (i = %d)\n", i);
return -EIO;
}
}
/* If the DONE flag is high, the download was successful */
val = inl(info->plx_regbase + PLX9052_CNTRL);
if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
dev_err(dev->class_dev, "DONE flag is not set\n");
dev_err(dev->class_dev, "Download not successful\n");
return -EIO;
}
/* Set /CS and /WRITE */
val = inl(info->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_UIO2_DATA;
outl(val, info->plx_regbase + PLX9052_CNTRL);
return 0;
}
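/*
 * Firmware image layout assumed by me4000_xilinx_download() above (an
 * informal sketch inferred from the code, not an official format
 * description):
 *
 *      bytes 0..3   big-endian bitstream length (file_length)
 *      bytes 4..15  header, skipped
 *      bytes 16..   bitstream, written byte-wise to the Xilinx
 */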
static void me4000_reset(struct comedi_device *dev)
{
struct me4000_info *info = dev->private;
unsigned int val;
int chan;
/* Make a hardware reset */
val = inl(info->plx_regbase + PLX9052_CNTRL);
val |= PLX9052_CNTRL_PCI_RESET;
outl(val, info->plx_regbase + PLX9052_CNTRL);
val &= ~PLX9052_CNTRL_PCI_RESET;
outl(val, info->plx_regbase + PLX9052_CNTRL);
/* 0x8000 to the DACs means an output voltage of 0V */
for (chan = 0; chan < 4; chan++)
outl(0x8000, dev->iobase + ME4000_AO_SINGLE_REG(chan));
/* Set both stop bits in the analog input control register */
outl(ME4000_AI_CTRL_BIT_IMMEDIATE_STOP | ME4000_AI_CTRL_BIT_STOP,
dev->iobase + ME4000_AI_CTRL_REG);
/* Set both stop bits in the analog output control register */
val = ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP;
for (chan = 0; chan < 4; chan++)
outl(val, dev->iobase + ME4000_AO_CTRL_REG(chan));
/* Enable interrupts on the PLX */
outl(PLX9052_INTCSR_LI1ENAB |
PLX9052_INTCSR_LI1POL |
PLX9052_INTCSR_PCIENAB, info->plx_regbase + PLX9052_INTCSR);
/* Set the adjustment register for AO demux */
outl(ME4000_AO_DEMUX_ADJUST_VALUE,
dev->iobase + ME4000_AO_DEMUX_ADJUST_REG);
/*
* Set digital I/O direction for port 0
* to output on isolated versions
*/
if (!(inl(dev->iobase + ME4000_DIO_DIR_REG) & 0x1))
outl(0x1, dev->iobase + ME4000_DIO_CTRL_REG);
}
/*=============================================================================
Analog input section
===========================================================================*/
static int me4000_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *subdevice,
struct comedi_insn *insn, unsigned int *data)
{
const struct me4000_board *thisboard = dev->board_ptr;
int chan = CR_CHAN(insn->chanspec);
int rang = CR_RANGE(insn->chanspec);
int aref = CR_AREF(insn->chanspec);
unsigned int entry = 0;
unsigned int tmp;
unsigned int lval;
if (insn->n == 0) {
return 0;
} else if (insn->n > 1) {
dev_err(dev->class_dev, "Invalid instruction length %d\n",
insn->n);
return -EINVAL;
}
switch (rang) {
case 0:
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
break;
case 1:
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
break;
case 2:
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
break;
case 3:
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
break;
default:
dev_err(dev->class_dev, "Invalid range specified\n");
return -EINVAL;
}
switch (aref) {
case AREF_GROUND:
case AREF_COMMON:
if (chan >= thisboard->ai_nchan) {
dev_err(dev->class_dev,
"Analog input is not available\n");
return -EINVAL;
}
entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED | chan;
break;
case AREF_DIFF:
if (rang == 0 || rang == 1) {
dev_err(dev->class_dev,
"Range must be bipolar when aref = diff\n");
return -EINVAL;
}
if (chan >= thisboard->ai_diff_nchan) {
dev_err(dev->class_dev,
"Analog input is not available\n");
return -EINVAL;
}
entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL | chan;
break;
default:
dev_err(dev->class_dev, "Invalid aref specified\n");
return -EINVAL;
}
entry |= ME4000_AI_LIST_LAST_ENTRY;
/* Clear channel list, data fifo and both stop bits */
tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
tmp &= ~(ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
ME4000_AI_CTRL_BIT_DATA_FIFO |
ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Set the acquisition mode to single */
tmp &= ~(ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_MODE_1 |
ME4000_AI_CTRL_BIT_MODE_2);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Enable channel list and data fifo */
tmp |= ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Generate channel list entry */
outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
/* Set the timer to maximum sample rate */
outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
/* Start conversion by dummy read */
inl(dev->iobase + ME4000_AI_START_REG);
/* Wait until ready */
udelay(10);
if (!(inl(dev->iobase + ME4000_AI_STATUS_REG) &
ME4000_AI_STATUS_BIT_EF_DATA)) {
dev_err(dev->class_dev, "Value not available after wait\n");
return -EIO;
}
/* Read value from data fifo */
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
data[0] = lval ^ 0x8000;
return 1;
}
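/*
 * Note on the XOR above: the ADC apparently delivers 16-bit two's-complement
 * samples; XOR-ing with 0x8000 converts them to the straight-binary
 * 0..0xFFFF encoding comedi expects. A minimal sketch of converting such a
 * sample to microvolts for the bipolar 10 V range (illustrative helper, not
 * part of the driver; assumes <linux/math64.h> for div_s64()):
 */
#if 0 /* example only */
static s64 me4000_bip10_sample_to_uv(unsigned int sample)
{
        /* 0x0000 -> -10 V, 0x8000 -> 0 V, 0xFFFF -> just below +10 V */
        return div_s64(((s64)sample - 0x8000) * 20000000, 0x10000);
}
#endif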
static int me4000_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
unsigned int tmp;
/* Stop any running conversion */
tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
tmp &= ~(ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Clear the control register */
outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
return 0;
}
static int me4000_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
const struct me4000_board *board = dev->board_ptr;
unsigned int max_diff_chan = board->ai_diff_nchan;
unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
int i;
for (i = 0; i < cmd->chanlist_len; i++) {
unsigned int chan = CR_CHAN(cmd->chanlist[i]);
unsigned int range = CR_RANGE(cmd->chanlist[i]);
unsigned int aref = CR_AREF(cmd->chanlist[i]);
if (aref != aref0) {
dev_dbg(dev->class_dev,
"Mode is not equal for all entries\n");
return -EINVAL;
}
if (aref == AREF_DIFF) {
if (chan >= max_diff_chan) {
dev_dbg(dev->class_dev,
"Channel number to high\n");
return -EINVAL;
}
if (!comedi_range_is_bipolar(s, range)) {
dev_dbg(dev->class_dev,
"Bipolar is not selected in differential mode\n");
return -EINVAL;
}
}
}
return 0;
}
static int ai_round_cmd_args(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd,
unsigned int *init_ticks,
unsigned int *scan_ticks, unsigned int *chan_ticks)
{
int rest;
*init_ticks = 0;
*scan_ticks = 0;
*chan_ticks = 0;
if (cmd->start_arg) {
*init_ticks = (cmd->start_arg * 33) / 1000;
rest = (cmd->start_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
(*init_ticks)++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
(*init_ticks)++;
}
}
if (cmd->scan_begin_arg) {
*scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
rest = (cmd->scan_begin_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
(*scan_ticks)++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
(*scan_ticks)++;
}
}
if (cmd->convert_arg) {
*chan_ticks = (cmd->convert_arg * 33) / 1000;
rest = (cmd->convert_arg * 33) % 1000;
if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
if (rest > 33)
(*chan_ticks)++;
} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
if (rest)
(*chan_ticks)++;
}
}
return 0;
}
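/*
 * The "* 33 / 1000" conversions above turn nanoseconds into ticks of the
 * board's 33 MHz timer base (33 ticks per microsecond). Worked example: a
 * convert_arg of 2000 ns gives 2000 * 33 / 1000 = 66 ticks, which is exactly
 * ME4000_AI_MIN_TICKS and corresponds to the 2000 ns minimum sample time.
 */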
static void ai_write_timer(struct comedi_device *dev,
unsigned int init_ticks,
unsigned int scan_ticks, unsigned int chan_ticks)
{
outl(init_ticks - 1, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
if (scan_ticks) {
outl(scan_ticks - 1, dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
}
outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
}
static int ai_write_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
unsigned int entry;
unsigned int chan;
unsigned int rang;
unsigned int aref;
int i;
for (i = 0; i < cmd->chanlist_len; i++) {
chan = CR_CHAN(cmd->chanlist[i]);
rang = CR_RANGE(cmd->chanlist[i]);
aref = CR_AREF(cmd->chanlist[i]);
entry = chan;
if (rang == 0)
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
else if (rang == 1)
entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
else if (rang == 2)
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
else
entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
if (aref == AREF_DIFF)
entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
else
entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED;
outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
}
return 0;
}
static int ai_prepare(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd,
unsigned int init_ticks,
unsigned int scan_ticks, unsigned int chan_ticks)
{
unsigned int tmp = 0;
/* Write timer arguments */
ai_write_timer(dev, init_ticks, scan_ticks, chan_ticks);
/* Reset control register */
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Start sources */
if ((cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) ||
(cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER)) {
tmp = ME4000_AI_CTRL_BIT_MODE_1 |
ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
ME4000_AI_CTRL_BIT_DATA_FIFO;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_TIMER) {
tmp = ME4000_AI_CTRL_BIT_MODE_2 |
ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
ME4000_AI_CTRL_BIT_DATA_FIFO;
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_EXT) {
tmp = ME4000_AI_CTRL_BIT_MODE_0 |
ME4000_AI_CTRL_BIT_MODE_1 |
ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
ME4000_AI_CTRL_BIT_DATA_FIFO;
} else {
tmp = ME4000_AI_CTRL_BIT_MODE_0 |
ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
ME4000_AI_CTRL_BIT_DATA_FIFO;
}
/* Stop triggers */
if (cmd->stop_src == TRIG_COUNT) {
outl(cmd->chanlist_len * cmd->stop_arg,
dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
} else if (cmd->stop_src == TRIG_NONE &&
cmd->scan_end_src == TRIG_COUNT) {
outl(cmd->scan_end_arg,
dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
} else {
tmp |= ME4000_AI_CTRL_BIT_HF_IRQ;
}
/* Write the setup to the control register */
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Write the channel list */
ai_write_chanlist(dev, s, cmd);
return 0;
}
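/*
 * Summary of the trigger-source to acquisition-mode mapping implemented
 * above (for reference; derived directly from the branches):
 *
 *      start   scan_begin  convert  ->  mode bits
 *      EXT     TIMER       TIMER        MODE_1
 *      EXT     FOLLOW      TIMER        MODE_1
 *      EXT     EXT         TIMER        MODE_2
 *      EXT     EXT         EXT          MODE_0 | MODE_1
 *      others (e.g. NOW)                MODE_0
 *
 * All cases also enable the channel and data FIFOs.
 */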
static int me4000_ai_do_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
int err;
unsigned int init_ticks = 0;
unsigned int scan_ticks = 0;
unsigned int chan_ticks = 0;
struct comedi_cmd *cmd = &s->async->cmd;
/* Reset the analog input */
err = me4000_ai_cancel(dev, s);
if (err)
return err;
/* Round the timer arguments */
err = ai_round_cmd_args(dev,
s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
if (err)
return err;
/* Prepare the AI for acquisition */
err = ai_prepare(dev, s, cmd, init_ticks, scan_ticks, chan_ticks);
if (err)
return err;
/* Start acquisition by dummy read */
inl(dev->iobase + ME4000_AI_START_REG);
return 0;
}
static int me4000_ai_do_cmd_test(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
unsigned int init_ticks;
unsigned int chan_ticks;
unsigned int scan_ticks;
int err = 0;
/* Round the timer arguments */
ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_begin_src,
TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT);
err |= cfc_check_trigger_src(&cmd->scan_end_src,
TRIG_NONE | TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_NONE | TRIG_COUNT);
if (err)
return 1;
/* Step 2a : make sure trigger sources are unique */
err |= cfc_check_trigger_is_unique(cmd->start_src);
err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
err |= cfc_check_trigger_is_unique(cmd->convert_src);
err |= cfc_check_trigger_is_unique(cmd->scan_end_src);
err |= cfc_check_trigger_is_unique(cmd->stop_src);
/* Step 2b : and mutually compatible */
if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
} else if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_TIMER) {
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_EXT) {
} else {
err |= -EINVAL;
}
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->chanlist_len < 1) {
cmd->chanlist_len = 1;
err |= -EINVAL;
}
if (init_ticks < 66) {
cmd->start_arg = 2000;
err |= -EINVAL;
}
if (scan_ticks && scan_ticks < 67) {
cmd->scan_begin_arg = 2031;
err |= -EINVAL;
}
if (chan_ticks < 66) {
cmd->convert_arg = 2000;
err |= -EINVAL;
}
if (cmd->stop_src == TRIG_COUNT)
err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
else /* TRIG_NONE */
err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
if (err)
return 3;
/* Step 4: check for argument conflicts */
if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
dev_err(dev->class_dev, "Invalid scan end arg\n");
/* At least one tick more */
cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
err++;
}
} else if (cmd->start_src == TRIG_NOW &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
dev_err(dev->class_dev, "Invalid scan end arg\n");
/* At least one tick more */
cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
err++;
}
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_FOLLOW &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_TIMER) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
if (chan_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid convert arg\n");
cmd->convert_arg = 2000; /* 66 ticks at least */
err++;
}
} else if (cmd->start_src == TRIG_EXT &&
cmd->scan_begin_src == TRIG_EXT &&
cmd->convert_src == TRIG_EXT) {
/* Check timer arguments */
if (init_ticks < ME4000_AI_MIN_TICKS) {
dev_err(dev->class_dev, "Invalid start arg\n");
cmd->start_arg = 2000; /* 66 ticks at least */
err++;
}
}
if (cmd->scan_end_src == TRIG_COUNT) {
if (cmd->scan_end_arg == 0) {
dev_err(dev->class_dev, "Invalid scan end arg\n");
cmd->scan_end_arg = 1;
err++;
}
}
if (err)
return 4;
/* Step 5: check channel list if it exists */
if (cmd->chanlist && cmd->chanlist_len > 0)
err |= me4000_ai_check_chanlist(dev, s, cmd);
if (err)
return 5;
return 0;
}
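/*
 * Reviewer note (not from the original source): in step 3 above,
 * cfc_check_trigger_arg_is() forces start_arg to 0, while the
 * "init_ticks < 66" check can only be satisfied by a non-zero start_arg,
 * since ai_round_cmd_args() leaves init_ticks at 0 otherwise. As written,
 * step 3 therefore appears to reject every command; the logic is kept
 * as-is here because it matches the upstream driver.
 */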
static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
{
unsigned int tmp;
struct comedi_device *dev = dev_id;
struct comedi_subdevice *s = dev->read_subdev;
int i;
int c = 0;
unsigned int lval;
if (!dev->attached)
return IRQ_NONE;
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_AI_HF) {
/* Read status register to find out what happened */
tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
if (!(tmp & ME4000_AI_STATUS_BIT_FF_DATA) &&
!(tmp & ME4000_AI_STATUS_BIT_HF_DATA) &&
(tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
c = ME4000_AI_FIFO_COUNT;
/*
* FIFO overflow, so stop conversion
* and disable all interrupts
*/
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
dev_err(dev->class_dev, "FIFO overflow\n");
} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
&& !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
&& (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
c = ME4000_AI_FIFO_COUNT / 2;
} else {
dev_err(dev->class_dev,
"Can't determine state of fifo\n");
c = 0;
/*
* Undefined state, so stop conversion
* and disable all interrupts
*/
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
dev_err(dev->class_dev, "Undefined FIFO state\n");
}
for (i = 0; i < c; i++) {
/* Read value from data fifo */
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
if (!comedi_buf_write_samples(s, &lval, 1)) {
/*
* Buffer overflow, so stop conversion
* and disable all interrupts
*/
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
break;
}
}
/* Work is done, so reset the interrupt */
tmp |= ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
tmp &= ~ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
ME4000_IRQ_STATUS_BIT_SC) {
s->async->events |= COMEDI_CB_EOA;
/*
* Acquisition is complete, so stop
* conversion and disable all interrupts
*/
tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ);
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
/* Poll data until fifo empty */
while (inl(dev->iobase + ME4000_AI_CTRL_REG) &
ME4000_AI_STATUS_BIT_EF_DATA) {
/* Read value from data fifo */
lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
lval ^= 0x8000;
if (!comedi_buf_write_samples(s, &lval, 1))
break;
}
/* Work is done, so reset the interrupt */
tmp |= ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
tmp &= ~ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
}
comedi_handle_events(dev, s);
return IRQ_HANDLED;
}
static int me4000_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
unsigned int tmp;
/* Stop any running conversion */
tmp = inl(dev->iobase + ME4000_AO_CTRL_REG(chan));
tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
outl(tmp, dev->iobase + ME4000_AO_CTRL_REG(chan));
/* Clear control register and set to single mode */
outl(0x0, dev->iobase + ME4000_AO_CTRL_REG(chan));
/* Write data value */
outl(data[0], dev->iobase + ME4000_AO_SINGLE_REG(chan));
/* Store in the mirror */
s->readback[chan] = data[0];
return 1;
}
static int me4000_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
outl((s->state >> 0) & 0xFF,
dev->iobase + ME4000_DIO_PORT_0_REG);
outl((s->state >> 8) & 0xFF,
dev->iobase + ME4000_DIO_PORT_1_REG);
outl((s->state >> 16) & 0xFF,
dev->iobase + ME4000_DIO_PORT_2_REG);
outl((s->state >> 24) & 0xFF,
dev->iobase + ME4000_DIO_PORT_3_REG);
}
data[1] = ((inl(dev->iobase + ME4000_DIO_PORT_0_REG) & 0xFF) << 0) |
((inl(dev->iobase + ME4000_DIO_PORT_1_REG) & 0xFF) << 8) |
((inl(dev->iobase + ME4000_DIO_PORT_2_REG) & 0xFF) << 16) |
((inl(dev->iobase + ME4000_DIO_PORT_3_REG) & 0xFF) << 24);
return insn->n;
}
static int me4000_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
unsigned int tmp;
int ret;
if (chan < 8)
mask = 0x000000ff;
else if (chan < 16)
mask = 0x0000ff00;
else if (chan < 24)
mask = 0x00ff0000;
else
mask = 0xff000000;
ret = comedi_dio_insn_config(dev, s, insn, data, mask);
if (ret)
return ret;
tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1 |
ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3 |
ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5 |
ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7);
if (s->io_bits & 0x000000ff)
tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
if (s->io_bits & 0x0000ff00)
tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
if (s->io_bits & 0x00ff0000)
tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
if (s->io_bits & 0xff000000)
tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
/*
* Check for the optoisolated ME-4000 version. If so, the first
* port is a fixed output port and the second is a fixed input port.
*/
if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
s->io_bits |= 0x000000ff;
s->io_bits &= ~0x0000ff00;
tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
ME4000_DIO_CTRL_BIT_MODE_3);
}
outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
return insn->n;
}
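/*
 * Port-to-direction-mode mapping used above (each DIO port is 8 bits wide;
 * the driver sets the even MODE bit to make a port an output):
 *
 *      io_bits 0x000000ff  port 0  ME4000_DIO_CTRL_BIT_MODE_0
 *      io_bits 0x0000ff00  port 1  ME4000_DIO_CTRL_BIT_MODE_2
 *      io_bits 0x00ff0000  port 2  ME4000_DIO_CTRL_BIT_MODE_4
 *      io_bits 0xff000000  port 3  ME4000_DIO_CTRL_BIT_MODE_6
 */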
/*=============================================================================
Counter section
===========================================================================*/
static int me4000_cnt_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct me4000_info *info = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
int err;
switch (data[0]) {
case GPCT_RESET:
if (insn->n != 1)
return -EINVAL;
err = i8254_set_mode(info->timer_regbase, 0, chan,
I8254_MODE0 | I8254_BINARY);
if (err)
return err;
i8254_write(info->timer_regbase, 0, chan, 0);
break;
case GPCT_SET_OPERATION:
if (insn->n != 2)
return -EINVAL;
err = i8254_set_mode(info->timer_regbase, 0, chan,
(data[1] << 1) | I8254_BINARY);
if (err)
return err;
break;
default:
return -EINVAL;
}
return insn->n;
}
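/*
 * Note on GPCT_SET_OPERATION above: data[1] carries the requested 8254
 * counter mode (0..5). Shifting it left by one places it in the mode field
 * (bits 1..3) of the 8254 control word, and I8254_BINARY selects binary
 * rather than BCD counting.
 */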
static int me4000_cnt_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct me4000_info *info = dev->private;
if (insn->n == 0)
return 0;
if (insn->n > 1) {
dev_err(dev->class_dev, "Invalid instruction length %d\n",
insn->n);
return -EINVAL;
}
data[0] = i8254_read(info->timer_regbase, 0, insn->chanspec);
return 1;
}
static int me4000_cnt_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct me4000_info *info = dev->private;
if (insn->n == 0) {
return 0;
} else if (insn->n > 1) {
dev_err(dev->class_dev, "Invalid instruction length %d\n",
insn->n);
return -EINVAL;
}
i8254_write(info->timer_regbase, 0, insn->chanspec, data[0]);
return 1;
}
static int me4000_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct me4000_board *thisboard = NULL;
struct me4000_info *info;
struct comedi_subdevice *s;
int result;
if (context < ARRAY_SIZE(me4000_boards))
thisboard = &me4000_boards[context];
if (!thisboard)
return -ENODEV;
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
info = comedi_alloc_devpriv(dev, sizeof(*info));
if (!info)
return -ENOMEM;
result = comedi_pci_enable(dev);
if (result)
return result;
info->plx_regbase = pci_resource_start(pcidev, 1);
dev->iobase = pci_resource_start(pcidev, 2);
info->timer_regbase = pci_resource_start(pcidev, 3);
if (!info->plx_regbase || !dev->iobase || !info->timer_regbase)
return -ENODEV;
result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
me4000_xilinx_download, 0);
if (result < 0)
return result;
me4000_reset(dev);
if (pcidev->irq > 0) {
result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
dev->board_name, dev);
if (result == 0)
dev->irq = pcidev->irq;
}
result = comedi_alloc_subdevices(dev, 4);
if (result)
return result;
/*=========================================================================
Analog input subdevice
========================================================================*/
s = &dev->subdevices[0];
if (thisboard->ai_nchan) {
s->type = COMEDI_SUBD_AI;
s->subdev_flags =
SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
s->n_chan = thisboard->ai_nchan;
s->maxdata = 0xFFFF; /* 16 bit ADC */
s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT;
s->range_table = &me4000_ai_range;
s->insn_read = me4000_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
s->subdev_flags |= SDF_CMD_READ;
s->cancel = me4000_ai_cancel;
s->do_cmdtest = me4000_ai_do_cmd_test;
s->do_cmd = me4000_ai_do_cmd;
}
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/*=========================================================================
Analog output subdevice
========================================================================*/
s = &dev->subdevices[1];
if (thisboard->ao_nchan) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
s->n_chan = thisboard->ao_nchan;
s->maxdata = 0xFFFF; /* 16 bit DAC */
s->range_table = &range_bipolar10;
s->insn_write = me4000_ao_insn_write;
result = comedi_alloc_subdev_readback(s);
if (result)
return result;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/*=========================================================================
Digital I/O subdevice
========================================================================*/
s = &dev->subdevices[2];
if (thisboard->dio_nchan) {
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = thisboard->dio_nchan;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = me4000_dio_insn_bits;
s->insn_config = me4000_dio_insn_config;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/*
* Check for the optoisolated ME-4000 version. If so, the first
* port is a fixed output port and the second is a fixed input port.
*/
if (!inl(dev->iobase + ME4000_DIO_DIR_REG)) {
s->io_bits |= 0xFF;
outl(ME4000_DIO_CTRL_BIT_MODE_0,
dev->iobase + ME4000_DIO_DIR_REG);
}
/*=========================================================================
Counter subdevice
========================================================================*/
s = &dev->subdevices[3];
if (thisboard->has_counter) {
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 3;
s->maxdata = 0xFFFF; /* 16 bit counters */
s->insn_read = me4000_cnt_insn_read;
s->insn_write = me4000_cnt_insn_write;
s->insn_config = me4000_cnt_insn_config;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
return 0;
}
static void me4000_detach(struct comedi_device *dev)
{
if (dev->iobase)
me4000_reset(dev);
comedi_pci_detach(dev);
}
static struct comedi_driver me4000_driver = {
.driver_name = "me4000",
.module = THIS_MODULE,
.auto_attach = me4000_auto_attach,
.detach = me4000_detach,
};
static int me4000_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &me4000_driver, id->driver_data);
}
static const struct pci_device_id me4000_pci_table[] = {
{ PCI_VDEVICE(MEILHAUS, 0x4650), BOARD_ME4650 },
{ PCI_VDEVICE(MEILHAUS, 0x4660), BOARD_ME4660 },
{ PCI_VDEVICE(MEILHAUS, 0x4661), BOARD_ME4660I },
{ PCI_VDEVICE(MEILHAUS, 0x4662), BOARD_ME4660S },
{ PCI_VDEVICE(MEILHAUS, 0x4663), BOARD_ME4660IS },
{ PCI_VDEVICE(MEILHAUS, 0x4670), BOARD_ME4670 },
{ PCI_VDEVICE(MEILHAUS, 0x4671), BOARD_ME4670I },
{ PCI_VDEVICE(MEILHAUS, 0x4672), BOARD_ME4670S },
{ PCI_VDEVICE(MEILHAUS, 0x4673), BOARD_ME4670IS },
{ PCI_VDEVICE(MEILHAUS, 0x4680), BOARD_ME4680 },
{ PCI_VDEVICE(MEILHAUS, 0x4681), BOARD_ME4680I },
{ PCI_VDEVICE(MEILHAUS, 0x4682), BOARD_ME4680S },
{ PCI_VDEVICE(MEILHAUS, 0x4683), BOARD_ME4680IS },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, me4000_pci_table);
static struct pci_driver me4000_pci_driver = {
.name = "me4000",
.id_table = me4000_pci_table,
.probe = me4000_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(ME4000_FIRMWARE);
| gpl-2.0 |
HinTak/linux | arch/x86/ia32/ia32_aout.c | 77 | 8225 | // SPDX-License-Identifier: GPL-2.0-only
/*
* a.out loader for x86-64
*
* Copyright (C) 1991, 1992, 1996 Linus Torvalds
* Hacked together by Andi Kleen
*/
#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/user32.h>
#include <asm/ia32.h>
#undef WARN_OLD
static int load_aout_binary(struct linux_binprm *);
static int load_aout_library(struct file *);
static struct linux_binfmt aout_format = {
.module = THIS_MODULE,
.load_binary = load_aout_binary,
.load_shlib = load_aout_library,
};
static int set_brk(unsigned long start, unsigned long end)
{
start = PAGE_ALIGN(start);
end = PAGE_ALIGN(end);
if (end <= start)
return 0;
return vm_brk(start, end - start);
}
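/*
 * Worked example (assuming a 4 KiB PAGE_SIZE): set_brk(0x1234, 0x5678)
 * page-aligns both bounds up to 0x2000 and 0x6000 and maps the resulting
 * 0x4000-byte range with vm_brk(); if the aligned range is empty, the
 * function is a no-op and returns 0.
 */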
/*
* create_aout_tables() parses the env- and arg-strings in new user
* memory and creates the pointer tables from them, and puts their
* addresses on the "stack", returning the new stack pointer value.
*/
static u32 __user *create_aout_tables(char __user *p, struct linux_binprm *bprm)
{
u32 __user *argv, *envp, *sp;
int argc = bprm->argc, envc = bprm->envc;
sp = (u32 __user *) ((-(unsigned long)sizeof(u32)) & (unsigned long) p);
sp -= envc+1;
envp = sp;
sp -= argc+1;
argv = sp;
put_user((unsigned long) envp, --sp);
put_user((unsigned long) argv, --sp);
put_user(argc, --sp);
current->mm->arg_start = (unsigned long) p;
while (argc-- > 0) {
char c;
put_user((u32)(unsigned long)p, argv++);
do {
get_user(c, p++);
} while (c);
}
put_user(0, argv);
current->mm->arg_end = current->mm->env_start = (unsigned long) p;
while (envc-- > 0) {
char c;
put_user((u32)(unsigned long)p, envp++);
do {
get_user(c, p++);
} while (c);
}
put_user(0, envp);
current->mm->env_end = (unsigned long) p;
return sp;
}
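/*
 * Resulting 32-bit stack layout (sketch; the returned sp is the lowest
 * address, the strings at p the highest):
 *
 *      sp ->  argc
 *             argv            (pointer to the argv[] table)
 *             envp            (pointer to the envp[] table)
 *             argv[0..argc-1], NULL
 *             envp[0..envc-1], NULL
 *      p  ->  argument and environment strings
 */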
/*
* These are the functions used to load a.out style executables and shared
* libraries. There is no binary dependent code anywhere else.
*/
static int load_aout_binary(struct linux_binprm *bprm)
{
unsigned long error, fd_offset, rlim;
struct pt_regs *regs = current_pt_regs();
struct exec ex;
int retval;
ex = *((struct exec *) bprm->buf); /* exec-header */
if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
N_TRSIZE(ex) || N_DRSIZE(ex) ||
i_size_read(file_inode(bprm->file)) <
ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
return -ENOEXEC;
}
fd_offset = N_TXTOFF(ex);
/* Check initial limits. This avoids letting people circumvent
* size limits imposed on them by creating programs with large
* arrays in the data or bss.
*/
rlim = rlimit(RLIMIT_DATA);
if (rlim >= RLIM_INFINITY)
rlim = ~0;
if (ex.a_data + ex.a_bss > rlim)
return -ENOMEM;
/* Flush all traces of the currently running executable */
retval = begin_new_exec(bprm);
if (retval)
return retval;
/* OK, This is the point of no return */
set_personality(PER_LINUX);
set_personality_ia32(false);
setup_new_exec(bprm);
regs->cs = __USER32_CS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 =
regs->r13 = regs->r14 = regs->r15 = 0;
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
current->mm->end_data = ex.a_data +
(current->mm->start_data = N_DATADDR(ex));
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
if (retval < 0)
return retval;
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
text_addr = N_TXTADDR(ex);
map_size = ex.a_text+ex.a_data;
error = vm_brk(text_addr & PAGE_MASK, map_size);
if (error)
return error;
error = read_code(bprm->file, text_addr, 32,
ex.a_text + ex.a_data);
if ((signed long)error < 0)
return error;
} else {
#ifdef WARN_OLD
static unsigned long error_time, error_time2;
if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
(N_MAGIC(ex) != NMAGIC) &&
time_after(jiffies, error_time2 + 5*HZ)) {
printk(KERN_NOTICE "executable not page aligned\n");
error_time2 = jiffies;
}
if ((fd_offset & ~PAGE_MASK) != 0 &&
time_after(jiffies, error_time + 5*HZ)) {
printk(KERN_WARNING
"fd_offset is not page aligned. Please convert "
"program: %pD\n",
bprm->file);
error_time = jiffies;
}
#endif
if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
if (error)
return error;
read_code(bprm->file, N_TXTADDR(ex), fd_offset,
ex.a_text+ex.a_data);
goto beyond_if;
}
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
MAP_EXECUTABLE | MAP_32BIT,
fd_offset);
if (error != N_TXTADDR(ex))
return error;
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
MAP_EXECUTABLE | MAP_32BIT,
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
return error;
}
beyond_if:
error = set_brk(current->mm->start_brk, current->mm->brk);
if (error)
return error;
set_binfmt(&aout_format);
current->mm->start_stack =
(unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
/* start thread */
loadsegment(fs, 0);
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
load_gs_index(0);
(regs)->ip = ex.a_entry;
(regs)->sp = current->mm->start_stack;
(regs)->flags = 0x200;
(regs)->cs = __USER32_CS;
(regs)->ss = __USER32_DS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 =
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
return 0;
}
static int load_aout_library(struct file *file)
{
unsigned long bss, start_addr, len, error;
int retval;
struct exec ex;
loff_t pos = 0;
retval = -ENOEXEC;
error = kernel_read(file, &ex, sizeof(ex), &pos);
if (error != sizeof(ex))
goto out;
/* We come in here for the regular a.out style of shared libraries */
if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
i_size_read(file_inode(file)) <
ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
goto out;
}
if (N_FLAGS(ex))
goto out;
/* For QMAGIC, the starting address is 0x20 into the page. We mask
this off to get the starting address for the page */
start_addr = ex.a_entry & 0xfffff000;
if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
#ifdef WARN_OLD
static unsigned long error_time;
if (time_after(jiffies, error_time + 5*HZ)) {
printk(KERN_WARNING
"N_TXTOFF is not page aligned. Please convert "
"library: %pD\n",
file);
error_time = jiffies;
}
#endif
retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
if (retval)
goto out;
read_code(file, start_addr, N_TXTOFF(ex),
ex.a_text + ex.a_data);
retval = 0;
goto out;
}
/* Now use mmap to map the library into memory. */
error = vm_mmap(file, start_addr, ex.a_text + ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT,
N_TXTOFF(ex));
retval = error;
if (error != start_addr)
goto out;
len = PAGE_ALIGN(ex.a_text + ex.a_data);
bss = ex.a_text + ex.a_data + ex.a_bss;
if (bss > len) {
retval = vm_brk(start_addr + len, bss - len);
if (retval)
goto out;
}
retval = 0;
out:
return retval;
}
static int __init init_aout_binfmt(void)
{
register_binfmt(&aout_format);
return 0;
}
static void __exit exit_aout_binfmt(void)
{
unregister_binfmt(&aout_format);
}
module_init(init_aout_binfmt);
module_exit(exit_aout_binfmt);
MODULE_LICENSE("GPL");
| gpl-2.0 |
miaoxie/linux-btrfs | drivers/clk/clk.c | 77 | 65904 | /*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Standard functionality for the common clock API. See Documentation/clk.txt
*/
#include <linux/clk-private.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
#include "clk.h"
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
/*** locking ***/
static void clk_prepare_lock(void)
{
if (!mutex_trylock(&prepare_lock)) {
if (prepare_owner == current) {
prepare_refcnt++;
return;
}
mutex_lock(&prepare_lock);
}
WARN_ON_ONCE(prepare_owner != NULL);
WARN_ON_ONCE(prepare_refcnt != 0);
prepare_owner = current;
prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
WARN_ON_ONCE(prepare_owner != current);
WARN_ON_ONCE(prepare_refcnt == 0);
if (--prepare_refcnt)
return;
prepare_owner = NULL;
mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
unsigned long flags;
if (!spin_trylock_irqsave(&enable_lock, flags)) {
if (enable_owner == current) {
enable_refcnt++;
return flags;
}
spin_lock_irqsave(&enable_lock, flags);
}
WARN_ON_ONCE(enable_owner != NULL);
WARN_ON_ONCE(enable_refcnt != 0);
enable_owner = current;
enable_refcnt = 1;
return flags;
}
static void clk_enable_unlock(unsigned long flags)
{
WARN_ON_ONCE(enable_owner != current);
WARN_ON_ONCE(enable_refcnt == 0);
if (--enable_refcnt)
return;
enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, flags);
}
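/*
 * Illustrative sketch (not in the original source): both locks above are
 * reentrant for the owning task, so nested framework calls simply bump a
 * refcount. A hypothetical framework-internal helper would pair them
 * like this:
 */
static void __maybe_unused clk_locking_example(void)
{
	unsigned long flags;

	clk_prepare_lock();		/* may sleep; guards tree topology */
	flags = clk_enable_lock();	/* IRQ-safe; guards enable counts */
	/* ... touch enable_count and friends here ... */
	clk_enable_unlock(flags);
	clk_prepare_unlock();
}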
/*** debugfs support ***/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);
static struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
};
static struct hlist_head *orphan_list[] = {
&clk_orphan_list,
NULL,
};
static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
if (!c)
return;
seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, clk_get_rate(c),
clk_get_accuracy(c), clk_get_phase(c));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
int level)
{
struct clk *child;
if (!c)
return;
clk_summary_show_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
}
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
seq_puts(s, "----------------------------------------------------------------------------------------\n");
clk_prepare_lock();
for (; *lists; lists++)
hlist_for_each_entry(c, *lists, child_node)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
return 0;
}
static int clk_summary_open(struct inode *inode, struct file *file)
{
return single_open(file, clk_summary_show, inode->i_private);
}
static const struct file_operations clk_summary_fops = {
.open = clk_summary_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
if (!c)
return;
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_get_phase(c));
}
static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
struct clk *child;
if (!c)
return;
clk_dump_one(s, c, level);
hlist_for_each_entry(child, &c->children, child_node) {
seq_printf(s, ",");
clk_dump_subtree(s, child, level + 1);
}
seq_printf(s, "}");
}
static int clk_dump(struct seq_file *s, void *data)
{
struct clk *c;
bool first_node = true;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_printf(s, "{");
clk_prepare_lock();
for (; *lists; lists++) {
hlist_for_each_entry(c, *lists, child_node) {
if (!first_node)
seq_puts(s, ",");
first_node = false;
clk_dump_subtree(s, c, 0);
}
}
clk_prepare_unlock();
seq_printf(s, "}");
return 0;
}
static int clk_dump_open(struct inode *inode, struct file *file)
{
return single_open(file, clk_dump, inode->i_private);
}
static const struct file_operations clk_dump_fops = {
.open = clk_dump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
struct dentry *d;
int ret = -ENOMEM;
if (!clk || !pdentry) {
ret = -EINVAL;
goto out;
}
d = debugfs_create_dir(clk->name, pdentry);
if (!d)
goto out;
clk->dentry = d;
d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
(u32 *)&clk->rate);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
(u32 *)&clk->accuracy);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry,
(u32 *)&clk->phase);
if (!d)
goto err_out;
d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
(u32 *)&clk->flags);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
(u32 *)&clk->prepare_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
(u32 *)&clk->enable_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
(u32 *)&clk->notifier_count);
if (!d)
goto err_out;
if (clk->ops->debug_init) {
ret = clk->ops->debug_init(clk->hw, clk->dentry);
if (ret)
goto err_out;
}
ret = 0;
goto out;
err_out:
debugfs_remove_recursive(clk->dentry);
clk->dentry = NULL;
out:
return ret;
}
/**
* clk_debug_register - add a clk node to the debugfs clk tree
* @clk: the clk being added to the debugfs clk tree
*
* Dynamically adds a clk to the debugfs clk tree if debugfs has been
* initialized. Otherwise it bails out early since the debugfs clk tree
* will be created lazily by clk_debug_init as part of a late_initcall.
*/
static int clk_debug_register(struct clk *clk)
{
int ret = 0;
mutex_lock(&clk_debug_lock);
hlist_add_head(&clk->debug_node, &clk_debug_list);
if (!inited)
goto unlock;
ret = clk_debug_create_one(clk, rootdir);
unlock:
mutex_unlock(&clk_debug_lock);
return ret;
}
/**
* clk_debug_unregister - remove a clk node from the debugfs clk tree
* @clk: the clk being removed from the debugfs clk tree
*
* Dynamically removes a clk and all its child clk nodes from the
* debugfs clk tree if clk->dentry points to debugfs created by
* clk_debug_register in __clk_init.
*/
static void clk_debug_unregister(struct clk *clk)
{
mutex_lock(&clk_debug_lock);
if (!clk->dentry)
goto out;
hlist_del_init(&clk->debug_node);
debugfs_remove_recursive(clk->dentry);
clk->dentry = NULL;
out:
mutex_unlock(&clk_debug_lock);
}
struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
void *data, const struct file_operations *fops)
{
struct dentry *d = NULL;
if (clk->dentry)
d = debugfs_create_file(name, mode, clk->dentry, data, fops);
return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
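/*
 * Illustrative sketch (not in the original source): a provider wanting an
 * extra per-clock debugfs entry can hook .debug_init, which runs once the
 * clk's directory exists. struct foo_clk and foo_state_fops are
 * hypothetical.
 */
struct foo_clk {
	struct clk_hw hw;
	/* provider-private state would live here */
};

static const struct file_operations foo_state_fops;

static int __maybe_unused foo_clk_debug_init(struct clk_hw *hw,
					     struct dentry *dentry)
{
	struct foo_clk *foo = container_of(hw, struct foo_clk, hw);

	if (!debugfs_create_file("foo_state", S_IRUGO, dentry, foo,
				 &foo_state_fops))
		return -ENOMEM;
	return 0;
}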
/**
* clk_debug_init - lazily create the debugfs clk tree visualization
*
* clks are often initialized very early during boot before memory can
* be dynamically allocated and well before debugfs is setup.
* clk_debug_init walks the clk tree hierarchy while holding
* prepare_lock and creates the topology as part of a late_initcall,
* thus ensuring that clks initialized very early will still be
* represented in the debugfs clk tree. This function should only be
* called once at boot-time, and all other clks added dynamically will
* be done so with clk_debug_register.
*/
static int __init clk_debug_init(void)
{
struct clk *clk;
struct dentry *d;
rootdir = debugfs_create_dir("clk", NULL);
if (!rootdir)
return -ENOMEM;
d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
&clk_summary_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
&clk_dump_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
&orphan_list, &clk_summary_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
&orphan_list, &clk_dump_fops);
if (!d)
return -ENOMEM;
mutex_lock(&clk_debug_lock);
hlist_for_each_entry(clk, &clk_debug_list, debug_node)
clk_debug_create_one(clk, rootdir);
inited = 1;
mutex_unlock(&clk_debug_lock);
return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
static inline void clk_debug_unregister(struct clk *clk)
{
}
#endif
/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
struct clk *child;
if (!clk)
return;
hlist_for_each_entry(child, &clk->children, child_node)
clk_unprepare_unused_subtree(child);
if (clk->prepare_count)
return;
if (clk->flags & CLK_IGNORE_UNUSED)
return;
if (__clk_is_prepared(clk)) {
if (clk->ops->unprepare_unused)
clk->ops->unprepare_unused(clk->hw);
else if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
}
}
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
struct clk *child;
unsigned long flags;
if (!clk)
goto out;
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
flags = clk_enable_lock();
if (clk->enable_count)
goto unlock_out;
if (clk->flags & CLK_IGNORE_UNUSED)
goto unlock_out;
/*
* some gate clocks have special needs during the disable-unused
* sequence. call .disable_unused if available, otherwise fall
* back to .disable
*/
if (__clk_is_enabled(clk)) {
if (clk->ops->disable_unused)
clk->ops->disable_unused(clk->hw);
else if (clk->ops->disable)
clk->ops->disable(clk->hw);
}
unlock_out:
clk_enable_unlock(flags);
out:
return;
}
static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
static int clk_disable_unused(void)
{
struct clk *clk;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_disable_unused_subtree(clk);
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_unprepare_unused_subtree(clk);
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
clk_prepare_unlock();
return 0;
}
late_initcall_sync(clk_disable_unused);
/*** helper functions ***/
const char *__clk_get_name(struct clk *clk)
{
return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);
u8 __clk_get_num_parents(struct clk *clk)
{
return !clk ? 0 : clk->num_parents;
}
EXPORT_SYMBOL_GPL(__clk_get_num_parents);
struct clk *__clk_get_parent(struct clk *clk)
{
return !clk ? NULL : clk->parent;
}
EXPORT_SYMBOL_GPL(__clk_get_parent);
struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
{
if (!clk || index >= clk->num_parents)
return NULL;
else if (!clk->parents)
return __clk_lookup(clk->parent_names[index]);
else if (!clk->parents[index])
return clk->parents[index] =
__clk_lookup(clk->parent_names[index]);
else
return clk->parents[index];
}
EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
unsigned int __clk_get_enable_count(struct clk *clk)
{
return !clk ? 0 : clk->enable_count;
}
unsigned int __clk_get_prepare_count(struct clk *clk)
{
return !clk ? 0 : clk->prepare_count;
}
unsigned long __clk_get_rate(struct clk *clk)
{
unsigned long ret;
if (!clk) {
ret = 0;
goto out;
}
ret = clk->rate;
if (clk->flags & CLK_IS_ROOT)
goto out;
if (!clk->parent)
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL_GPL(__clk_get_rate);
unsigned long __clk_get_accuracy(struct clk *clk)
{
if (!clk)
return 0;
return clk->accuracy;
}
unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);
bool __clk_is_prepared(struct clk *clk)
{
int ret;
if (!clk)
return false;
/*
* .is_prepared is optional for clocks that can prepare;
* fall back to the software usage counter if it is missing
*/
if (!clk->ops->is_prepared) {
ret = clk->prepare_count ? 1 : 0;
goto out;
}
ret = clk->ops->is_prepared(clk->hw);
out:
return !!ret;
}
bool __clk_is_enabled(struct clk *clk)
{
int ret;
if (!clk)
return false;
/*
* .is_enabled is only mandatory for clocks that gate;
* fall back to the software usage counter if .is_enabled is missing
*/
if (!clk->ops->is_enabled) {
ret = clk->enable_count ? 1 : 0;
goto out;
}
ret = clk->ops->is_enabled(clk->hw);
out:
return !!ret;
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
struct clk *child;
struct clk *ret;
if (!strcmp(clk->name, name))
return clk;
hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_lookup_subtree(name, child);
if (ret)
return ret;
}
return NULL;
}
struct clk *__clk_lookup(const char *name)
{
struct clk *root_clk;
struct clk *ret;
if (!name)
return NULL;
/* search the 'proper' clk tree first */
hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
/* if not found, then search the orphan tree */
hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
ret = __clk_lookup_subtree(name, root_clk);
if (ret)
return ret;
}
return NULL;
}
/*
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
*/
long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
struct clk **best_parent_p)
{
struct clk *clk = hw->clk, *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0;
/* if NO_REPARENT flag set, pass through to current parent */
if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
parent = clk->parent;
if (clk->flags & CLK_SET_RATE_PARENT)
best = __clk_round_rate(parent, rate);
else if (parent)
best = __clk_get_rate(parent);
else
best = __clk_get_rate(clk);
goto out;
}
/* find the parent that can provide the fastest rate <= rate */
num_parents = clk->num_parents;
for (i = 0; i < num_parents; i++) {
parent = clk_get_parent_by_index(clk, i);
if (!parent)
continue;
if (clk->flags & CLK_SET_RATE_PARENT)
parent_rate = __clk_round_rate(parent, rate);
else
parent_rate = __clk_get_rate(parent);
if (parent_rate <= rate && parent_rate > best) {
best_parent = parent;
best = parent_rate;
}
}
out:
if (best_parent)
*best_parent_p = best_parent;
*best_parent_rate = best;
return best;
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
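/*
 * Illustrative sketch (not in the original source): a basic mux can plug
 * the helper above straight into its ops. The foo_mux_* callbacks are
 * hypothetical stubs standing in for real register accessors.
 */
static u8 foo_mux_get_parent(struct clk_hw *hw)
{
	return 0;	/* read the mux select field from hardware here */
}

static int foo_mux_set_parent(struct clk_hw *hw, u8 index)
{
	return 0;	/* program the mux select field here */
}

static const struct clk_ops __maybe_unused foo_mux_ops = {
	.get_parent	= foo_mux_get_parent,
	.set_parent	= foo_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};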
/*** clk api ***/
void __clk_unprepare(struct clk *clk)
{
if (!clk)
return;
if (WARN_ON(clk->prepare_count == 0))
return;
if (--clk->prepare_count > 0)
return;
WARN_ON(clk->enable_count > 0);
if (clk->ops->unprepare)
clk->ops->unprepare(clk->hw);
__clk_unprepare(clk->parent);
}
/**
* clk_unprepare - undo preparation of a clock source
* @clk: the clk being unprepared
*
* clk_unprepare may sleep, which differentiates it from clk_disable. In a
* simple case, clk_unprepare can be used instead of clk_disable to gate a clk
* if the operation may sleep. One example is a clk which is accessed over
* I2C. In the complex case a clk gate operation may require a fast and a slow
* part. It is for this reason that clk_unprepare and clk_disable are not
* mutually exclusive. In fact clk_disable must be called before clk_unprepare.
*/
void clk_unprepare(struct clk *clk)
{
if (IS_ERR_OR_NULL(clk))
return;
clk_prepare_lock();
__clk_unprepare(clk);
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
int __clk_prepare(struct clk *clk)
{
int ret = 0;
if (!clk)
return 0;
if (clk->prepare_count == 0) {
ret = __clk_prepare(clk->parent);
if (ret)
return ret;
if (clk->ops->prepare) {
ret = clk->ops->prepare(clk->hw);
if (ret) {
__clk_unprepare(clk->parent);
return ret;
}
}
}
clk->prepare_count++;
return 0;
}
/**
* clk_prepare - prepare a clock source
* @clk: the clk being prepared
*
* clk_prepare may sleep, which differentiates it from clk_enable. In a simple
* case, clk_prepare can be used instead of clk_enable to ungate a clk if the
* operation may sleep. One example is a clk which is accessed over I2C. In
* the complex case a clk ungate operation may require a fast and a slow part.
* It is for this reason that clk_prepare and clk_enable are not mutually
* exclusive. In fact clk_prepare must be called before clk_enable.
* Returns 0 on success, -EERROR otherwise.
*/
int clk_prepare(struct clk *clk)
{
int ret;
clk_prepare_lock();
ret = __clk_prepare(clk);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
static void __clk_disable(struct clk *clk)
{
if (!clk)
return;
if (WARN_ON(clk->enable_count == 0))
return;
if (--clk->enable_count > 0)
return;
if (clk->ops->disable)
clk->ops->disable(clk->hw);
__clk_disable(clk->parent);
}
/**
* clk_disable - gate a clock
* @clk: the clk being gated
*
* clk_disable must not sleep, which differentiates it from clk_unprepare. In
* a simple case, clk_disable can be used instead of clk_unprepare to gate a
* clk if the operation is fast and will never sleep. One example is a
* SoC-internal clk which is controlled via simple register writes. In the
* complex case a clk gate operation may require a fast and a slow part. It is
* for this reason that clk_unprepare and clk_disable are not mutually exclusive.
* In fact clk_disable must be called before clk_unprepare.
*/
void clk_disable(struct clk *clk)
{
unsigned long flags;
if (IS_ERR_OR_NULL(clk))
return;
flags = clk_enable_lock();
__clk_disable(clk);
clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
static int __clk_enable(struct clk *clk)
{
int ret = 0;
if (!clk)
return 0;
if (WARN_ON(clk->prepare_count == 0))
return -ESHUTDOWN;
if (clk->enable_count == 0) {
ret = __clk_enable(clk->parent);
if (ret)
return ret;
if (clk->ops->enable) {
ret = clk->ops->enable(clk->hw);
if (ret) {
__clk_disable(clk->parent);
return ret;
}
}
}
clk->enable_count++;
return 0;
}
/**
* clk_enable - ungate a clock
* @clk: the clk being ungated
*
* clk_enable must not sleep, which differentiates it from clk_prepare. In a
* simple case, clk_enable can be used instead of clk_prepare to ungate a clk
* if the operation will never sleep. One example is a SoC-internal clk which
* is controlled via simple register writes. In the complex case a clk ungate
* operation may require a fast and a slow part. It is for this reason that
* clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
* must be called before clk_enable. Returns 0 on success, -EERROR
* otherwise.
*/
int clk_enable(struct clk *clk)
{
unsigned long flags;
int ret;
flags = clk_enable_lock();
ret = __clk_enable(clk);
clk_enable_unlock(flags);
return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
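/*
 * Illustrative consumer sketch (not in the original source): the canonical
 * call order is clk_prepare() from sleepable context, then clk_enable()
 * (possibly from atomic context), and the mirror image on the way down.
 * The foo_* names are hypothetical.
 */
static int __maybe_unused foo_start_clock(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* must not sleep */
	if (ret)
		clk_unprepare(clk);
	return ret;
}

static void __maybe_unused foo_stop_clock(struct clk *clk)
{
	clk_disable(clk);		/* the fast part first ... */
	clk_unprepare(clk);		/* ... then the part that may sleep */
}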
/**
* __clk_round_rate - round the given rate for a clk
* @clk: round the rate of this clock
* @rate: the rate which is to be rounded
*
* Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long parent_rate = 0;
struct clk *parent;
if (!clk)
return 0;
parent = clk->parent;
if (parent)
parent_rate = parent->rate;
if (clk->ops->determine_rate)
return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
&parent);
else if (clk->ops->round_rate)
return clk->ops->round_rate(clk->hw, rate, &parent_rate);
else if (clk->flags & CLK_SET_RATE_PARENT)
return __clk_round_rate(clk->parent, rate);
else
return clk->rate;
}
EXPORT_SYMBOL_GPL(__clk_round_rate);
/**
* clk_round_rate - round the given rate for a clk
* @clk: the clk for which we are rounding a rate
* @rate: the rate which is to be rounded
*
* Takes in a rate as input and rounds it to a rate that the clk can actually
* use, which is then returned. If clk doesn't support the round_rate operation
* then the parent rate is returned.
*/
long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
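/*
 * Illustrative sketch (not in the original source): probe what the clk can
 * actually deliver before committing to a change. The 1% tolerance is an
 * arbitrary example.
 */
static bool __maybe_unused foo_rate_acceptable(struct clk *clk,
					       unsigned long wanted)
{
	long got = clk_round_rate(clk, wanted);

	return got > 0 && (unsigned long)got >= wanted - wanted / 100;
}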
/**
* __clk_notify - call clk notifier chain
* @clk: struct clk * that is changing rate
* @msg: clk notifier type (see include/linux/clk.h)
* @old_rate: old clk rate
* @new_rate: new clk rate
*
* Triggers a notifier call chain on the clk rate-change notification
* for 'clk'. Passes a pointer to the struct clk and the previous
* and current rates to the notifier callback. Intended to be called by
* internal clock code only. Returns NOTIFY_DONE from the last driver
* called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
* a driver returns that.
*/
static int __clk_notify(struct clk *clk, unsigned long msg,
unsigned long old_rate, unsigned long new_rate)
{
struct clk_notifier *cn;
struct clk_notifier_data cnd;
int ret = NOTIFY_DONE;
cnd.clk = clk;
cnd.old_rate = old_rate;
cnd.new_rate = new_rate;
list_for_each_entry(cn, &clk_notifier_list, node) {
if (cn->clk == clk) {
ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
&cnd);
break;
}
}
return ret;
}
/**
* __clk_recalc_accuracies
* @clk: first clk in the subtree
*
* Walks the subtree of clks starting with clk and recalculates accuracies as
* it goes. Note that if a clk does not implement the .recalc_accuracy
* callback then it is assumed that the clock will take on the accuracy of its
* parent.
*
* Caller must hold prepare_lock.
*/
static void __clk_recalc_accuracies(struct clk *clk)
{
unsigned long parent_accuracy = 0;
struct clk *child;
if (clk->parent)
parent_accuracy = clk->parent->accuracy;
if (clk->ops->recalc_accuracy)
clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
parent_accuracy);
else
clk->accuracy = parent_accuracy;
hlist_for_each_entry(child, &clk->children, child_node)
__clk_recalc_accuracies(child);
}
/**
* clk_get_accuracy - return the accuracy of clk
* @clk: the clk whose accuracy is being returned
*
* Simply returns the cached accuracy of the clk, unless
* CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_rate will be
* issued.
* If clk is NULL then returns 0.
*/
long clk_get_accuracy(struct clk *clk)
{
unsigned long accuracy;
clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
__clk_recalc_accuracies(clk);
accuracy = __clk_get_accuracy(clk);
clk_prepare_unlock();
return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
{
if (clk->ops->recalc_rate)
return clk->ops->recalc_rate(clk->hw, parent_rate);
return parent_rate;
}
/**
* __clk_recalc_rates
* @clk: first clk in the subtree
* @msg: notification type (see include/linux/clk.h)
*
* Walks the subtree of clks starting with clk and recalculates rates as it
* goes. Note that if a clk does not implement the .recalc_rate callback then
* it is assumed that the clock will take on the rate of its parent.
*
* clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
* if necessary.
*
* Caller must hold prepare_lock.
*/
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
unsigned long old_rate;
unsigned long parent_rate = 0;
struct clk *child;
old_rate = clk->rate;
if (clk->parent)
parent_rate = clk->parent->rate;
clk->rate = clk_recalc(clk, parent_rate);
/*
* ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
* & ABORT_RATE_CHANGE notifiers
*/
if (clk->notifier_count && msg)
__clk_notify(clk, msg, old_rate, clk->rate);
hlist_for_each_entry(child, &clk->children, child_node)
__clk_recalc_rates(child, msg);
}
/**
* clk_get_rate - return the rate of clk
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
* is set, which means a recalc_rate will be issued.
* If clk is NULL then returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
clk_prepare_unlock();
return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
int i;
if (!clk->parents) {
clk->parents = kcalloc(clk->num_parents,
sizeof(struct clk *), GFP_KERNEL);
if (!clk->parents)
return -ENOMEM;
}
/*
* find index of new parent clock using cached parent ptrs,
* or if not yet cached, use string name comparison and cache
* them now to avoid future calls to __clk_lookup.
*/
for (i = 0; i < clk->num_parents; i++) {
if (clk->parents[i] == parent)
return i;
if (clk->parents[i])
continue;
if (!strcmp(clk->parent_names[i], parent->name)) {
clk->parents[i] = __clk_lookup(parent->name);
return i;
}
}
return -EINVAL;
}
static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
hlist_del(&clk->child_node);
if (new_parent) {
/* avoid duplicate POST_RATE_CHANGE notifications */
if (new_parent->new_child == clk)
new_parent->new_child = NULL;
hlist_add_head(&clk->child_node, &new_parent->children);
} else {
hlist_add_head(&clk->child_node, &clk_orphan_list);
}
clk->parent = new_parent;
}
static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
{
unsigned long flags;
struct clk *old_parent = clk->parent;
/*
* Migrate prepare state between parents and prevent race with
* clk_enable().
*
* If the clock is not prepared, then a race with
* clk_enable/disable() is impossible since we already have the
* prepare lock (future calls to clk_enable() need to be preceded by
* a clk_prepare()).
*
* If the clock is prepared, migrate the prepared state to the new
* parent and also protect against a race with clk_enable() by
* forcing the clock and the new parent on. This ensures that all
* future calls to clk_enable() are practically NOPs with respect to
* hardware and software states.
*
* See also: Comment for clk_set_parent() below.
*/
if (clk->prepare_count) {
__clk_prepare(parent);
clk_enable(parent);
clk_enable(clk);
}
/* update the clk tree topology */
flags = clk_enable_lock();
clk_reparent(clk, parent);
clk_enable_unlock(flags);
return old_parent;
}
static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
struct clk *old_parent)
{
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (clk->prepare_count) {
clk_disable(clk);
clk_disable(old_parent);
__clk_unprepare(old_parent);
}
}
static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
unsigned long flags;
int ret = 0;
struct clk *old_parent;
old_parent = __clk_set_parent_before(clk, parent);
/* change clock input source */
if (parent && clk->ops->set_parent)
ret = clk->ops->set_parent(clk->hw, p_index);
if (ret) {
flags = clk_enable_lock();
clk_reparent(clk, old_parent);
clk_enable_unlock(flags);
if (clk->prepare_count) {
clk_disable(clk);
clk_disable(parent);
__clk_unprepare(parent);
}
return ret;
}
__clk_set_parent_after(clk, parent, old_parent);
return 0;
}
/**
* __clk_speculate_rates
* @clk: first clk in the subtree
* @parent_rate: the "future" rate of clk's parent
*
* Walks the subtree of clks starting with clk, speculating rates as it
* goes and firing off PRE_RATE_CHANGE notifications as necessary.
*
* Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
* pre-rate change notifications and returns early if no clks in the
* subtree have subscribed to the notifications. Note that if a clk does not
* implement the .recalc_rate callback then it is assumed that the clock will
* take on the rate of its parent.
*
* Caller must hold prepare_lock.
*/
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
struct clk *child;
unsigned long new_rate;
int ret = NOTIFY_DONE;
new_rate = clk_recalc(clk, parent_rate);
/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
if (clk->notifier_count)
ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
if (ret & NOTIFY_STOP_MASK) {
pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
__func__, clk->name, ret);
goto out;
}
hlist_for_each_entry(child, &clk->children, child_node) {
ret = __clk_speculate_rates(child, new_rate);
if (ret & NOTIFY_STOP_MASK)
break;
}
out:
return ret;
}
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
struct clk *new_parent, u8 p_index)
{
struct clk *child;
clk->new_rate = new_rate;
clk->new_parent = new_parent;
clk->new_parent_index = p_index;
/* include clk in new parent's PRE_RATE_CHANGE notifications */
clk->new_child = NULL;
if (new_parent && new_parent != clk->parent)
new_parent->new_child = clk;
hlist_for_each_entry(child, &clk->children, child_node) {
child->new_rate = clk_recalc(child, new_rate);
clk_calc_subtree(child, child->new_rate, NULL, 0);
}
}
/*
* calculate the new rates returning the topmost clock that has to be
* changed.
*/
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
struct clk *old_parent, *parent;
unsigned long best_parent_rate = 0;
unsigned long new_rate;
int p_index = 0;
/* sanity */
if (IS_ERR_OR_NULL(clk))
return NULL;
/* save parent rate, if it exists */
parent = old_parent = clk->parent;
if (parent)
best_parent_rate = parent->rate;
/* find the closest rate and parent clk/rate */
if (clk->ops->determine_rate) {
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
&parent);
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
/* pass-through clock without adjustable parent */
clk->new_rate = clk->rate;
return NULL;
} else {
/* pass-through clock with adjustable parent */
top = clk_calc_new_rates(parent, rate);
new_rate = parent->new_rate;
goto out;
}
/* some clocks must be gated to change parent */
if (parent != old_parent &&
(clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
pr_debug("%s: %s not gated but wants to reparent\n",
__func__, clk->name);
return NULL;
}
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
return NULL;
}
}
if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
best_parent_rate != parent->rate)
top = clk_calc_new_rates(parent, best_parent_rate);
out:
clk_calc_subtree(clk, new_rate, parent, p_index);
return top;
}
/*
* Notify about rate changes in a subtree. Always walk down the whole tree
* so that in case of an error we can walk down the whole tree again and
* abort the change.
*/
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
struct clk *child, *tmp_clk, *fail_clk = NULL;
int ret = NOTIFY_DONE;
if (clk->rate == clk->new_rate)
return NULL;
if (clk->notifier_count) {
ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
if (ret & NOTIFY_STOP_MASK)
fail_clk = clk;
}
hlist_for_each_entry(child, &clk->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != clk)
continue;
tmp_clk = clk_propagate_rate_change(child, event);
if (tmp_clk)
fail_clk = tmp_clk;
}
/* handle the new child who might not be in clk->children yet */
if (clk->new_child) {
tmp_clk = clk_propagate_rate_change(clk->new_child, event);
if (tmp_clk)
fail_clk = tmp_clk;
}
return fail_clk;
}
/*
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
static void clk_change_rate(struct clk *clk)
{
struct clk *child;
struct hlist_node *tmp;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk *old_parent;
old_rate = clk->rate;
if (clk->new_parent)
best_parent_rate = clk->new_parent->rate;
else if (clk->parent)
best_parent_rate = clk->parent->rate;
if (clk->new_parent && clk->new_parent != clk->parent) {
old_parent = __clk_set_parent_before(clk, clk->new_parent);
if (clk->ops->set_rate_and_parent) {
skip_set_rate = true;
clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
best_parent_rate,
clk->new_parent_index);
} else if (clk->ops->set_parent) {
clk->ops->set_parent(clk->hw, clk->new_parent_index);
}
__clk_set_parent_after(clk, clk->new_parent, old_parent);
}
if (!skip_set_rate && clk->ops->set_rate)
clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
clk->rate = clk_recalc(clk, best_parent_rate);
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
/*
* Use safe iteration, as change_rate can actually swap parents
* for certain clock types.
*/
hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != clk)
continue;
clk_change_rate(child);
}
/* handle the new child who might not be in clk->children yet */
if (clk->new_child)
clk_change_rate(clk->new_child);
}
/**
* clk_set_rate - specify a new rate for clk
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* In the simplest case clk_set_rate will only adjust the rate of clk.
*
* Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
* propagate up to clk's parent; whether or not this happens depends on the
* outcome of clk's .round_rate implementation. If *parent_rate is unchanged
* after calling .round_rate then upstream parent propagation is ignored. If
* *parent_rate comes back with a new rate for clk's parent then we propagate
* up to clk's parent and set its rate. Upward propagation will continue
* until either a clk does not support the CLK_SET_RATE_PARENT flag or
* .round_rate stops requesting changes to clk's parent_rate.
*
* Rate changes are accomplished via tree traversal that also recalculates the
* rates for the clocks and fires off POST_RATE_CHANGE notifiers.
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_rate(struct clk *clk, unsigned long rate)
{
struct clk *top, *fail_clk;
int ret = 0;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk_get_rate(clk))
goto out;
if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
ret = -EBUSY;
goto out;
}
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(clk, rate);
if (!top) {
ret = -EINVAL;
goto out;
}
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
ret = -EBUSY;
goto out;
}
/* change the rates */
clk_change_rate(top);
out:
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
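/*
 * Illustrative consumer sketch (not in the original source): a clk flagged
 * CLK_SET_RATE_GATE only accepts rate changes while unprepared, so
 * reprogram it before preparing/enabling. foo_reprogram is hypothetical.
 */
static int __maybe_unused foo_reprogram(struct clk *clk, unsigned long rate)
{
	int ret;

	/* returns -EBUSY if a gated-rate clk is already prepared */
	ret = clk_set_rate(clk, rate);
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}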
/**
* clk_get_parent - return the parent of a clk
* @clk: the clk whose parent gets returned
*
* Simply returns clk->parent. Returns NULL if clk is NULL.
*/
struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
clk_prepare_lock();
parent = __clk_get_parent(clk);
clk_prepare_unlock();
return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/*
* .get_parent is mandatory for clocks with multiple possible parents. It is
* optional for single-parent clocks. Always call .get_parent if it is
* available and WARN if it is missing for multi-parent clocks.
*
* For single-parent clocks without .get_parent, first check to see if the
* .parents array exists, and if so use it to avoid an expensive tree
* traversal. If .parents does not exist then walk the tree with __clk_lookup.
*/
static struct clk *__clk_init_parent(struct clk *clk)
{
struct clk *ret = NULL;
u8 index;
/* handle the trivial cases */
if (!clk->num_parents)
goto out;
if (clk->num_parents == 1) {
if (IS_ERR_OR_NULL(clk->parent))
ret = clk->parent = __clk_lookup(clk->parent_names[0]);
ret = clk->parent;
goto out;
}
if (!clk->ops->get_parent) {
WARN(!clk->ops->get_parent,
"%s: multi-parent clocks must implement .get_parent\n",
__func__);
goto out;
}
/*
* Do our best to cache parent clocks in clk->parents. This prevents
* unnecessary and expensive calls to __clk_lookup. We don't set
* clk->parent here; that is done by the calling function
*/
index = clk->ops->get_parent(clk->hw);
if (!clk->parents)
clk->parents =
kcalloc(clk->num_parents, sizeof(struct clk *),
GFP_KERNEL);
ret = clk_get_parent_by_index(clk, index);
out:
return ret;
}
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
clk_reparent(clk, new_parent);
__clk_recalc_accuracies(clk);
__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
* @parent: the new input to clk
*
* Re-parent clk to use parent as its new input source. If clk is in
* prepared state, the clk will get enabled for the duration of this call. If
* that's not acceptable for a specific clk (Eg: the consumer can't handle
* that, the reparenting is glitchy in hardware, etc), use the
* CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
*
* After successfully changing clk's parent clk_set_parent will update the
* clk topology, sysfs topology and propagate rate recalculation via
* __clk_recalc_rates.
*
* Returns 0 on success, -EERROR otherwise.
*/
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
int p_index = 0;
unsigned long p_rate = 0;
if (!clk)
return 0;
/* verify ops for multi-parent clks */
if ((clk->num_parents > 1) && (!clk->ops->set_parent))
return -ENOSYS;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->parent == parent)
goto out;
/* check that we are allowed to re-parent if the clock is in use */
if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
ret = -EBUSY;
goto out;
}
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(clk, parent);
p_rate = parent->rate;
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, clk->name);
ret = p_index;
goto out;
}
}
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(clk, p_rate);
/* abort if a driver objects */
if (ret & NOTIFY_STOP_MASK)
goto out;
/* do the re-parent */
ret = __clk_set_parent(clk, parent, p_index);
/* propagate rate and accuracy recalculation accordingly */
if (ret) {
__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
} else {
__clk_recalc_rates(clk, POST_RATE_CHANGE);
__clk_recalc_accuracies(clk);
}
out:
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
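/*
 * Illustrative consumer sketch (not in the original source): park a mux on
 * a slow parent, e.g. around a low-power transition. The clock names are
 * hypothetical.
 */
static int __maybe_unused foo_enter_low_power(struct device *dev)
{
	struct clk *mux = clk_get(dev, "cpu_mux");
	struct clk *slow = clk_get(dev, "osc32k");
	int ret = -ENODEV;

	if (!IS_ERR(mux) && !IS_ERR(slow))
		ret = clk_set_parent(mux, slow);

	if (!IS_ERR(slow))
		clk_put(slow);
	if (!IS_ERR(mux))
		clk_put(mux);
	return ret;
}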
/**
* clk_set_phase - adjust the phase shift of a clock signal
* @clk: clock signal source
* @degrees: number of degrees the signal is shifted
*
* Shifts the phase of a clock signal by the specified
* degrees. Returns 0 on success, -EERROR otherwise.
*
* This function makes no distinction about the input or reference
* signal that we adjust the clock signal phase against. For example, with
* phase-locked-loop clock signal generators we may shift phase with
* respect to the feedback clock signal input, but for other cases the
* clock phase may be shifted with respect to some other, unspecified
* signal.
*
* Additionally the concept of phase shift does not propagate through
* the clock tree hierarchy, which sets it apart from clock rates and
* clock accuracy. A parent clock phase attribute does not have an
* impact on the phase attribute of a child clock.
*/
int clk_set_phase(struct clk *clk, int degrees)
{
int ret = 0;
if (!clk)
goto out;
/* sanity check degrees */
degrees %= 360;
if (degrees < 0)
degrees += 360;
clk_prepare_lock();
if (!clk->ops->set_phase)
goto out_unlock;
ret = clk->ops->set_phase(clk->hw, degrees);
if (!ret)
clk->phase = degrees;
out_unlock:
clk_prepare_unlock();
out:
return ret;
}
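/*
 * Illustrative sketch (not in the original source): degrees are normalized
 * into [0, 360) above, so a negative shift maps to its positive complement.
 */
static int __maybe_unused foo_quadrature_setup(struct clk *clk)
{
	return clk_set_phase(clk, -90);	/* folded to 270 by the normalization */
}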
/**
* clk_get_phase - return the phase shift of a clock signal
* @clk: clock signal source
*
* Returns the phase shift of a clock node in degrees, otherwise returns
* -EERROR.
*/
int clk_get_phase(struct clk *clk)
{
int ret = 0;
if (!clk)
goto out;
clk_prepare_lock();
ret = clk->phase;
clk_prepare_unlock();
out:
return ret;
}
/**
* __clk_init - initialize the data structures in a struct clk
* @dev: device initializing this clk, placeholder for now
* @clk: clk being initialized
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
*/
int __clk_init(struct device *dev, struct clk *clk)
{
int i, ret = 0;
struct clk *orphan;
struct hlist_node *tmp2;
if (!clk)
return -EINVAL;
clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
pr_debug("%s: clk %s already initialized\n",
__func__, clk->name);
ret = -EEXIST;
goto out;
}
/* check that clk_ops are sane. See Documentation/clk.txt */
if (clk->ops->set_rate &&
!((clk->ops->round_rate || clk->ops->determine_rate) &&
clk->ops->recalc_rate)) {
pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
}
if (clk->ops->set_parent && !clk->ops->get_parent) {
pr_warning("%s: %s must implement .get_parent & .set_parent\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
}
if (clk->ops->set_rate_and_parent &&
!(clk->ops->set_parent && clk->ops->set_rate)) {
pr_warn("%s: %s must implement .set_parent & .set_rate\n",
__func__, clk->name);
ret = -EINVAL;
goto out;
}
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
WARN(!clk->parent_names[i],
"%s: invalid NULL in %s's .parent_names\n",
__func__, clk->name);
/*
* Allocate an array of struct clk *'s to avoid unnecessary string
* look-ups of clk's possible parents. This can fail for clocks passed
* in to clk_init during early boot; thus any access to clk->parents[]
* must always check for a NULL pointer and try to populate it if
* necessary.
*
* If clk->parents is not NULL we skip this entire block. This allows
* for clock drivers to statically initialize clk->parents.
*/
if (clk->num_parents > 1 && !clk->parents) {
clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
GFP_KERNEL);
/*
* __clk_lookup returns NULL for parents that have not been
* clk_init'd; thus any access to clk->parents[] must check
* for a NULL pointer. We can always perform lazy lookups for
* missing parents later on.
*/
if (clk->parents)
for (i = 0; i < clk->num_parents; i++)
clk->parents[i] =
__clk_lookup(clk->parent_names[i]);
}
clk->parent = __clk_init_parent(clk);
/*
* Populate clk->parent if parent has already been __clk_init'd. If
* parent has not yet been __clk_init'd then place clk in the orphan
* list. If clk has set the CLK_IS_ROOT flag then place it in the root
* clk list.
*
* Every time a new clk is clk_init'd then we walk the list of orphan
* clocks and re-parent any that are children of the clock currently
* being clk_init'd.
*/
if (clk->parent)
hlist_add_head(&clk->child_node,
&clk->parent->children);
else if (clk->flags & CLK_IS_ROOT)
hlist_add_head(&clk->child_node, &clk_root_list);
else
hlist_add_head(&clk->child_node, &clk_orphan_list);
/*
* Set clk's accuracy. The preferred method is to use
* .recalc_accuracy. For simple clocks and lazy developers the default
* fallback is to use the parent's accuracy. If a clock doesn't have a
* parent (or is orphaned) then accuracy is set to zero (perfect
* clock).
*/
if (clk->ops->recalc_accuracy)
clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
__clk_get_accuracy(clk->parent));
else if (clk->parent)
clk->accuracy = clk->parent->accuracy;
else
clk->accuracy = 0;
/*
* Set clk's phase.
* Since a phase is by definition relative to its parent, just
* query the current clock phase, or just assume it's in phase.
*/
if (clk->ops->get_phase)
clk->phase = clk->ops->get_phase(clk->hw);
else
clk->phase = 0;
/*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
* parent's rate. If a clock doesn't have a parent (or is orphaned)
* then rate is set to zero.
*/
if (clk->ops->recalc_rate)
clk->rate = clk->ops->recalc_rate(clk->hw,
__clk_get_rate(clk->parent));
else if (clk->parent)
clk->rate = clk->parent->rate;
else
clk->rate = 0;
clk_debug_register(clk);
/*
* walk the list of orphan clocks and reparent any that are children of
* this clock
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->num_parents && orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
if (!strcmp(clk->name, orphan->parent_names[i]))
__clk_reparent(orphan, clk);
continue;
}
for (i = 0; i < orphan->num_parents; i++)
if (!strcmp(clk->name, orphan->parent_names[i])) {
__clk_reparent(orphan, clk);
break;
}
}
/*
* optional platform-specific magic
*
* The .init callback is not used by any of the basic clock types, but
* exists for weird hardware that must perform initialization magic.
* Please consider other ways of solving initialization problems before
* using this callback, as its use is discouraged.
*/
if (clk->ops->init)
clk->ops->init(clk->hw);
kref_init(&clk->ref);
out:
clk_prepare_unlock();
return ret;
}
/**
* __clk_register - register a clock and return a cookie.
*
* Same as clk_register, except that the .clk field inside hw shall point to a
* preallocated (generally statically allocated) struct clk. None of the fields
* of the struct clk need to be initialized.
*
* The data pointed to by .init and .clk field shall NOT be marked as init
* data.
*
* __clk_register is only exposed via clk-private.h and is intended for use with
* very large numbers of clocks that need to be statically initialized. It is
* a layering violation to include clk-private.h from any code which implements
* a clock's .ops; as such any statically initialized clock data MUST be in a
* separate C file from the logic that implements its operations. Returns 0
* on success, otherwise an error code.
*/
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
int ret;
struct clk *clk;
clk = hw->clk;
clk->name = hw->init->name;
clk->ops = hw->init->ops;
clk->hw = hw;
clk->flags = hw->init->flags;
clk->parent_names = hw->init->parent_names;
clk->num_parents = hw->init->num_parents;
if (dev && dev->driver)
clk->owner = dev->driver->owner;
else
clk->owner = NULL;
ret = __clk_init(dev, clk);
if (ret)
return ERR_PTR(ret);
return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjunction with the
* rest of the clock API. In the event of an error clk_register will return an
* error code; drivers must test for an error code after calling clk_register.
*/
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
int i, ret;
struct clk *clk;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
if (!clk) {
pr_err("%s: could not allocate clk\n", __func__);
ret = -ENOMEM;
goto fail_out;
}
clk->name = kstrdup(hw->init->name, GFP_KERNEL);
if (!clk->name) {
pr_err("%s: could not allocate clk->name\n", __func__);
ret = -ENOMEM;
goto fail_name;
}
clk->ops = hw->init->ops;
if (dev && dev->driver)
clk->owner = dev->driver->owner;
clk->hw = hw;
clk->flags = hw->init->flags;
clk->num_parents = hw->init->num_parents;
hw->clk = clk;
/* allocate local copy in case parent_names is __initdata */
clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
GFP_KERNEL);
if (!clk->parent_names) {
pr_err("%s: could not allocate clk->parent_names\n", __func__);
ret = -ENOMEM;
goto fail_parent_names;
}
/* copy each string name in case parent_names is __initdata */
for (i = 0; i < clk->num_parents; i++) {
clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
GFP_KERNEL);
if (!clk->parent_names[i]) {
pr_err("%s: could not copy parent_names\n", __func__);
ret = -ENOMEM;
goto fail_parent_names_copy;
}
}
ret = __clk_init(dev, clk);
if (!ret)
return clk;
fail_parent_names_copy:
while (--i >= 0)
kfree(clk->parent_names[i]);
kfree(clk->parent_names);
fail_parent_names:
kfree(clk->name);
fail_name:
kfree(clk);
fail_out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
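/*
 * Illustrative provider sketch (not in the original source): fill a
 * struct clk_init_data, point hw->init at it and register. The "foo"
 * names and the parent are hypothetical; clk_register() copies the
 * strings, so init may live on the stack.
 */
static const char *foo_parent_names[] = { "osc24m" };

static struct clk *__maybe_unused foo_register(struct device *dev,
					       struct clk_hw *hw,
					       const struct clk_ops *ops)
{
	struct clk_init_data init = {
		.name		= "foo",
		.ops		= ops,
		.parent_names	= foo_parent_names,
		.num_parents	= ARRAY_SIZE(foo_parent_names),
	};

	hw->init = &init;
	return clk_register(dev, hw);	/* test the result with IS_ERR() */
}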
/*
* Free memory allocated for a clock.
* Caller must hold prepare_lock.
*/
static void __clk_release(struct kref *ref)
{
struct clk *clk = container_of(ref, struct clk, ref);
int i = clk->num_parents;
kfree(clk->parents);
while (--i >= 0)
kfree(clk->parent_names[i]);
kfree(clk->parent_names);
kfree(clk->name);
kfree(clk);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
* consumer calls clk_put() and the struct clk object is freed.
*/
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
return -ENXIO;
}
static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
WARN_ON_ONCE(1);
}
static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
return -ENXIO;
}
static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
return -ENXIO;
}
static const struct clk_ops clk_nodrv_ops = {
.enable = clk_nodrv_prepare_enable,
.disable = clk_nodrv_disable_unprepare,
.prepare = clk_nodrv_prepare_enable,
.unprepare = clk_nodrv_disable_unprepare,
.set_rate = clk_nodrv_set_rate,
.set_parent = clk_nodrv_set_parent,
};
/**
* clk_unregister - unregister a currently registered clock
* @clk: clock to unregister
*/
void clk_unregister(struct clk *clk)
{
unsigned long flags;
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_debug_unregister(clk);
clk_prepare_lock();
if (clk->ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
goto unlock;
}
/*
* Assign empty clock ops for consumers that might still hold
* a reference to this clock.
*/
flags = clk_enable_lock();
clk->ops = &clk_nodrv_ops;
clk_enable_unlock(flags);
if (!hlist_empty(&clk->children)) {
struct clk *child;
struct hlist_node *t;
/* Reparent all children to the orphan list. */
hlist_for_each_entry_safe(child, t, &clk->children, child_node)
clk_set_parent(child, NULL);
}
hlist_del_init(&clk->child_node);
if (clk->prepare_count)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->name);
kref_put(&clk->ref, __clk_release);
unlock:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
/**
* devm_clk_register - resource managed clk_register()
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
* Managed clk_register(). Clocks returned from this function are
* automatically clk_unregister()ed on driver detach. See clk_register() for
* more information.
*/
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
struct clk *clk;
struct clk **clkp;
clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
clk = clk_register(dev, hw);
if (!IS_ERR(clk)) {
*clkp = clk;
devres_add(dev, clkp);
} else {
devres_free(clkp);
}
return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
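/*
* A minimal consumer-side sketch (kept out of the build with #if 0; the
* foo_* names are hypothetical and not part of this file): a platform
* driver registers a clock whose lifetime follows the device, so no
* explicit clk_unregister() call is needed on the remove path.
*/
#if 0
static int foo_clk_probe(struct platform_device *pdev)
{
struct clk_hw *hw;
struct clk *clk;
hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
if (!hw)
return -ENOMEM;
hw->init = &foo_clk_init_data; /* hypothetical name/ops/parent_names */
clk = devm_clk_register(&pdev->dev, hw);
if (IS_ERR(clk))
return PTR_ERR(clk);
return 0;
}
#endif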
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk *c = res;
if (WARN_ON(!c))
return 0;
return c == data;
}
/**
* devm_clk_unregister - resource managed clk_unregister()
* @clk: clock to unregister
*
* Deallocate a clock allocated with devm_clk_register(). Normally
* this function will not need to be called and the resource management
* code will ensure that the resource is freed.
*/
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
/*
* clkdev helpers
*/
int __clk_get(struct clk *clk)
{
if (clk) {
if (!try_module_get(clk->owner))
return 0;
kref_get(&clk->ref);
}
return 1;
}
void __clk_put(struct clk *clk)
{
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();
module_put(clk->owner);
}
/*** clk rate change notifiers ***/
/**
* clk_notifier_register - add a clk rate change notifier
* @clk: struct clk * to watch
* @nb: struct notifier_block * with callback info
*
* Request notification when clk's rate changes. This uses an SRCU
* notifier because we want it to block and notifier unregistrations are
* uncommon. The callbacks associated with the notifier must not
* re-enter into the clk framework by calling any top-level clk APIs;
* doing so would recursively acquire the prepare_lock mutex and deadlock.
*
* In all notification cases (pre, post and abort rate change) the
* original clock rate is passed to the callback via struct
* clk_notifier_data.old_rate and the new frequency is passed via struct
* clk_notifier_data.new_rate.
*
* clk_notifier_register() must be called from non-atomic context.
* Returns -EINVAL if called with null arguments, -ENOMEM upon
* allocation failure; otherwise, passes along the return value of
* srcu_notifier_chain_register().
*/
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
struct clk_notifier *cn;
int ret = -ENOMEM;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
break;
/* if clk wasn't in the notifier list, allocate new clk_notifier */
if (cn->clk != clk) {
cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
if (!cn)
goto out;
cn->clk = clk;
srcu_init_notifier_head(&cn->notifier_head);
list_add(&cn->node, &clk_notifier_list);
}
ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
clk->notifier_count++;
out:
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
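/*
* A minimal sketch of a rate-change callback (kept out of the build with
* #if 0; the board_* names and the 100 MHz limit are made up): the
* callback reads old_rate/new_rate from struct clk_notifier_data and may
* veto a PRE_RATE_CHANGE by returning NOTIFY_BAD.
*/
#if 0
static int board_rate_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
struct clk_notifier_data *cnd = data;
if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000UL)
return NOTIFY_BAD; /* abort the rate change */
return NOTIFY_OK;
}
static struct notifier_block board_rate_nb = {
.notifier_call = board_rate_cb,
};
/* ... ret = clk_notifier_register(clk, &board_rate_nb); ... */
#endif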
/**
* clk_notifier_unregister - remove a clk rate change notifier
* @clk: struct clk *
* @nb: struct notifier_block * with callback info
*
* Requests no further notification for changes to 'clk' and frees the
* memory allocated in clk_notifier_register().
*
* Returns -EINVAL if called with null arguments; otherwise, passes
* along the return value of srcu_notifier_chain_unregister().
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
struct clk_notifier *cn = NULL;
int ret = -EINVAL;
if (!clk || !nb)
return -EINVAL;
clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
break;
if (cn->clk == clk) {
ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
clk->notifier_count--;
/* XXX the notifier code should handle this better */
if (!cn->notifier_head.head) {
srcu_cleanup_notifier_head(&cn->notifier_head);
list_del(&cn->node);
kfree(cn);
}
} else {
ret = -ENOENT;
}
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
#ifdef CONFIG_OF
/**
* struct of_clk_provider - Clock provider registration structure
* @link: Entry in global list of clock providers
* @node: Pointer to device tree node of clock provider
* @get: Get clock callback. Returns NULL or a struct clk for the
* given clock specifier
* @data: context pointer to be passed into @get callback
*/
struct of_clk_provider {
struct list_head link;
struct device_node *node;
struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
void *data;
};
static const struct of_device_id __clk_of_table_sentinel
__used __section(__clk_of_table_end);
static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);
/* of_clk_provider list locking helpers */
void of_clk_lock(void)
{
mutex_lock(&of_clk_mutex);
}
void of_clk_unlock(void)
{
mutex_unlock(&of_clk_mutex);
}
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data)
{
return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_onecell_data *clk_data = data;
unsigned int idx = clkspec->args[0];
if (idx >= clk_data->clk_num) {
pr_err("%s: invalid clock index %d\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
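/*
* A minimal provider-side sketch (kept out of the build with #if 0; the
* soc_* names and SOC_NR_CLKS are hypothetical): a platform exposes an
* array of clocks through of_clk_src_onecell_get, so consumers resolve
* "clocks = <&provider index>;" phandles by index.
*/
#if 0
static struct clk *soc_clks[SOC_NR_CLKS];
static struct clk_onecell_data soc_clk_data = {
.clks = soc_clks,
.clk_num = ARRAY_SIZE(soc_clks),
};
static void __init soc_clocks_init(struct device_node *np)
{
/* ... fill soc_clks[] via clk_register() ... */
of_clk_add_provider(np, of_clk_src_onecell_get, &soc_clk_data);
}
#endif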
/**
* of_clk_add_provider() - Register a clock provider for a node
* @np: Device node pointer associated with clock provider
* @clk_src_get: callback for decoding clock
* @data: context pointer for @clk_src_get callback.
*/
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
void *data),
void *data)
{
struct of_clk_provider *cp;
int ret;
cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
if (!cp)
return -ENOMEM;
cp->node = of_node_get(np);
cp->data = data;
cp->get = clk_src_get;
mutex_lock(&of_clk_mutex);
list_add(&cp->link, &of_clk_providers);
mutex_unlock(&of_clk_mutex);
pr_debug("Added clock from %s\n", np->full_name);
ret = of_clk_set_defaults(np, true);
if (ret < 0)
of_clk_del_provider(np);
return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
/**
* of_clk_del_provider() - Remove a previously registered clock provider
* @np: Device node pointer associated with clock provider
*/
void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
of_node_put(cp->node);
kfree(cp);
break;
}
}
mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
/* Check if we have such a provider in our list */
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
clk = provider->get(clkspec, provider->data);
if (!IS_ERR(clk))
break;
}
return clk;
}
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
struct clk *clk;
mutex_lock(&of_clk_mutex);
clk = __of_clk_get_from_provider(clkspec);
mutex_unlock(&of_clk_mutex);
return clk;
}
int of_clk_get_parent_count(struct device_node *np)
{
return of_count_phandle_with_args(np, "clocks", "#clock-cells");
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
const char *of_clk_get_parent_name(struct device_node *np, int index)
{
struct of_phandle_args clkspec;
struct property *prop;
const char *clk_name;
const __be32 *vp;
u32 pv;
int rc;
int count;
if (index < 0)
return NULL;
rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
&clkspec);
if (rc)
return NULL;
index = clkspec.args_count ? clkspec.args[0] : 0;
count = 0;
/* if there is a clock-indices property, use it to translate the index
* specified into an array offset for the clock-output-names property.
*/
of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
if (index == pv) {
index = count;
break;
}
count++;
}
if (of_property_read_string_index(clkspec.np, "clock-output-names",
index,
&clk_name) < 0)
clk_name = clkspec.np->name;
of_node_put(clkspec.np);
return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
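/*
* A minimal sketch (kept out of the build with #if 0; foo_* is
* hypothetical) of how clock drivers typically combine the two helpers
* above to build a parent_names array from the device tree.
*/
#if 0
static const char **foo_get_parent_names(struct device_node *np, int *num)
{
const char **names;
int i;
*num = of_clk_get_parent_count(np);
if (*num <= 0)
return NULL;
names = kcalloc(*num, sizeof(*names), GFP_KERNEL);
if (!names)
return NULL;
for (i = 0; i < *num; i++)
names[i] = of_clk_get_parent_name(np, i);
return names;
}
#endif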
struct clock_provider {
of_clk_init_cb_t clk_init_cb;
struct device_node *np;
struct list_head node;
};
static LIST_HEAD(clk_provider_list);
/*
* This function checks whether a node's parent clocks are available.
* A parent clock is considered ready once the provider that supplies
* it has been initialized.
*/
static int parent_ready(struct device_node *np)
{
int i = 0;
while (true) {
struct clk *clk = of_clk_get(np, i);
/* this parent is ready, we can check the next one */
if (!IS_ERR(clk)) {
clk_put(clk);
i++;
continue;
}
/* at least one parent is not ready, we exit now */
if (PTR_ERR(clk) == -EPROBE_DEFER)
return 0;
/*
* Here we assume the device tree is written correctly,
* so any other error means there are no more parents.
* Since we did not exit earlier, all previous parents
* are ready. A clock without parents has nothing to
* wait for, so we treat their absence as being ready.
*/
return 1;
}
}
/**
* of_clk_init() - Scan and init clock providers from the DT
* @matches: array of compatible values and init functions for providers.
*
* This function scans the device tree for matching clock providers
* and calls their initialization functions, trying to follow the
* dependency order between providers.
*/
void __init of_clk_init(const struct of_device_id *matches)
{
const struct of_device_id *match;
struct device_node *np;
struct clock_provider *clk_provider, *next;
bool is_init_done;
bool force = false;
if (!matches)
matches = &__clk_of_table;
/* First prepare the list of the clocks providers */
for_each_matching_node_and_match(np, matches, &match) {
struct clock_provider *parent =
kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
parent->clk_init_cb = match->data;
parent->np = np;
list_add_tail(&parent->node, &clk_provider_list);
}
while (!list_empty(&clk_provider_list)) {
is_init_done = false;
list_for_each_entry_safe(clk_provider, next,
&clk_provider_list, node) {
if (force || parent_ready(clk_provider->np)) {
clk_provider->clk_init_cb(clk_provider->np);
of_clk_set_defaults(clk_provider->np, true);
list_del(&clk_provider->node);
kfree(clk_provider);
is_init_done = true;
}
}
/*
* We didn't manage to initialize any of the
* remaining providers during the last loop, so now we
* initialize all the remaining ones unconditionally
* in case the clock parent was not mandatory
*/
if (!is_init_done)
force = true;
}
}
#endif
| gpl-2.0 |
danielschwierzeck/linux | drivers/net/ethernet/toshiba/spider_net.c | 589 | 71206 | /*
* Network device driver for Cell Processor-Based Blade and Celleb platform
*
* (C) Copyright IBM Corp. 2005
* (C) Copyright 2006 TOSHIBA CORPORATION
*
* Authors : Utz Bacher <utz.bacher@de.ibm.com>
* Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/pci-bridge.h>
#include <net/checksum.h>
#include "spider_net.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
"<Jens.Osterkamp@de.ibm.com>");
MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
module_param(rx_descriptors, int, 0444);
module_param(tx_descriptors, int, 0444);
MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
"in rx chains");
MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
"in tx chain");
char spider_net_driver_name[] = "spidernet";
static const struct pci_device_id spider_net_pci_tbl[] = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
/**
* spider_net_read_reg - reads an SMMIO register of a card
* @card: device structure
* @reg: register to read from
*
* returns the content of the specified SMMIO register.
*/
static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
/* We use the powerpc specific variants instead of readl_be() because
* we know spidernet is not a real PCI device and we can thus avoid the
* performance hit caused by the PCI workarounds.
*/
return in_be32(card->regs + reg);
}
/**
* spider_net_write_reg - writes to an SMMIO register of a card
* @card: device structure
* @reg: register to write to
* @value: value to write into the specified SMMIO register
*/
static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
/* We use the powerpc specific variants instead of writel_be() because
* we know spidernet is not a real PCI device and we can thus avoid the
* performance hit caused by the PCI workarounds.
*/
out_be32(card->regs + reg, value);
}
/**
* spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
* @reg: PHY register
* @val: value to be written to phy register
*
* spider_net_write_phy_register writes to an arbitrary PHY
* register via the spider GPCWOPCMD register. We assume the queue does
* not run full (not more than 15 commands outstanding).
**/
static void
spider_net_write_phy(struct net_device *netdev, int mii_id,
int reg, int val)
{
struct spider_net_card *card = netdev_priv(netdev);
u32 writevalue;
writevalue = ((u32)mii_id << 21) |
((u32)reg << 16) | ((u32)val);
spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}
/**
* spider_net_read_phy - read from phy register
* @netdev: network device to be read from
* @mii_id: id of MII
* @reg: PHY register
*
* Returns value read from PHY register
*
* spider_net_read_phy reads from an arbitrary PHY
* register via the spider GPCROPCMD register
**/
static int
spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
{
struct spider_net_card *card = netdev_priv(netdev);
u32 readvalue;
readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
* interrupt, as we poll for the completion of the read operation
* in spider_net_read_phy. Should take about 50 us */
do {
readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
} while (readvalue & SPIDER_NET_GPREXEC);
readvalue &= SPIDER_NET_GPRDAT_MASK;
return readvalue;
}
/**
* spider_net_setup_aneg - initial auto-negotiation setup
* @card: device structure
**/
static void
spider_net_setup_aneg(struct spider_net_card *card)
{
struct mii_phy *phy = &card->phy;
u32 advertise = 0;
u16 bmsr, estat;
bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
if (bmsr & BMSR_10HALF)
advertise |= ADVERTISED_10baseT_Half;
if (bmsr & BMSR_10FULL)
advertise |= ADVERTISED_10baseT_Full;
if (bmsr & BMSR_100HALF)
advertise |= ADVERTISED_100baseT_Half;
if (bmsr & BMSR_100FULL)
advertise |= ADVERTISED_100baseT_Full;
if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
advertise |= SUPPORTED_1000baseT_Full;
if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
advertise |= SUPPORTED_1000baseT_Half;
sungem_phy_probe(phy, phy->mii_id);
phy->def->ops->setup_aneg(phy, advertise);
}
/**
* spider_net_rx_irq_off - switch off rx irq on this spider card
* @card: device structure
*
* switches off rx irq by masking them out in the GHIINTnMSK register
*/
static void
spider_net_rx_irq_off(struct spider_net_card *card)
{
u32 regvalue;
regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
}
/**
* spider_net_rx_irq_on - switch on rx irq on this spider card
* @card: device structure
*
* switches on rx irq by enabling them in the GHIINTnMSK register
*/
static void
spider_net_rx_irq_on(struct spider_net_card *card)
{
u32 regvalue;
regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
}
/**
* spider_net_set_promisc - sets the unicast address or the promiscuous mode
* @card: card structure
*
* spider_net_set_promisc sets the unicast destination address filter and
* thus either allows for non-promisc mode or promisc mode
*/
static void
spider_net_set_promisc(struct spider_net_card *card)
{
u32 macu, macl;
struct net_device *netdev = card->netdev;
if (netdev->flags & IFF_PROMISC) {
/* clear destination entry 0 */
spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
SPIDER_NET_PROMISC_VALUE);
} else {
macu = netdev->dev_addr[0];
macu <<= 8;
macu |= netdev->dev_addr[1];
memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
macu |= SPIDER_NET_UA_DESCR_VALUE;
spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
SPIDER_NET_NONPROMISC_VALUE);
}
}
/**
* spider_net_get_descr_status - returns the status of a descriptor
* @hwdescr: hardware descriptor to look at
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
static inline int
spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
{
return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
/**
* spider_net_free_chain - free descriptor chain
* @card: card structure
* @chain: address of chain
*
*/
static void
spider_net_free_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain)
{
struct spider_net_descr *descr;
descr = chain->ring;
do {
descr->bus_addr = 0;
descr->hwdescr->next_descr_addr = 0;
descr = descr->next;
} while (descr != chain->ring);
dma_free_coherent(&card->pdev->dev,
chain->num_desc * sizeof(struct spider_net_hw_descr),
chain->hwring, chain->dma_addr);
}
/**
* spider_net_init_chain - alloc and link descriptor chain
* @card: card structure
* @chain: address of chain
*
* We manage a circular list that mirrors the hardware structure,
* except that the hardware uses bus addresses.
*
* Returns 0 on success, <0 on failure
*/
static int
spider_net_init_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain)
{
int i;
struct spider_net_descr *descr;
struct spider_net_hw_descr *hwdescr;
dma_addr_t buf;
size_t alloc_size;
alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
&chain->dma_addr, GFP_KERNEL);
if (!chain->hwring)
return -ENOMEM;
memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
/* Set up the hardware pointers in each descriptor */
descr = chain->ring;
hwdescr = chain->hwring;
buf = chain->dma_addr;
for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
hwdescr->next_descr_addr = 0;
descr->hwdescr = hwdescr;
descr->bus_addr = buf;
descr->next = descr + 1;
descr->prev = descr - 1;
buf += sizeof(struct spider_net_hw_descr);
}
/* do actual circular list */
(descr-1)->next = chain->ring;
chain->ring->prev = descr-1;
spin_lock_init(&chain->lock);
chain->head = chain->ring;
chain->tail = chain->ring;
return 0;
}
/**
* spider_net_free_rx_chain_contents - frees descr contents in rx chain
* @card: card structure
*/
static void
spider_net_free_rx_chain_contents(struct spider_net_card *card)
{
struct spider_net_descr *descr;
descr = card->rx_chain.head;
do {
if (descr->skb) {
pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
SPIDER_NET_MAX_FRAME,
PCI_DMA_BIDIRECTIONAL);
dev_kfree_skb(descr->skb);
descr->skb = NULL;
}
descr = descr->next;
} while (descr != card->rx_chain.head);
}
/**
* spider_net_prepare_rx_descr - Reinitialize RX descriptor
* @card: card structure
* @descr: descriptor to re-init
*
* Return 0 on success, <0 on failure.
*
* Allocates a new rx skb, iommu-maps it and attaches it to the
* descriptor. Mark the descriptor as activated, ready-to-use.
*/
static int
spider_net_prepare_rx_descr(struct spider_net_card *card,
struct spider_net_descr *descr)
{
struct spider_net_hw_descr *hwdescr = descr->hwdescr;
dma_addr_t buf;
int offset;
int bufsize;
/* we need to round up the buffer size to a multiple of 128 */
bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
(~(SPIDER_NET_RXBUF_ALIGN - 1));
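/* worked example, assuming a 2312-byte max frame:
* (2312 + 127) & ~127 = 2432, i.e. what ALIGN() would return */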
/* and we need to have it 128 byte aligned, therefore we allocate a
* bit more */
/* allocate an skb */
descr->skb = netdev_alloc_skb(card->netdev,
bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
if (!descr->skb) {
if (netif_msg_rx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev,
"Not enough memory to allocate rx buffer\n");
card->spider_stats.alloc_rx_skb_error++;
return -ENOMEM;
}
hwdescr->buf_size = bufsize;
hwdescr->result_size = 0;
hwdescr->valid_size = 0;
hwdescr->data_status = 0;
hwdescr->data_error = 0;
offset = ((unsigned long)descr->skb->data) &
(SPIDER_NET_RXBUF_ALIGN - 1);
if (offset)
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
/* iommu-map the skb */
buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(card->pdev, buf)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
if (netif_msg_rx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
card->spider_stats.rx_iommu_map_error++;
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
} else {
hwdescr->buf_addr = buf;
wmb();
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
SPIDER_NET_DMAC_NOINTR_COMPLETE;
}
return 0;
}
/**
* spider_net_enable_rxchtails - sets RX dmac chain tail addresses
* @card: card structure
*
* spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
* chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac.
*/
static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
/* assume chain is aligned correctly */
spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
card->rx_chain.tail->bus_addr);
}
/**
* spider_net_enable_rxdmac - enables a receive DMA controller
* @card: card structure
*
* spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
* in the GDADMACCNTR register
*/
static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
wmb();
spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
SPIDER_NET_DMA_RX_VALUE);
}
/**
* spider_net_disable_rxdmac - disables the receive DMA controller
* @card: card structure
*
* spider_net_disable_rxdmac terminates processing on the DMA controller
* by turning off the DMA controller, with the force-end flag set.
*/
static inline void
spider_net_disable_rxdmac(struct spider_net_card *card)
{
spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
SPIDER_NET_DMA_RX_FEND_VALUE);
}
/**
* spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
* @card: card structure
*
* refills descriptors in the rx chain: allocates skbs and iommu-maps them.
*/
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain = &card->rx_chain;
unsigned long flags;
/* one context doing the refill (and a second context seeing that
* and omitting it) is ok. If called by NAPI, we'll be called again
* as spider_net_decode_one_descr is called several times. If some
* interrupt calls us, the NAPI is about to clean up anyway. */
if (!spin_trylock_irqsave(&chain->lock, flags))
return;
while (spider_net_get_descr_status(chain->head->hwdescr) ==
SPIDER_NET_DESCR_NOT_IN_USE) {
if (spider_net_prepare_rx_descr(card, chain->head))
break;
chain->head = chain->head->next;
}
spin_unlock_irqrestore(&chain->lock, flags);
}
/**
* spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
* @card: card structure
*
* Returns 0 on success, <0 on failure.
*/
static int
spider_net_alloc_rx_skbs(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *start = chain->tail;
struct spider_net_descr *descr = start;
/* Link up the hardware chain pointers */
do {
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
descr = descr->next;
} while (descr != start);
/* Put at least one buffer into the chain. If this fails,
* we've got a problem. If not, spider_net_refill_rx_chain
* will do the rest at the end of this function. */
if (spider_net_prepare_rx_descr(card, chain->head))
goto error;
else
chain->head = chain->head->next;
/* This will allocate the rest of the rx buffers;
* if not, it's business as usual later on. */
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
return 0;
error:
spider_net_free_rx_chain_contents(card);
return -ENOMEM;
}
/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
* @addr: multicast address
*
* returns the hash value.
*
* spider_net_get_multicast_hash calculates a hash value for a given multicast
* address, that is used to set the multicast filter tables
*/
static u8
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
{
u32 crc;
u8 hash;
char addr_for_crc[ETH_ALEN] = { 0, };
int i, bit;
for (i = 0; i < ETH_ALEN * 8; i++) {
bit = (addr[i / 8] >> (i % 8)) & 1;
addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
}
crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
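/* build an 8-bit table index: the top five CRC bits become hash
* bits 7..3, the lowest three CRC bits fill bits 2..0 */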
hash = (crc >> 27);
hash <<= 3;
hash |= crc & 7;
hash &= 0xff;
return hash;
}
/**
* spider_net_set_multi - sets multicast addresses and promisc flags
* @netdev: interface device structure
*
* spider_net_set_multi configures multicast addresses as needed for the
* netdev interface. It also sets up multicast, allmulti and promisc
* flags appropriately
*/
static void
spider_net_set_multi(struct net_device *netdev)
{
struct netdev_hw_addr *ha;
u8 hash;
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
spider_net_set_promisc(card);
if (netdev->flags & IFF_ALLMULTI) {
for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
set_bit(i, bitmask);
}
goto write_hash;
}
/* well, we know what the broadcast hash value is: it's 0xfd
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
netdev_for_each_mc_addr(ha, netdev) {
hash = spider_net_get_multicast_hash(netdev, ha->addr);
set_bit(hash, bitmask);
}
write_hash:
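/* each 32-bit GMRMHFILnR register packs four hash table entries,
* one per byte; a byte value of 0x08 marks that entry as set */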
for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
reg = 0;
if (test_bit(i * 4, bitmask))
reg += 0x08;
reg <<= 8;
if (test_bit(i * 4 + 1, bitmask))
reg += 0x08;
reg <<= 8;
if (test_bit(i * 4 + 2, bitmask))
reg += 0x08;
reg <<= 8;
if (test_bit(i * 4 + 3, bitmask))
reg += 0x08;
spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
}
}
/**
* spider_net_prepare_tx_descr - fill tx descriptor with skb data
* @card: card structure
* @skb: packet to use
*
* returns 0 on success, <0 on failure.
*
* fills out the descriptor structure with skb data and len. Copies data,
* if needed (32bit DMA!)
*/
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
struct sk_buff *skb)
{
struct spider_net_descr_chain *chain = &card->tx_chain;
struct spider_net_descr *descr;
struct spider_net_hw_descr *hwdescr;
dma_addr_t buf;
unsigned long flags;
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(card->pdev, buf)) {
if (netif_msg_tx_err(card) && net_ratelimit())
dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len);
card->spider_stats.tx_iommu_map_error++;
return -ENOMEM;
}
spin_lock_irqsave(&chain->lock, flags);
descr = card->tx_chain.head;
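/* treat the ring as full once advancing head would run into tail;
* the gap keeps the full and empty states distinguishable */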
if (descr->next == chain->tail->prev) {
spin_unlock_irqrestore(&chain->lock, flags);
pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
return -ENOMEM;
}
hwdescr = descr->hwdescr;
chain->head = descr->next;
descr->skb = skb;
hwdescr->buf_addr = buf;
hwdescr->buf_size = skb->len;
hwdescr->next_descr_addr = 0;
hwdescr->data_status = 0;
hwdescr->dmac_cmd_status =
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
spin_unlock_irqrestore(&chain->lock, flags);
if (skb->ip_summed == CHECKSUM_PARTIAL)
switch (ip_hdr(skb)->protocol) {
case IPPROTO_TCP:
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
break;
case IPPROTO_UDP:
hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
break;
}
/* Chain the bus address, so that the DMA engine finds this descr. */
wmb();
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
return 0;
}
static int
spider_net_set_low_watermark(struct spider_net_card *card)
{
struct spider_net_descr *descr = card->tx_chain.tail;
struct spider_net_hw_descr *hwdescr;
unsigned long flags;
int status;
int cnt=0;
int i;
/* Measure the length of the queue. Measurement does not
* need to be precise -- does not need a lock. */
while (descr != card->tx_chain.head) {
status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
break;
descr = descr->next;
cnt++;
}
/* If TX queue is short, don't even bother with interrupts */
if (cnt < card->tx_chain.num_desc/4)
return cnt;
/* Set low-watermark 3/4th's of the way into the queue. */
descr = card->tx_chain.tail;
cnt = (cnt*3)/4;
for (i=0;i<cnt; i++)
descr = descr->next;
/* Set the new watermark, clear the old watermark */
spin_lock_irqsave(&card->tx_chain.lock, flags);
descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
if (card->low_watermark && card->low_watermark != descr) {
hwdescr = card->low_watermark->hwdescr;
hwdescr->dmac_cmd_status =
hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
}
card->low_watermark = descr;
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
return cnt;
}
/**
* spider_net_release_tx_chain - processes sent tx descriptors
* @card: adapter structure
* @brutal: if set, don't care about whether descriptor seems to be in use
*
* returns 0 if the tx ring is empty, otherwise 1.
*
* spider_net_release_tx_chain releases the tx descriptors that spider has
* finished with (if non-brutal) or simply release tx descriptors (if brutal).
* If some other context is calling this function, we return 1 so that we're
* scheduled again (if we were scheduled) and will not lose initiative.
*/
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
struct net_device *dev = card->netdev;
struct spider_net_descr_chain *chain = &card->tx_chain;
struct spider_net_descr *descr;
struct spider_net_hw_descr *hwdescr;
struct sk_buff *skb;
u32 buf_addr;
unsigned long flags;
int status;
while (1) {
spin_lock_irqsave(&chain->lock, flags);
if (chain->tail == chain->head) {
spin_unlock_irqrestore(&chain->lock, flags);
return 0;
}
descr = chain->tail;
hwdescr = descr->hwdescr;
status = spider_net_get_descr_status(hwdescr);
switch (status) {
case SPIDER_NET_DESCR_COMPLETE:
dev->stats.tx_packets++;
dev->stats.tx_bytes += descr->skb->len;
break;
case SPIDER_NET_DESCR_CARDOWNED:
if (!brutal) {
spin_unlock_irqrestore(&chain->lock, flags);
return 1;
}
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
case SPIDER_NET_DESCR_FORCE_END:
if (netif_msg_tx_err(card))
dev_err(&card->netdev->dev, "forcing end of tx descriptor "
"with status x%02x\n", status);
dev->stats.tx_errors++;
break;
default:
dev->stats.tx_dropped++;
if (!brutal) {
spin_unlock_irqrestore(&chain->lock, flags);
return 1;
}
}
chain->tail = descr->next;
hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
skb = descr->skb;
descr->skb = NULL;
buf_addr = hwdescr->buf_addr;
spin_unlock_irqrestore(&chain->lock, flags);
/* unmap the skb */
if (skb) {
pci_unmap_single(card->pdev, buf_addr, skb->len,
PCI_DMA_TODEVICE);
dev_consume_skb_any(skb);
}
}
return 0;
}
/**
* spider_net_kick_tx_dma - enables TX DMA processing
* @card: card structure
*
* This routine will start the transmit DMA running if
* it is not already running. This routine need only be
* called when queueing a new packet to an empty tx queue.
* Writes the current tx chain head as start address
* of the tx descriptor chain and enables the transmission
* DMA engine.
*/
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
struct spider_net_descr *descr;
if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
SPIDER_NET_TX_DMA_EN)
goto out;
descr = card->tx_chain.tail;
for (;;) {
if (spider_net_get_descr_status(descr->hwdescr) ==
SPIDER_NET_DESCR_CARDOWNED) {
spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
descr->bus_addr);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
SPIDER_NET_DMA_TX_VALUE);
break;
}
if (descr == card->tx_chain.head)
break;
descr = descr->next;
}
out:
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
/**
* spider_net_xmit - transmits a frame over the device
* @skb: packet to send out
* @netdev: interface device structure
*
* returns 0 on success, !0 on failure
*/
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int cnt;
struct spider_net_card *card = netdev_priv(netdev);
spider_net_release_tx_chain(card, 0);
if (spider_net_prepare_tx_descr(card, skb) != 0) {
netdev->stats.tx_dropped++;
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
cnt = spider_net_set_low_watermark(card);
if (cnt < 5)
spider_net_kick_tx_dma(card);
return NETDEV_TX_OK;
}
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
* @card: card structure
*
* spider_net_cleanup_tx_ring is called either by the tx_timer
* or from the NAPI polling routine.
* This routine releases resources associated with transmitted
* packets, including updating the queue tail pointer.
*/
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
if ((spider_net_release_tx_chain(card, 0) != 0) &&
(card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
netif_wake_queue(card->netdev);
}
}
/**
* spider_net_do_ioctl - called for device ioctls
* @netdev: interface device structure
* @ifr: request parameter structure for ioctl
* @cmd: command code for ioctl
*
* returns 0 on success, <0 on failure. Currently, we have no special ioctls.
* -EOPNOTSUPP is returned if an unknown ioctl is requested
*/
static int
spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
default:
return -EOPNOTSUPP;
}
}
/**
* spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
* @descr: descriptor to process
* @card: card structure
*
* Fills out skb structure and passes the data to the stack.
* The descriptor state is not changed.
*/
static void
spider_net_pass_skb_up(struct spider_net_descr *descr,
struct spider_net_card *card)
{
struct spider_net_hw_descr *hwdescr = descr->hwdescr;
struct sk_buff *skb = descr->skb;
struct net_device *netdev = card->netdev;
u32 data_status = hwdescr->data_status;
u32 data_error = hwdescr->data_error;
skb_put(skb, hwdescr->valid_size);
/* the card seems to add 2 bytes of junk in front
* of the ethernet frame */
#define SPIDER_MISALIGN 2
skb_pull(skb, SPIDER_MISALIGN);
skb->protocol = eth_type_trans(skb, netdev);
/* checksum offload */
skb_checksum_none_assert(skb);
if (netdev->features & NETIF_F_RXCSUM) {
if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN */
}
/* update netdevice statistics */
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
/* pass skb up to stack */
netif_receive_skb(skb);
}
static void show_rx_chain(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *start= chain->tail;
struct spider_net_descr *descr= start;
struct spider_net_hw_descr *hwd = start->hwdescr;
struct device *dev = &card->netdev->dev;
u32 curr_desc, next_desc;
int status;
int tot = 0;
int cnt = 0;
int off = start - chain->ring;
int cstat = hwd->dmac_cmd_status;
dev_info(dev, "Total number of descrs=%d\n",
chain->num_desc);
dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
off, cstat);
curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
status = cstat;
do
{
hwd = descr->hwdescr;
off = descr - chain->ring;
status = hwd->dmac_cmd_status;
if (descr == chain->head)
dev_info(dev, "Chain head is at %d, head status=0x%x\n",
off, status);
if (curr_desc == descr->bus_addr)
dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
off, status);
if (next_desc == descr->bus_addr)
dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
off, status);
if (hwd->next_descr_addr == 0)
dev_info(dev, "chain is cut at %d\n", off);
if (cstat != status) {
int from = (chain->num_desc + off - cnt) % chain->num_desc;
int to = (chain->num_desc + off - 1) % chain->num_desc;
dev_info(dev, "Have %d (from %d to %d) descrs "
"with stat=0x%08x\n", cnt, from, to, cstat);
cstat = status;
cnt = 0;
}
cnt ++;
tot ++;
descr = descr->next;
} while (descr != start);
dev_info(dev, "Last %d descrs with stat=0x%08x "
"for a total of %d descrs\n", cnt, cstat, tot);
#ifdef DEBUG
/* Now dump the whole ring */
descr = start;
do
{
struct spider_net_hw_descr *hwd = descr->hwdescr;
status = spider_net_get_descr_status(hwd);
cnt = descr - chain->ring;
dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
cnt, status, descr->skb);
dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
descr->bus_addr, hwd->buf_addr, hwd->buf_size);
dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
hwd->next_descr_addr, hwd->result_size,
hwd->valid_size);
dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
hwd->dmac_cmd_status, hwd->data_status,
hwd->data_error);
dev_info(dev, "\n");
descr = descr->next;
} while (descr != start);
#endif
}
/**
* spider_net_resync_head_ptr - Advance head ptr past empty descrs
* @card: card structure
*
* If the driver fails to keep up and empty the queue, then the
* hardware will run out of room to put incoming packets. This
* will cause the hardware to skip descrs that are full (instead
* of halting/retrying). Thus, once the driver runs, it will need
* to "catch up" to where the hardware chain pointer is at.
*/
static void spider_net_resync_head_ptr(struct spider_net_card *card)
{
unsigned long flags;
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *descr;
int i, status;
/* Advance head pointer past any empty descrs */
descr = chain->head;
status = spider_net_get_descr_status(descr->hwdescr);
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
return;
spin_lock_irqsave(&chain->lock, flags);
descr = chain->head;
status = spider_net_get_descr_status(descr->hwdescr);
for (i=0; i<chain->num_desc; i++) {
if (status != SPIDER_NET_DESCR_CARDOWNED) break;
descr = descr->next;
status = spider_net_get_descr_status(descr->hwdescr);
}
chain->head = descr;
spin_unlock_irqrestore(&chain->lock, flags);
}
static int spider_net_resync_tail_ptr(struct spider_net_card *card)
{
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *descr;
int i, status;
/* Advance tail pointer past any empty and reaped descrs */
descr = chain->tail;
status = spider_net_get_descr_status(descr->hwdescr);
for (i=0; i<chain->num_desc; i++) {
if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
(status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
descr = descr->next;
status = spider_net_get_descr_status(descr->hwdescr);
}
chain->tail = descr;
if ((i == chain->num_desc) || (i == 0))
return 1;
return 0;
}
/**
* spider_net_decode_one_descr - processes an RX descriptor
* @card: card structure
*
* Returns 1 if a packet has been sent to the stack, otherwise 0.
*
* Processes an RX descriptor by iommu-unmapping the data buffer
* and passing the packet up to the stack. This function is called
* in softirq context, e.g. either bottom half from interrupt or
* NAPI polling context.
*/
static int
spider_net_decode_one_descr(struct spider_net_card *card)
{
struct net_device *dev = card->netdev;
struct spider_net_descr_chain *chain = &card->rx_chain;
struct spider_net_descr *descr = chain->tail;
struct spider_net_hw_descr *hwdescr = descr->hwdescr;
u32 hw_buf_addr;
int status;
status = spider_net_get_descr_status(hwdescr);
/* Nothing in the descriptor, or ring must be empty */
if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
(status == SPIDER_NET_DESCR_NOT_IN_USE))
return 0;
/* descriptor definitively used -- move on tail */
chain->tail = descr->next;
/* unmap descriptor */
hw_buf_addr = hwdescr->buf_addr;
hwdescr->buf_addr = 0xffffffff;
pci_unmap_single(card->pdev, hw_buf_addr,
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
(status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
(status == SPIDER_NET_DESCR_FORCE_END) ) {
if (netif_msg_rx_err(card))
dev_err(&dev->dev,
"dropping RX descriptor with state %d\n", status);
dev->stats.rx_dropped++;
goto bad_desc;
}
if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
(status != SPIDER_NET_DESCR_FRAME_END) ) {
if (netif_msg_rx_err(card))
dev_err(&card->netdev->dev,
"RX descriptor with unknown state %d\n", status);
card->spider_stats.rx_desc_unk_state++;
goto bad_desc;
}
/* The cases we'll throw away the packet immediately */
if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
if (netif_msg_rx_err(card))
dev_err(&card->netdev->dev,
"error in received descriptor found, "
"data_status=x%08x, data_error=x%08x\n",
hwdescr->data_status, hwdescr->data_error);
goto bad_desc;
}
if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
hwdescr->dmac_cmd_status);
pr_err("buf_addr=x%08x\n", hw_buf_addr);
pr_err("buf_size=x%08x\n", hwdescr->buf_size);
pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
pr_err("result_size=x%08x\n", hwdescr->result_size);
pr_err("valid_size=x%08x\n", hwdescr->valid_size);
pr_err("data_status=x%08x\n", hwdescr->data_status);
pr_err("data_error=x%08x\n", hwdescr->data_error);
pr_err("which=%ld\n", descr - card->rx_chain.ring);
card->spider_stats.rx_desc_error++;
goto bad_desc;
}
/* Ok, we've got a packet in descr */
spider_net_pass_skb_up(descr, card);
descr->skb = NULL;
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
return 1;
bad_desc:
if (netif_msg_rx_err(card))
show_rx_chain(card);
dev_kfree_skb_irq(descr->skb);
descr->skb = NULL;
hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
return 0;
}
/**
* spider_net_poll - NAPI poll function called by the stack to return packets
* @netdev: interface device structure
* @budget: number of packets we can pass to the stack at most
*
* Returns the number of packets processed, which is at most @budget.
*
* spider_net_poll passes all received packets from the rx descriptors to
* the stack (using netif_receive_skb). If fewer than @budget packets were
* processed, the driver re-enables interrupts and completes the NAPI poll.
*/
static int spider_net_poll(struct napi_struct *napi, int budget)
{
struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
int packets_done = 0;
while (packets_done < budget) {
if (!spider_net_decode_one_descr(card))
break;
packets_done++;
}
if ((packets_done == 0) && (card->num_rx_ints != 0)) {
if (!spider_net_resync_tail_ptr(card))
packets_done = budget;
spider_net_resync_head_ptr(card);
}
card->num_rx_ints = 0;
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
spider_net_cleanup_tx_ring(card);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
if (packets_done < budget) {
napi_complete(napi);
spider_net_rx_irq_on(card);
card->ignore_rx_ramfull = 0;
}
return packets_done;
}
/**
* spider_net_change_mtu - changes the MTU of an interface
* @netdev: interface device structure
* @new_mtu: new MTU value
*
* returns 0 on success, <0 on failure
*/
static int
spider_net_change_mtu(struct net_device *netdev, int new_mtu)
{
/* no need to re-alloc skbs or so -- the max mtu is about 2.3k
* and mtu is outbound only anyway */
if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
(new_mtu > SPIDER_NET_MAX_MTU) )
return -EINVAL;
netdev->mtu = new_mtu;
return 0;
}
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
* @p: pointer to a sockaddr holding the new MAC address
*
* Returns 0 on success, <0 on failure.
*/
static int
spider_net_set_mac(struct net_device *netdev, void *p)
{
struct spider_net_card *card = netdev_priv(netdev);
u32 macl, macu, regvalue;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
/* switch off GMACTPE and GMACRPE */
regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
regvalue &= ~((1 << 5) | (1 << 6));
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
/* write mac */
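/* macu carries dev_addr[0..3] (most significant byte first),
* macl carries dev_addr[4..5] */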
macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
(netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
/* switch GMACTPE and GMACRPE back on */
regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
regvalue |= ((1 << 5) | (1 << 6));
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
spider_net_set_promisc(card);
return 0;
}
/**
* spider_net_link_reset - reset the PHY and restart auto-negotiation
* @netdev: net device structure
*
* This is called when the PHY_LINK signal is asserted. For the blade this is
* not connected so we should never get here.
*
*/
static void
spider_net_link_reset(struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
del_timer_sync(&card->aneg_timer);
/* clear interrupt, block further interrupts */
spider_net_write_reg(card, SPIDER_NET_GMACST,
spider_net_read_reg(card, SPIDER_NET_GMACST));
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
/* reset phy and setup aneg */
card->aneg_count = 0;
card->medium = BCM54XX_COPPER;
spider_net_setup_aneg(card);
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
}
/**
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
*
* spider_net_handle_error_irq treats or ignores all error conditions
* found when an interrupt is presented
*/
static void
spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
u32 error_reg1, u32 error_reg2)
{
u32 i;
int show_error = 1;
/* check GHIINT0STS ************************************/
if (status_reg)
for (i = 0; i < 32; i++)
if (status_reg & (1<<i))
switch (i)
{
/* let error_reg1 and error_reg2 evaluation decide, what to do
case SPIDER_NET_PHYINT:
case SPIDER_NET_GMAC2INT:
case SPIDER_NET_GMAC1INT:
case SPIDER_NET_GFIFOINT:
case SPIDER_NET_DMACINT:
case SPIDER_NET_GSYSINT:
break; */
case SPIDER_NET_GIPSINT:
show_error = 0;
break;
case SPIDER_NET_GPWOPCMPINT:
/* PHY write operation completed */
show_error = 0;
break;
case SPIDER_NET_GPROPCMPINT:
/* PHY read operation completed */
/* we don't use semaphores, as we poll for the completion
* of the read operation in spider_net_read_phy. Should take
* about 50 us */
show_error = 0;
break;
case SPIDER_NET_GPWFFINT:
/* PHY command queue full */
if (netif_msg_intr(card))
dev_err(&card->netdev->dev, "PHY write queue full\n");
show_error = 0;
break;
/* case SPIDER_NET_GRMDADRINT: not used. print a message */
/* case SPIDER_NET_GRMARPINT: not used. print a message */
/* case SPIDER_NET_GRMMPINT: not used. print a message */
case SPIDER_NET_GDTDEN0INT:
/* someone has set TX_DMA_EN to 0 */
show_error = 0;
break;
case SPIDER_NET_GDDDEN0INT: /* fallthrough */
case SPIDER_NET_GDCDEN0INT: /* fallthrough */
case SPIDER_NET_GDBDEN0INT: /* fallthrough */
case SPIDER_NET_GDADEN0INT:
/* someone has set RX_DMA_EN to 0 */
show_error = 0;
break;
/* RX interrupts */
case SPIDER_NET_GDDFDCINT:
case SPIDER_NET_GDCFDCINT:
case SPIDER_NET_GDBFDCINT:
case SPIDER_NET_GDAFDCINT:
/* case SPIDER_NET_GDNMINT: not used. print a message */
/* case SPIDER_NET_GCNMINT: not used. print a message */
/* case SPIDER_NET_GBNMINT: not used. print a message */
/* case SPIDER_NET_GANMINT: not used. print a message */
/* case SPIDER_NET_GRFNMINT: not used. print a message */
show_error = 0;
break;
/* TX interrupts */
case SPIDER_NET_GDTFDCINT:
show_error = 0;
break;
case SPIDER_NET_GTTEDINT:
show_error = 0;
break;
case SPIDER_NET_GDTDCEINT:
/* chain end. If a descriptor should be sent, kick off
* tx dma
if (card->tx_chain.tail != card->tx_chain.head)
spider_net_kick_tx_dma(card);
*/
show_error = 0;
break;
/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
/* case SPIDER_NET_GFREECNTINT: not used. print a message */
}
/* check GHIINT1STS ************************************/
if (error_reg1)
for (i = 0; i < 32; i++)
if (error_reg1 & (1<<i))
switch (i)
{
case SPIDER_NET_GTMFLLINT:
/* TX RAM full may happen on a usual case.
* Logging is not needed. */
show_error = 0;
break;
case SPIDER_NET_GRFDFLLINT: /* fallthrough */
case SPIDER_NET_GRFCFLLINT: /* fallthrough */
case SPIDER_NET_GRFBFLLINT: /* fallthrough */
case SPIDER_NET_GRFAFLLINT: /* fallthrough */
case SPIDER_NET_GRMFLLINT:
/* Could happen when rx chain is full */
if (card->ignore_rx_ramfull == 0) {
card->ignore_rx_ramfull = 1;
spider_net_resync_head_ptr(card);
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
napi_schedule(&card->napi);
}
show_error = 0;
break;
/* case SPIDER_NET_GTMSHTINT: problem, print a message */
case SPIDER_NET_GDTINVDINT:
/* allrighty. tx from previous descr ok */
show_error = 0;
break;
/* chain end */
case SPIDER_NET_GDDDCEINT: /* fallthrough */
case SPIDER_NET_GDCDCEINT: /* fallthrough */
case SPIDER_NET_GDBDCEINT: /* fallthrough */
case SPIDER_NET_GDADCEINT:
spider_net_resync_head_ptr(card);
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
napi_schedule(&card->napi);
show_error = 0;
break;
/* invalid descriptor */
case SPIDER_NET_GDDINVDINT: /* fallthrough */
case SPIDER_NET_GDCINVDINT: /* fallthrough */
case SPIDER_NET_GDBINVDINT: /* fallthrough */
case SPIDER_NET_GDAINVDINT:
/* Could happen when rx chain is full */
spider_net_resync_head_ptr(card);
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
card->num_rx_ints ++;
napi_schedule(&card->napi);
show_error = 0;
break;
/* case SPIDER_NET_GDTRSERINT: problem, print a message */
/* case SPIDER_NET_GDDRSERINT: problem, print a message */
/* case SPIDER_NET_GDCRSERINT: problem, print a message */
/* case SPIDER_NET_GDBRSERINT: problem, print a message */
/* case SPIDER_NET_GDARSERINT: problem, print a message */
/* case SPIDER_NET_GDSERINT: problem, print a message */
/* case SPIDER_NET_GDTPTERINT: problem, print a message */
/* case SPIDER_NET_GDDPTERINT: problem, print a message */
/* case SPIDER_NET_GDCPTERINT: problem, print a message */
/* case SPIDER_NET_GDBPTERINT: problem, print a message */
/* case SPIDER_NET_GDAPTERINT: problem, print a message */
default:
show_error = 1;
break;
}
/* check GHIINT2STS ************************************/
if (error_reg2)
for (i = 0; i < 32; i++)
if (error_reg2 & (1<<i))
switch (i)
{
/* there is nothing we can (want to) do at this time. Log a
* message, we can switch on and off the specific values later on
case SPIDER_NET_GPROPERINT:
case SPIDER_NET_GMCTCRSNGINT:
case SPIDER_NET_GMCTLCOLINT:
case SPIDER_NET_GMCTTMOTINT:
case SPIDER_NET_GMCRCAERINT:
case SPIDER_NET_GMCRCALERINT:
case SPIDER_NET_GMCRALNERINT:
case SPIDER_NET_GMCROVRINT:
case SPIDER_NET_GMCRRNTINT:
case SPIDER_NET_GMCRRXERINT:
case SPIDER_NET_GTITCSERINT:
case SPIDER_NET_GTIFMTERINT:
case SPIDER_NET_GTIPKTRVKINT:
case SPIDER_NET_GTISPINGINT:
case SPIDER_NET_GTISADNGINT:
case SPIDER_NET_GTISPDNGINT:
case SPIDER_NET_GRIFMTERINT:
case SPIDER_NET_GRIPKTRVKINT:
case SPIDER_NET_GRISPINGINT:
case SPIDER_NET_GRISADNGINT:
case SPIDER_NET_GRISPDNGINT:
break;
*/
default:
break;
}
if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
"GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
status_reg, error_reg1, error_reg2);
/* clear interrupt sources */
spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
}
/**
* spider_net_interrupt - interrupt handler for spider_net
* @irq: interrupt number
* @ptr: pointer to net_device
*
* returns IRQ_HANDLED if the interrupt was for the driver, or IRQ_NONE
* if no interrupt raised by the card was found.
*
* This is the interrupt handler that turns off interrupts for this
* device and makes the stack poll the driver
*/
static irqreturn_t
spider_net_interrupt(int irq, void *ptr)
{
struct net_device *netdev = ptr;
struct spider_net_card *card = netdev_priv(netdev);
u32 status_reg, error_reg1, error_reg2;
status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
!(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
!(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
return IRQ_NONE;
if (status_reg & SPIDER_NET_RXINT ) {
spider_net_rx_irq_off(card);
napi_schedule(&card->napi);
card->num_rx_ints ++;
}
if (status_reg & SPIDER_NET_TXINT)
napi_schedule(&card->napi);
if (status_reg & SPIDER_NET_LINKINT)
spider_net_link_reset(netdev);
if (status_reg & SPIDER_NET_ERRINT )
spider_net_handle_error_irq(card, status_reg,
error_reg1, error_reg2);
/* clear interrupt sources */
spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* spider_net_poll_controller - artificial interrupt for netconsole etc.
* @netdev: interface device structure
*
* see Documentation/networking/netconsole.txt
*/
static void
spider_net_poll_controller(struct net_device *netdev)
{
disable_irq(netdev->irq);
spider_net_interrupt(netdev->irq, netdev);
enable_irq(netdev->irq);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
* spider_net_enable_interrupts - enable interrupts
* @card: card structure
*
* spider_net_enable_interrupts enables several interrupts
*/
static void
spider_net_enable_interrupts(struct spider_net_card *card)
{
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
SPIDER_NET_INT0_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
SPIDER_NET_INT1_MASK_VALUE);
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
SPIDER_NET_INT2_MASK_VALUE);
}
/**
* spider_net_disable_interrupts - disable interrupts
* @card: card structure
*
* spider_net_disable_interrupts disables all the interrupts
*/
static void
spider_net_disable_interrupts(struct spider_net_card *card)
{
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
}
/**
* spider_net_init_card - initializes the card
* @card: card structure
*
* spider_net_init_card initializes the card so that other registers can
* be used
*/
static void
spider_net_init_card(struct spider_net_card *card)
{
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_STOP_VALUE);
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_RUN_VALUE);
/* trigger ETOMOD signal */
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
spider_net_disable_interrupts(card);
}
/**
* spider_net_enable_card - enables the card by setting all kinds of regs
* @card: card structure
*
* spider_net_enable_card sets a lot of MMIO registers to enable the device
*/
static void
spider_net_enable_card(struct spider_net_card *card)
{
int i;
/* the following array consists of (register),(value) pairs
* that are set in this function. A register of 0 ends the list */
u32 regs[][2] = {
{ SPIDER_NET_GRESUMINTNUM, 0 },
{ SPIDER_NET_GREINTNUM, 0 },
/* set interrupt frame number registers */
/* clear the single DMA engine registers first */
{ SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
{ SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
{ SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
{ SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
/* then set what we really need */
{ SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
/* timer counter registers and stuff */
{ SPIDER_NET_GFREECNNUM, 0 },
{ SPIDER_NET_GONETIMENUM, 0 },
{ SPIDER_NET_GTOUTFRMNUM, 0 },
/* RX mode setting */
{ SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
/* TX mode setting */
{ SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
/* IPSEC mode setting */
{ SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
{ SPIDER_NET_GMRWOLCTRL, 0 },
{ SPIDER_NET_GTESTMD, 0x10000000 },
{ SPIDER_NET_GTTQMSK, 0x00400040 },
{ SPIDER_NET_GMACINTEN, 0 },
/* flow control stuff */
{ SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
{ SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
{ SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
{ 0, 0}
};
i = 0;
while (regs[i][0]) {
spider_net_write_reg(card, regs[i][0], regs[i][1]);
i++;
}
/* clear unicast filter table entries 1 to 14 */
for (i = 1; i <= 14; i++) {
spider_net_write_reg(card,
SPIDER_NET_GMRUAFILnR + i * 8,
0x00080000);
spider_net_write_reg(card,
SPIDER_NET_GMRUAFILnR + i * 8 + 4,
0x00000000);
}
spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
/* set chain tail address for RX chains and
* enable DMA */
spider_net_enable_rxchtails(card);
spider_net_enable_rxdmac(card);
spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
SPIDER_NET_LENLMT_VALUE);
spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
SPIDER_NET_OPMODE_VALUE);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
SPIDER_NET_GDTBSTA);
}
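/*
 * The { register, value } table above is sentinel-terminated: a
 * register of 0 ends the list, so new init writes are added by
 * inserting a pair before the final { 0, 0 } entry.  A minimal
 * sketch of the same idiom as a standalone helper (illustrative
 * only, not part of the driver):
 */
#if 0
static void example_write_reg_table(struct spider_net_card *card,
				    const u32 (*table)[2])
{
	int i;

	for (i = 0; table[i][0]; i++)
		spider_net_write_reg(card, table[i][0], table[i][1]);
}
#endif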
/**
* spider_net_download_firmware - loads firmware into the adapter
* @card: card structure
* @firmware_ptr: pointer to firmware data
*
* spider_net_download_firmware loads the firmware data into the
* adapter. It assumes the length and layout of the data to be correct.
*/
static int
spider_net_download_firmware(struct spider_net_card *card,
const void *firmware_ptr)
{
int sequencer, i;
const u32 *fw_ptr = firmware_ptr;
/* stop sequencers */
spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_STOP_SEQ_VALUE);
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
spider_net_write_reg(card,
SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, *fw_ptr);
fw_ptr++;
}
}
if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
return -EIO;
spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_RUN_SEQ_VALUE);
return 0;
}
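/*
 * Register addressing used above, worked through for illustration:
 * each sequencer owns an 8-byte register pair, so sequencer n is
 * programmed through GSnPRGADR + n * 8 (program address, reset to 0
 * first) and GSnPRGDAT + n * 8 (program data).  For sequencer 2, for
 * example, the data port sits at GSnPRGDAT + 16 and receives
 * SPIDER_NET_FIRMWARE_SEQWORDS consecutive 32-bit words.
 */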
/**
* spider_net_init_firmware - reads in firmware parts
* @card: card structure
*
* Returns 0 on success, <0 on failure
*
* spider_net_init_firmware opens the sequencer firmware and does some basic
* checks. This function opens and releases the firmware structure. A call
* to download the firmware is performed before the release.
*
* Firmware format
* ===============
* spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
* the program for each sequencer. Use the command
* tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
* Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
* Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
*
* to generate spider_fw.bin, if you have sequencer programs with something
* like the following contents for each sequencer:
* <ONE LINE COMMENT>
* <FIRST 4-BYTE WORD FOR SEQUENCER>
* <SECOND 4-BYTE WORD FOR SEQUENCER>
* ...
* <1024th 4-BYTE WORD FOR SEQUENCER>
*/
static int
spider_net_init_firmware(struct spider_net_card *card)
{
struct firmware *firmware = NULL;
struct device_node *dn;
const u8 *fw_prop = NULL;
int err = -ENOENT;
int fw_size;
if (request_firmware((const struct firmware **)&firmware,
SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
if (netif_msg_probe(card))
dev_err(&card->netdev->dev,
"Incorrect size of spidernet firmware in "
"filesystem. Looking in host firmware...\n");
release_firmware(firmware);
goto try_host_fw;
}
err = spider_net_download_firmware(card, firmware->data);
release_firmware(firmware);
if (err)
goto try_host_fw;
goto done;
}
try_host_fw:
dn = pci_device_to_OF_node(card->pdev);
if (!dn)
goto out_err;
fw_prop = of_get_property(dn, "firmware", &fw_size);
if (!fw_prop)
goto out_err;
if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
if (netif_msg_probe(card))
dev_err(&card->netdev->dev,
"Incorrect size of spidernet firmware in host firmware\n");
goto done;
}
err = spider_net_download_firmware(card, fw_prop);
done:
return err;
out_err:
if (netif_msg_probe(card))
dev_err(&card->netdev->dev,
"Couldn't find spidernet firmware in filesystem " \
"or host firmware\n");
return err;
}
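/*
 * A quick userspace sanity check for a generated spider_fw.bin
 * (illustrative sketch, not driver code): per the format notes
 * above, the image must be exactly 6 * 1024 * 4 = 24576 bytes,
 * i.e. SPIDER_NET_FIRMWARE_LEN.
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return 1;
	if (st.st_size != 6 * 1024 * 4) {
		fprintf(stderr, "%s: bad size %lld, expected 24576\n",
			argv[1], (long long)st.st_size);
		return 1;
	}
	return 0;
}
#endif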
/**
* spider_net_open - called upon ifconfig up
* @netdev: interface device structure
*
* returns 0 on success, <0 on failure
*
* spider_net_open allocates all the descriptors and memory needed for
* operation, sets up multicast list and enables interrupts
*/
int
spider_net_open(struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
int result;
result = spider_net_init_firmware(card);
if (result)
goto init_firmware_failed;
/* start probing with copper */
card->aneg_count = 0;
card->medium = BCM54XX_COPPER;
spider_net_setup_aneg(card);
if (card->phy.def->phy_id)
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
result = spider_net_init_chain(card, &card->tx_chain);
if (result)
goto alloc_tx_failed;
card->low_watermark = NULL;
result = spider_net_init_chain(card, &card->rx_chain);
if (result)
goto alloc_rx_failed;
/* Allocate rx skbs */
result = spider_net_alloc_rx_skbs(card);
if (result)
goto alloc_skbs_failed;
spider_net_set_multi(netdev);
/* further enhancement: setup hw vlan, if needed */
result = -EBUSY;
if (request_irq(netdev->irq, spider_net_interrupt,
IRQF_SHARED, netdev->name, netdev))
goto register_int_failed;
spider_net_enable_card(card);
netif_start_queue(netdev);
netif_carrier_on(netdev);
napi_enable(&card->napi);
spider_net_enable_interrupts(card);
return 0;
register_int_failed:
spider_net_free_rx_chain_contents(card);
alloc_skbs_failed:
spider_net_free_chain(card, &card->rx_chain);
alloc_rx_failed:
spider_net_free_chain(card, &card->tx_chain);
alloc_tx_failed:
del_timer_sync(&card->aneg_timer);
init_firmware_failed:
return result;
}
/**
* spider_net_link_phy - periodic link check, run from the aneg timer
* @data: pointer to the card structure, passed as the timer argument
*/
static void spider_net_link_phy(unsigned long data)
{
struct spider_net_card *card = (struct spider_net_card *)data;
struct mii_phy *phy = &card->phy;
/* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the phy again */
if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
pr_debug("%s: link is down trying to bring it up\n",
card->netdev->name);
switch (card->medium) {
case BCM54XX_COPPER:
/* enable fiber with autonegotiation first */
if (phy->def->ops->enable_fiber)
phy->def->ops->enable_fiber(phy, 1);
card->medium = BCM54XX_FIBER;
break;
case BCM54XX_FIBER:
/* fiber didn't come up, try to disable fiber autoneg */
if (phy->def->ops->enable_fiber)
phy->def->ops->enable_fiber(phy, 0);
card->medium = BCM54XX_UNKNOWN;
break;
case BCM54XX_UNKNOWN:
/* copper and fiber, both with and without autoneg,
* failed; retry from the beginning */
spider_net_setup_aneg(card);
card->medium = BCM54XX_COPPER;
break;
}
card->aneg_count = 0;
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
return;
}
/* link still not up, try again later */
if (!(phy->def->ops->poll_link(phy))) {
card->aneg_count++;
mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
return;
}
/* link came up, get abilities */
phy->def->ops->read_link(phy);
spider_net_write_reg(card, SPIDER_NET_GMACST,
spider_net_read_reg(card, SPIDER_NET_GMACST));
spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
if (phy->speed == 1000)
spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
else
spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
card->aneg_count = 0;
pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
card->netdev->name, phy->speed,
phy->duplex == 1 ? "Full" : "Half",
phy->autoneg == 1 ? "" : "no ");
}
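/*
 * Summary of the medium fallback walked by spider_net_link_phy()
 * above; each transition fires after SPIDER_NET_ANEG_TIMEOUT
 * unsuccessful link polls:
 *
 *   BCM54XX_COPPER  --timeout--> BCM54XX_FIBER   (fiber, autoneg on)
 *   BCM54XX_FIBER   --timeout--> BCM54XX_UNKNOWN (fiber, autoneg off)
 *   BCM54XX_UNKNOWN --timeout--> BCM54XX_COPPER  (restart with
 *                                spider_net_setup_aneg())
 */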
/**
* spider_net_setup_phy - setup PHY
* @card: card structure
*
* returns 0 on success, <0 on failure
*
* spider_net_setup_phy is used as part of spider_net_probe.
**/
static int
spider_net_setup_phy(struct spider_net_card *card)
{
struct mii_phy *phy = &card->phy;
spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
SPIDER_NET_DMASEL_VALUE);
spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
SPIDER_NET_PHY_CTRL_VALUE);
phy->dev = card->netdev;
phy->mdio_read = spider_net_read_phy;
phy->mdio_write = spider_net_write_phy;
for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
unsigned short id;
id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
if (id != 0x0000 && id != 0xffff) {
if (!sungem_phy_probe(phy, phy->mii_id)) {
pr_info("Found %s.\n", phy->def->name);
break;
}
}
}
return 0;
}
/**
* spider_net_workaround_rxramfull - work around firmware bug
* @card: card structure
*
* no return value
**/
static void
spider_net_workaround_rxramfull(struct spider_net_card *card)
{
int i, sequencer = 0;
/* cancel reset */
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_RUN_VALUE);
/* empty sequencer data */
for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
sequencer * 8, 0x0);
for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, 0x0);
}
}
/* set sequencer operation */
spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
/* reset */
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_STOP_VALUE);
}
/**
* spider_net_stop - called upon ifconfig down
* @netdev: interface device structure
*
* always returns 0
*/
int
spider_net_stop(struct net_device *netdev)
{
struct spider_net_card *card = netdev_priv(netdev);
napi_disable(&card->napi);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
del_timer_sync(&card->tx_timer);
del_timer_sync(&card->aneg_timer);
spider_net_disable_interrupts(card);
free_irq(netdev->irq, netdev);
spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
SPIDER_NET_DMA_TX_FEND_VALUE);
/* turn off DMA, force end */
spider_net_disable_rxdmac(card);
/* release chains */
spider_net_release_tx_chain(card, 1);
spider_net_free_rx_chain_contents(card);
spider_net_free_chain(card, &card->tx_chain);
spider_net_free_chain(card, &card->rx_chain);
return 0;
}
/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (runs in process context, not in interrupt context)
* @work: work struct embedded in the card structure
*
* called as a task when tx hangs; resets the interface (if it is up)
*/
static void
spider_net_tx_timeout_task(struct work_struct *work)
{
struct spider_net_card *card =
container_of(work, struct spider_net_card, tx_timeout_task);
struct net_device *netdev = card->netdev;
if (!(netdev->flags & IFF_UP))
goto out;
netif_device_detach(netdev);
spider_net_stop(netdev);
spider_net_workaround_rxramfull(card);
spider_net_init_card(card);
if (spider_net_setup_phy(card))
goto out;
spider_net_open(netdev);
spider_net_kick_tx_dma(card);
netif_device_attach(netdev);
out:
atomic_dec(&card->tx_timeout_task_counter);
}
/**
* spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
*
* called if tx hangs; schedules a task that resets the interface
*/
static void
spider_net_tx_timeout(struct net_device *netdev)
{
struct spider_net_card *card;
card = netdev_priv(netdev);
atomic_inc(&card->tx_timeout_task_counter);
if (netdev->flags & IFF_UP)
schedule_work(&card->tx_timeout_task);
else
atomic_dec(&card->tx_timeout_task_counter);
card->spider_stats.tx_timeouts++;
}
static const struct net_device_ops spider_net_ops = {
.ndo_open = spider_net_open,
.ndo_stop = spider_net_stop,
.ndo_start_xmit = spider_net_xmit,
.ndo_set_rx_mode = spider_net_set_multi,
.ndo_set_mac_address = spider_net_set_mac,
.ndo_change_mtu = spider_net_change_mtu,
.ndo_do_ioctl = spider_net_do_ioctl,
.ndo_tx_timeout = spider_net_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
/* HW VLAN */
#ifdef CONFIG_NET_POLL_CONTROLLER
/* poll controller */
.ndo_poll_controller = spider_net_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
/**
* spider_net_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
*
* fills out function pointers in the net_device structure
*/
static void
spider_net_setup_netdev_ops(struct net_device *netdev)
{
netdev->netdev_ops = &spider_net_ops;
netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
/* ethtool ops */
netdev->ethtool_ops = &spider_net_ethtool_ops;
}
/**
* spider_net_setup_netdev - initialization of net_device
* @card: card structure
*
* Returns 0 on success or <0 on failure
*
* spider_net_setup_netdev initializes the net_device structure
**/
static int
spider_net_setup_netdev(struct spider_net_card *card)
{
int result;
struct net_device *netdev = card->netdev;
struct device_node *dn;
struct sockaddr addr;
const u8 *mac;
SET_NETDEV_DEV(netdev, &card->pdev->dev);
pci_set_drvdata(card->pdev, netdev);
init_timer(&card->tx_timer);
card->tx_timer.function =
(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
card->tx_timer.data = (unsigned long) card;
netdev->irq = card->pdev->irq;
card->aneg_count = 0;
init_timer(&card->aneg_timer);
card->aneg_timer.function = spider_net_link_phy;
card->aneg_timer.data = (unsigned long) card;
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
spider_net_setup_netdev_ops(netdev);
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
if (SPIDER_NET_RX_CSUM_DEFAULT)
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER */
card->num_rx_ints = 0;
card->ignore_rx_ramfull = 0;
dn = pci_device_to_OF_node(card->pdev);
if (!dn)
return -EIO;
mac = of_get_property(dn, "local-mac-address", NULL);
if (!mac)
return -EIO;
memcpy(addr.sa_data, mac, ETH_ALEN);
result = spider_net_set_mac(netdev, &addr);
if ((result) && (netif_msg_probe(card)))
dev_err(&card->netdev->dev,
"Failed to set MAC address: %i\n", result);
result = register_netdev(netdev);
if (result) {
if (netif_msg_probe(card))
dev_err(&card->netdev->dev,
"Couldn't register net_device: %i\n", result);
return result;
}
if (netif_msg_probe(card))
pr_info("Initialized device %s.\n", netdev->name);
return 0;
}
/**
* spider_net_alloc_card - allocates net_device and card structure
*
* returns the card structure or NULL in case of errors
*
* the card and net_device structures are linked to each other
*/
static struct spider_net_card *
spider_net_alloc_card(void)
{
struct net_device *netdev;
struct spider_net_card *card;
size_t alloc_size;
alloc_size = sizeof(struct spider_net_card) +
(tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
netdev = alloc_etherdev(alloc_size);
if (!netdev)
return NULL;
card = netdev_priv(netdev);
card->netdev = netdev;
card->msg_enable = SPIDER_NET_DEFAULT_MSG;
INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
init_waitqueue_head(&card->waitq);
atomic_set(&card->tx_timeout_task_counter, 0);
card->rx_chain.num_desc = rx_descriptors;
card->rx_chain.ring = card->darray;
card->tx_chain.num_desc = tx_descriptors;
card->tx_chain.ring = card->darray + rx_descriptors;
return card;
}
/**
* spider_net_undo_pci_setup - releases PCI resources
* @card: card structure
*
* spider_net_undo_pci_setup releases the mapped regions
*/
static void
spider_net_undo_pci_setup(struct spider_net_card *card)
{
iounmap(card->regs);
pci_release_regions(card->pdev);
}
/**
* spider_net_setup_pci_dev - sets up the device in terms of PCI operations
* @pdev: PCI device
*
* Returns the card structure or NULL if any errors occur
*
* spider_net_setup_pci_dev initializes pdev and together with the
* functions called in spider_net_open configures the device so that
* data can be transferred over it.
* The net_device structure is attached to the card structure if the
* function returns without error.
**/
static struct spider_net_card *
spider_net_setup_pci_dev(struct pci_dev *pdev)
{
struct spider_net_card *card;
unsigned long mmio_start, mmio_len;
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Couldn't enable PCI device\n");
return NULL;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev,
"Couldn't find proper PCI device base address.\n");
goto out_disable_dev;
}
if (pci_request_regions(pdev, spider_net_driver_name)) {
dev_err(&pdev->dev,
"Couldn't obtain PCI resources, aborting.\n");
goto out_disable_dev;
}
pci_set_master(pdev);
card = spider_net_alloc_card();
if (!card) {
dev_err(&pdev->dev,
"Couldn't allocate net_device structure, aborting.\n");
goto out_release_regions;
}
card->pdev = pdev;
/* fetch base address and length of first resource */
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
card->netdev->mem_start = mmio_start;
card->netdev->mem_end = mmio_start + mmio_len;
card->regs = ioremap(mmio_start, mmio_len);
if (!card->regs) {
dev_err(&pdev->dev,
"Couldn't obtain PCI resources, aborting.\n");
goto out_release_regions;
}
return card;
out_release_regions:
pci_release_regions(pdev);
out_disable_dev:
pci_disable_device(pdev);
return NULL;
}
/**
* spider_net_probe - initialization of a device
* @pdev: PCI device
* @ent: entry in the device id list
*
* Returns 0 on success, <0 on failure
*
* spider_net_probe initializes pdev and registers a net_device
* structure for it. After that, the device can be ifconfig'ed up
**/
static int
spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = -EIO;
struct spider_net_card *card;
card = spider_net_setup_pci_dev(pdev);
if (!card)
goto out;
spider_net_workaround_rxramfull(card);
spider_net_init_card(card);
err = spider_net_setup_phy(card);
if (err)
goto out_undo_pci;
err = spider_net_setup_netdev(card);
if (err)
goto out_undo_pci;
return 0;
out_undo_pci:
spider_net_undo_pci_setup(card);
free_netdev(card->netdev);
out:
return err;
}
/**
* spider_net_remove - removal of a device
* @pdev: PCI device
*
* spider_net_remove is called to remove the device and unregisters the
* net_device
**/
static void
spider_net_remove(struct pci_dev *pdev)
{
struct net_device *netdev;
struct spider_net_card *card;
netdev = pci_get_drvdata(pdev);
card = netdev_priv(netdev);
wait_event(card->waitq,
atomic_read(&card->tx_timeout_task_counter) == 0);
unregister_netdev(netdev);
/* switch off card */
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_STOP_VALUE);
spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
SPIDER_NET_CKRCTRL_RUN_VALUE);
spider_net_undo_pci_setup(card);
free_netdev(netdev);
}
static struct pci_driver spider_net_driver = {
.name = spider_net_driver_name,
.id_table = spider_net_pci_tbl,
.probe = spider_net_probe,
.remove = spider_net_remove
};
/**
* spider_net_init - init function when the driver is loaded
*
* spider_net_init registers the device driver
*/
static int __init spider_net_init(void)
{
printk(KERN_INFO "Spidernet version %s.\n", VERSION);
if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
}
if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
}
if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
}
if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
}
return pci_register_driver(&spider_net_driver);
}
/**
* spider_net_cleanup - exit function when driver is unloaded
*
* spider_net_cleanup unregisters the device driver
*/
static void __exit spider_net_cleanup(void)
{
pci_unregister_driver(&spider_net_driver);
}
module_init(spider_net_init);
module_exit(spider_net_cleanup);
| gpl-2.0 |
wan-qy/linux | arch/x86/entry/vdso/vma.c | 589 | 6709 | /*
* Copyright 2007 Andi Kleen, SUSE Labs.
* Subject to the GPL, v.2
*
* This contains most of the x86 vDSO kernel-side code.
*/
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
void __init init_vdso_image(const struct vdso_image *image)
{
int i;
int npages = (image->size) / PAGE_SIZE;
BUG_ON(image->size % PAGE_SIZE != 0);
for (i = 0; i < npages; i++)
image->text_mapping.pages[i] =
virt_to_page(image->data + i*PAGE_SIZE);
apply_alternatives((struct alt_instr *)(image->data + image->alt),
(struct alt_instr *)(image->data + image->alt +
image->alt_len));
}
struct linux_binprm;
/*
* Put the vdso above the (randomized) stack with another randomized
* offset. This way there is no hole in the middle of address space.
* To save memory make sure it is still in the same PTE as the stack
* top. This doesn't give that many random bits.
*
* Note that this algorithm is imperfect: the distribution of the vdso
* start address within a PMD is biased toward the end.
*
* Only used for the 64-bit and x32 vdsos.
*/
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
return 0;
#else
unsigned long addr, end;
unsigned offset;
/*
* Round up the start address. It can start out unaligned as a result
* of stack start randomization.
*/
start = PAGE_ALIGN(start);
/* Round the lowest possible end address up to a PMD boundary. */
end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
if (end > start) {
offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
addr = start + (offset << PAGE_SHIFT);
} else {
addr = start;
}
/*
* Forcibly align the final address in case we have a hardware
* issue that requires alignment for performance reasons.
*/
addr = align_vdso_addr(addr);
return addr;
#endif
}
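/*
 * Worked example of the placement above, with illustrative numbers
 * (4 KiB pages, 2 MiB PMD):
 *
 *   start = 0x7ffd12345678  --PAGE_ALIGN-->  0x7ffd12346000
 *   len   = 0x2000 (two pages)
 *   end   = (start + len + PMD_SIZE - 1) & PMD_MASK = 0x7ffd12400000
 *   end  -= len                                     = 0x7ffd123fe000
 *
 * The vdso start is then start plus a random whole-page offset in
 * [0, (end - start) >> PAGE_SHIFT] pages, i.e. somewhere in the tail
 * of the PMD that also holds the stack top -- which is the bias
 * toward the end of the PMD mentioned in the comment above.
 */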
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr, text_start;
int ret = 0;
static struct page *no_pages[] = {NULL};
static struct vm_special_mapping vvar_mapping = {
.name = "[vvar]",
.pages = no_pages,
};
if (calculate_addr) {
addr = vdso_addr(current->mm->start_stack,
image->size - image->sym_vvar_start);
} else {
addr = 0;
}
down_write(&mm->mmap_sem);
addr = get_unmapped_area(NULL, addr,
image->size - image->sym_vvar_start, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
text_start = addr - image->sym_vvar_start;
current->mm->context.vdso = (void __user *)text_start;
/*
* MAYWRITE to allow gdb to COW and set breakpoints
*/
vma = _install_special_mapping(mm,
text_start,
image->size,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
&image->text_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto up_fail;
}
vma = _install_special_mapping(mm,
addr,
-image->sym_vvar_start,
VM_READ|VM_MAYREAD,
&vvar_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto up_fail;
}
if (image->sym_vvar_page)
ret = remap_pfn_range(vma,
text_start + image->sym_vvar_page,
__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
PAGE_SIZE,
PAGE_READONLY);
if (ret)
goto up_fail;
#ifdef CONFIG_HPET_TIMER
if (hpet_address && image->sym_hpet_page) {
ret = io_remap_pfn_range(vma,
text_start + image->sym_hpet_page,
hpet_address >> PAGE_SHIFT,
PAGE_SIZE,
pgprot_noncached(PAGE_READONLY));
if (ret)
goto up_fail;
}
#endif
up_fail:
if (ret)
current->mm->context.vdso = NULL;
up_write(&mm->mmap_sem);
return ret;
}
#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
static int load_vdso32(void)
{
int ret;
if (vdso32_enabled != 1) /* Other values all mean "disabled" */
return 0;
ret = map_vdso(selected_vdso32, false);
if (ret)
return ret;
if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
current_thread_info()->sysenter_return =
current->mm->context.vdso +
selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
return 0;
}
#endif
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
if (!vdso64_enabled)
return 0;
return map_vdso(&vdso_image_64, true);
}
#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
if (test_thread_flag(TIF_X32)) {
if (!vdso64_enabled)
return 0;
return map_vdso(&vdso_image_x32, true);
}
#endif
return load_vdso32();
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
return load_vdso32();
}
#endif
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
vdso64_enabled = simple_strtoul(s, NULL, 0);
return 0;
}
__setup("vdso=", vdso_setup);
#endif
#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
int cpu = smp_processor_id();
struct desc_struct d = { };
unsigned long node = 0;
#ifdef CONFIG_NUMA
node = cpu_to_node(cpu);
#endif
if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
/*
* Store cpu number in limit so that it can be loaded
* quickly in user space in vgetcpu. (12 bits for the CPU
* and 8 bits for the node)
*/
d.limit0 = cpu | ((node & 0xf) << 12);
d.limit = node >> 4;
d.type = 5; /* RO data, expand down, accessed */
d.dpl = 3; /* Visible to user code */
d.s = 1; /* Not a system segment */
d.p = 1; /* Present */
d.d = 1; /* 32-bit */
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
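/*
 * Userspace side of the encoding above (an illustrative sketch of
 * what the vdso's vgetcpu fast path does; the selector constant is
 * taken from asm/segment.h): LSL loads the segment limit without
 * faulting, yielding the CPU in bits 0-11 and the node in the bits
 * above, exactly as packed into d.limit0/d.limit here.
 */
#if 0
static inline void example_getcpu(unsigned int *cpu, unsigned int *node)
{
	unsigned long p;

	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
}
#endif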
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
return NOTIFY_DONE;
}
static int __init init_vdso(void)
{
init_vdso_image(&vdso_image_64);
#ifdef CONFIG_X86_X32_ABI
init_vdso_image(&vdso_image_x32);
#endif
cpu_notifier_register_begin();
on_each_cpu(vgetcpu_cpu_init, NULL, 1);
/* notifier priority > KVM */
__hotcpu_notifier(vgetcpu_cpu_notifier, 30);
cpu_notifier_register_done();
return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
| gpl-2.0 |
aatjitra/PR26 | drivers/media/video/samsung/mali_r3p1_lsi/common/mali_scheduler.c | 845 | 1084 | /*
* Copyright (C) 2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_osk.h"
static _mali_osk_atomic_t mali_job_autonumber;
_mali_osk_errcode_t mali_scheduler_initialize(void)
{
if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
{
MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
return _MALI_OSK_ERR_FAULT;
}
return _MALI_OSK_ERR_OK;
}
void mali_scheduler_terminate(void)
{
_mali_osk_atomic_term(&mali_job_autonumber);
}
u32 mali_scheduler_get_new_id(void)
{
u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
return job_id;
}
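/*
 * Typical call sequence (illustrative sketch; the scheduler functions
 * it calls are real, the caller is not):
 */
#if 0
static _mali_osk_errcode_t example_init(void)
{
	_mali_osk_errcode_t err = mali_scheduler_initialize();
	if (_MALI_OSK_ERR_OK != err)
		return err;

	/* one fresh, monotonically increasing id per submitted job */
	/* u32 job_id = mali_scheduler_get_new_id(); */

	/* on shutdown: mali_scheduler_terminate(); */
	return _MALI_OSK_ERR_OK;
}
#endif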
| gpl-2.0 |
stevezilla/dplive-linux | drivers/mmc/host/msm_sdcc.c | 845 | 35545 | /*
* linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
*
* Copyright (C) 2007 Google Inc,
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
* Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on mmci.c
*
* Author: San Mehat (san@android.com)
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/memory.h>
#include <linux/gfp.h>
#include <linux/gpio.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sizes.h>
#include <linux/platform_data/mmc-msm_sdcc.h>
#include <mach/dma.h>
#include <mach/clk.h>
#include "msm_sdcc.h"
#define DRIVER_NAME "msm-sdcc"
#define BUSCLK_PWRSAVE 1
#define BUSCLK_TIMEOUT (HZ)
static unsigned int msmsdcc_fmin = 144000;
static unsigned int msmsdcc_fmax = 50000000;
static unsigned int msmsdcc_4bit = 1;
static unsigned int msmsdcc_pwrsave = 1;
static unsigned int msmsdcc_piopoll = 1;
static unsigned int msmsdcc_sdioirq;
#define PIO_SPINMAX 30
#define CMD_SPINMAX 20
static inline void
msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
{
WARN_ON(!host->clks_on);
BUG_ON(host->curr.mrq);
if (deferr) {
mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
} else {
del_timer_sync(&host->busclk_timer);
/* Need to check clks_on again in case the busclk
* timer fired
*/
if (host->clks_on) {
clk_disable(host->clk);
clk_disable(host->pclk);
host->clks_on = 0;
}
}
}
static inline int
msmsdcc_enable_clocks(struct msmsdcc_host *host)
{
int rc;
del_timer_sync(&host->busclk_timer);
if (!host->clks_on) {
rc = clk_enable(host->pclk);
if (rc)
return rc;
rc = clk_enable(host->clk);
if (rc) {
clk_disable(host->pclk);
return rc;
}
udelay(1 + ((3 * USEC_PER_SEC) /
(host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
host->clks_on = 1;
}
return 0;
}
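/*
 * Usage pattern for the pair above: request paths call
 * msmsdcc_enable_clocks() before touching the controller and
 * msmsdcc_disable_clocks(host, 1) once idle, which defers the actual
 * gating by BUSCLK_TIMEOUT through busclk_timer so back-to-back
 * requests do not pay the re-enable latency; teardown and error
 * paths pass deferr == 0 to gate the clocks immediately.
 */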
static inline unsigned int
msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
{
return readl(host->base + reg);
}
static inline void
msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
{
writel(data, host->base + reg);
/* 3 clk delay required! */
udelay(1 + ((3 * USEC_PER_SEC) /
(host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
}
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
u32 c);
static void msmsdcc_reset_and_restore(struct msmsdcc_host *host)
{
u32 mci_clk = 0;
u32 mci_mask0 = 0;
int ret = 0;
/* Save the controller state */
mci_clk = readl(host->base + MMCICLOCK);
mci_mask0 = readl(host->base + MMCIMASK0);
/* Reset the controller */
ret = clk_reset(host->clk, CLK_RESET_ASSERT);
if (ret)
pr_err("%s: Clock assert failed at %u Hz with err %d\n",
mmc_hostname(host->mmc), host->clk_rate, ret);
ret = clk_reset(host->clk, CLK_RESET_DEASSERT);
if (ret)
pr_err("%s: Clock deassert failed at %u Hz with err %d\n",
mmc_hostname(host->mmc), host->clk_rate, ret);
pr_info("%s: Controller has been re-initialiazed\n",
mmc_hostname(host->mmc));
/* Restore the controller state */
writel(host->pwr, host->base + MMCIPOWER);
writel(mci_clk, host->base + MMCICLOCK);
writel(mci_mask0, host->base + MMCIMASK0);
ret = clk_set_rate(host->clk, host->clk_rate);
if (ret)
pr_err("%s: Failed to set clk rate %u Hz (%d)\n",
mmc_hostname(host->mmc), host->clk_rate, ret);
}
static void
msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
{
BUG_ON(host->curr.data);
host->curr.mrq = NULL;
host->curr.cmd = NULL;
if (mrq->data)
mrq->data->bytes_xfered = host->curr.data_xfered;
if (mrq->cmd->error == -ETIMEDOUT)
mdelay(5);
#if BUSCLK_PWRSAVE
msmsdcc_disable_clocks(host, 1);
#endif
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
*/
spin_unlock(&host->lock);
mmc_request_done(host->mmc, mrq);
spin_lock(&host->lock);
}
static void
msmsdcc_stop_data(struct msmsdcc_host *host)
{
host->curr.data = NULL;
host->curr.got_dataend = 0;
}
uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
{
return host->memres->start + MMCIFIFO;
}
static inline void
msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
msmsdcc_writel(host, arg, MMCIARGUMENT);
msmsdcc_writel(host, c, MMCICOMMAND);
}
static void
msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
{
struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,
MMCIDATALENGTH);
msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
(~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0);
msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
if (host->cmd_cmd) {
msmsdcc_start_command_exec(host,
(u32) host->cmd_cmd->arg,
(u32) host->cmd_c);
}
host->dma.active = 1;
}
static void
msmsdcc_dma_complete_tlet(unsigned long data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *)data;
unsigned long flags;
struct mmc_request *mrq;
struct msm_dmov_errdata err;
spin_lock_irqsave(&host->lock, flags);
host->dma.active = 0;
err = host->dma.err;
mrq = host->curr.mrq;
BUG_ON(!mrq);
WARN_ON(!mrq->data);
if (!(host->dma.result & DMOV_RSLT_VALID)) {
pr_err("msmsdcc: Invalid DataMover result\n");
goto out;
}
if (host->dma.result & DMOV_RSLT_DONE) {
host->curr.data_xfered = host->curr.xfer_size;
} else {
/* Error or flush */
if (host->dma.result & DMOV_RSLT_ERROR)
pr_err("%s: DMA error (0x%.8x)\n",
mmc_hostname(host->mmc), host->dma.result);
if (host->dma.result & DMOV_RSLT_FLUSH)
pr_err("%s: DMA channel flushed (0x%.8x)\n",
mmc_hostname(host->mmc), host->dma.result);
pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
err.flush[0], err.flush[1], err.flush[2],
err.flush[3], err.flush[4], err.flush[5]);
msmsdcc_reset_and_restore(host);
if (!mrq->data->error)
mrq->data->error = -EIO;
}
dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
host->dma.dir);
host->dma.sg = NULL;
host->dma.busy = 0;
if (host->curr.got_dataend || mrq->data->error) {
/*
* If we've already gotten our DATAEND / DATABLKEND
* for this request, then complete it through here.
*/
msmsdcc_stop_data(host);
if (!mrq->data->error)
host->curr.data_xfered = host->curr.xfer_size;
if (!mrq->data->stop || mrq->cmd->error) {
host->curr.mrq = NULL;
host->curr.cmd = NULL;
mrq->data->bytes_xfered = host->curr.data_xfered;
spin_unlock_irqrestore(&host->lock, flags);
#if BUSCLK_PWRSAVE
msmsdcc_disable_clocks(host, 1);
#endif
mmc_request_done(host->mmc, mrq);
return;
} else
msmsdcc_start_command(host, mrq->data->stop, 0);
}
out:
spin_unlock_irqrestore(&host->lock, flags);
return;
}
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
unsigned int result,
struct msm_dmov_errdata *err)
{
struct msmsdcc_dma_data *dma_data =
container_of(cmd, struct msmsdcc_dma_data, hdr);
struct msmsdcc_host *host = dma_data->host;
dma_data->result = result;
if (err)
memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));
tasklet_schedule(&host->dma_tlet);
}
static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
if (host->dma.channel == -1)
return -ENOENT;
if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
return -EINVAL;
if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
return -EINVAL;
return 0;
}
static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
{
struct msmsdcc_nc_dmadata *nc;
dmov_box *box;
uint32_t rows;
uint32_t crci;
unsigned int n;
int i, rc;
struct scatterlist *sg = data->sg;
rc = validate_dma(host, data);
if (rc)
return rc;
host->dma.sg = data->sg;
host->dma.num_ents = data->sg_len;
BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
nc = host->dma.nc;
switch (host->pdev_id) {
case 1:
crci = MSMSDCC_CRCI_SDC1;
break;
case 2:
crci = MSMSDCC_CRCI_SDC2;
break;
case 3:
crci = MSMSDCC_CRCI_SDC3;
break;
case 4:
crci = MSMSDCC_CRCI_SDC4;
break;
default:
host->dma.sg = NULL;
host->dma.num_ents = 0;
return -ENOENT;
}
if (data->flags & MMC_DATA_READ)
host->dma.dir = DMA_FROM_DEVICE;
else
host->dma.dir = DMA_TO_DEVICE;
host->curr.user_pages = 0;
box = &nc->cmd[0];
/* location of command block must be 64 bit aligned */
BUG_ON(host->dma.cmd_busaddr & 0x07);
nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
host->dma.num_ents, host->dma.dir);
if (n == 0) {
pr_err("%s: Unable to map in all sg elements\n",
mmc_hostname(host->mmc));
host->dma.sg = NULL;
host->dma.num_ents = 0;
return -ENOMEM;
}
for_each_sg(host->dma.sg, sg, n, i) {
box->cmd = CMD_MODE_BOX;
if (i == n - 1)
box->cmd |= CMD_LC;
/* number of FIFO-sized rows needed for this sg element */
rows = DIV_ROUND_UP(sg_dma_len(sg), MCI_FIFOSIZE);
if (data->flags & MMC_DATA_READ) {
box->src_row_addr = msmsdcc_fifo_addr(host);
box->dst_row_addr = sg_dma_address(sg);
box->src_dst_len = (MCI_FIFOSIZE << 16) |
(MCI_FIFOSIZE);
box->row_offset = MCI_FIFOSIZE;
box->num_rows = rows * ((1 << 16) + 1);
box->cmd |= CMD_SRC_CRCI(crci);
} else {
box->src_row_addr = sg_dma_address(sg);
box->dst_row_addr = msmsdcc_fifo_addr(host);
box->src_dst_len = (MCI_FIFOSIZE << 16) |
(MCI_FIFOSIZE);
box->row_offset = (MCI_FIFOSIZE << 16);
box->num_rows = rows * ((1 << 16) + 1);
box->cmd |= CMD_DST_CRCI(crci);
}
box++;
}
return 0;
}
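/*
 * Worked example for the box setup above, taking MCI_FIFOSIZE as 64
 * bytes purely for illustration: an sg element of 1000 bytes needs
 * rows = DIV_ROUND_UP(1000, 64) = 16 FIFO-sized rows, and num_rows
 * packs that count into both 16-bit halves of the register word:
 * 16 * ((1 << 16) + 1) = 0x00100010.
 */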
/* Return 1 for a CMD52 (SD_IO_RW_DIRECT) write that targets the
* CCCR abort register, so the command can be flagged as an abort. */
static int
snoop_cccr_abort(struct mmc_command *cmd)
{
if ((cmd->opcode == SD_IO_RW_DIRECT) &&
(cmd->arg & 0x80000000) &&
(((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
return 1;
return 0;
}
static void
msmsdcc_start_command_deferred(struct msmsdcc_host *host,
struct mmc_command *cmd, u32 *c)
{
*c |= (cmd->opcode | MCI_CPSM_ENABLE);
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136)
*c |= MCI_CPSM_LONGRSP;
*c |= MCI_CPSM_RESPONSE;
}
if (/*interrupt*/0)
*c |= MCI_CPSM_INTERRUPT;
/* data-bearing commands: READ_(SINGLE|MULTIPLE)_BLOCK (17, 18),
* WRITE_(SINGLE|MULTIPLE)_BLOCK (24, 25), SD_IO_RW_EXTENDED (53) */
if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
((cmd->opcode == 24) || (cmd->opcode == 25))) ||
(cmd->opcode == 53))
*c |= MCI_CSPM_DATCMD;
if (host->prog_scan && (cmd->opcode == 12)) { /* MMC_STOP_TRANSMISSION */
*c |= MCI_CPSM_PROGENA;
host->prog_enable = true;
}
if (cmd == cmd->mrq->stop)
*c |= MCI_CSPM_MCIABORT;
if (snoop_cccr_abort(cmd))
*c |= MCI_CSPM_MCIABORT;
if (host->curr.cmd != NULL) {
pr_err("%s: Overlapping command requests\n",
mmc_hostname(host->mmc));
}
host->curr.cmd = cmd;
}
static void
msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
struct mmc_command *cmd, u32 c)
{
unsigned int datactrl, timeout;
unsigned long long clks;
unsigned int pio_irqmask = 0;
host->curr.data = data;
host->curr.xfer_size = data->blksz * data->blocks;
host->curr.xfer_remain = host->curr.xfer_size;
host->curr.data_xfered = 0;
host->curr.got_dataend = 0;
memset(&host->pio, 0, sizeof(host->pio));
datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
if (!msmsdcc_config_dma(host, data))
datactrl |= MCI_DPSM_DMAENABLE;
else {
host->pio.sg = data->sg;
host->pio.sg_len = data->sg_len;
host->pio.sg_off = 0;
if (data->flags & MMC_DATA_READ) {
pio_irqmask = MCI_RXFIFOHALFFULLMASK;
if (host->curr.xfer_remain < MCI_FIFOSIZE)
pio_irqmask |= MCI_RXDATAAVLBLMASK;
} else
pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
clks = (unsigned long long)data->timeout_ns * host->clk_rate;
do_div(clks, NSEC_PER_SEC);
timeout = data->timeout_clks + (unsigned int)clks * 2;
if (datactrl & MCI_DPSM_DMAENABLE) {
/* Save parameters for the exec function */
host->cmd_timeout = timeout;
host->cmd_pio_irqmask = pio_irqmask;
host->cmd_datactrl = datactrl;
host->cmd_cmd = cmd;
host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
host->dma.hdr.data = (void *)host;
host->dma.busy = 1;
if (cmd) {
msmsdcc_start_command_deferred(host, cmd, &c);
host->cmd_c = c;
}
msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
if (data->flags & MMC_DATA_WRITE)
host->prog_scan = true;
} else {
msmsdcc_writel(host, timeout, MMCIDATATIMER);
msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) &
(~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0);
msmsdcc_writel(host, datactrl, MMCIDATACTRL);
if (cmd) {
/* Daisy-chain the command if requested */
msmsdcc_start_command(host, cmd, c);
}
}
}
static void
msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
{
if (cmd == cmd->mrq->stop)
c |= MCI_CSPM_MCIABORT;
host->stats.cmds++;
msmsdcc_start_command_deferred(host, cmd, &c);
msmsdcc_start_command_exec(host, cmd->arg, c);
}
static void
msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
unsigned int status)
{
if (status & MCI_DATACRCFAIL) {
pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
pr_err("%s: opcode 0x%.8x\n", __func__,
data->mrq->cmd->opcode);
pr_err("%s: blksz %d, blocks %d\n", __func__,
data->blksz, data->blocks);
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
data->error = -ETIMEDOUT;
} else if (status & MCI_RXOVERRUN) {
pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
data->error = -EIO;
} else if (status & MCI_TXUNDERRUN) {
pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
data->error = -EIO;
} else {
pr_err("%s: Unknown error (0x%.8x)\n",
mmc_hostname(host->mmc), status);
data->error = -EIO;
}
}
static int
msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
{
uint32_t *ptr = (uint32_t *) buffer;
int count = 0;
/* round a ragged tail up to a full 32-bit word; the FIFO is
* read in word units (e.g. remain = 6 becomes 8) */
if (remain % 4)
remain = ((remain >> 2) + 1) << 2;
while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
*ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
ptr++;
count += sizeof(uint32_t);
remain -= sizeof(uint32_t);
if (remain == 0)
break;
}
return count;
}
static int
msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
unsigned int remain, u32 status)
{
void __iomem *base = host->base;
char *ptr = buffer;
do {
unsigned int count, maxcnt, sz;
maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
MCI_FIFOHALFSIZE;
count = min(remain, maxcnt);
sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
writesl(base + MMCIFIFO, ptr, sz);
ptr += count;
remain -= count;
if (remain == 0)
break;
status = msmsdcc_readl(host, MMCISTATUS);
} while (status & MCI_TXFIFOHALFEMPTY);
return ptr - buffer;
}
static int
msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
{
while (maxspin) {
if ((msmsdcc_readl(host, MMCISTATUS) & mask))
return 0;
udelay(1);
--maxspin;
}
return -ETIMEDOUT;
}
static irqreturn_t
msmsdcc_pio_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
uint32_t status;
u32 mci_mask0;
status = msmsdcc_readl(host, MMCISTATUS);
mci_mask0 = msmsdcc_readl(host, MMCIMASK0);
if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0)
return IRQ_NONE;
do {
unsigned long flags;
unsigned int remain, len;
char *buffer;
if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
break;
if (msmsdcc_spin_on_status(host,
(MCI_TXFIFOHALFEMPTY |
MCI_RXDATAAVLBL),
PIO_SPINMAX)) {
break;
}
}
/* Map the current scatter buffer */
local_irq_save(flags);
buffer = kmap_atomic(sg_page(host->pio.sg))
+ host->pio.sg->offset;
buffer += host->pio.sg_off;
remain = host->pio.sg->length - host->pio.sg_off;
len = 0;
if (status & MCI_RXACTIVE)
len = msmsdcc_pio_read(host, buffer, remain);
if (status & MCI_TXACTIVE)
len = msmsdcc_pio_write(host, buffer, remain, status);
/* Unmap the buffer */
kunmap_atomic(buffer);
local_irq_restore(flags);
host->pio.sg_off += len;
host->curr.xfer_remain -= len;
host->curr.data_xfered += len;
remain -= len;
if (remain == 0) {
/* This sg page is full - do some housekeeping */
if (status & MCI_RXACTIVE && host->curr.user_pages)
flush_dcache_page(sg_page(host->pio.sg));
if (!--host->pio.sg_len) {
memset(&host->pio, 0, sizeof(host->pio));
break;
}
/* Advance to next sg */
host->pio.sg++;
host->pio.sg_off = 0;
}
status = msmsdcc_readl(host, MMCISTATUS);
} while (1);
if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) |
MCI_RXDATAAVLBLMASK, MMCIMASK0);
if (!host->curr.xfer_remain)
msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0,
MMCIMASK0);
return IRQ_HANDLED;
}
static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
struct mmc_command *cmd = host->curr.cmd;
host->curr.cmd = NULL;
cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
if (status & MCI_CMDTIMEOUT) {
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL &&
cmd->flags & MMC_RSP_CRC) {
pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
cmd->error = -EILSEQ;
}
if (!cmd->data || cmd->error) {
if (host->curr.data && host->dma.sg)
msm_dmov_stop_cmd(host->dma.channel,
&host->dma.hdr, 0);
else if (host->curr.data) { /* Non DMA */
msmsdcc_reset_and_restore(host);
msmsdcc_stop_data(host);
msmsdcc_request_end(host, cmd->mrq);
} else { /* host->data == NULL */
if (!cmd->error && host->prog_enable) {
if (status & MCI_PROGDONE) {
host->prog_scan = false;
host->prog_enable = false;
msmsdcc_request_end(host, cmd->mrq);
} else {
host->curr.cmd = cmd;
}
} else {
if (host->prog_enable) {
host->prog_scan = false;
host->prog_enable = false;
}
msmsdcc_request_end(host, cmd->mrq);
}
}
} else if (cmd->data)
if (!(cmd->data->flags & MMC_DATA_READ))
msmsdcc_start_data(host, cmd->data,
NULL, 0);
}
static void
msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
void __iomem *base)
{
struct mmc_data *data = host->curr.data;
if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
msmsdcc_do_cmdirq(host, status);
}
if (!data)
return;
/* Check for data errors */
if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
msmsdcc_data_err(host, data, status);
host->curr.data_xfered = 0;
if (host->dma.sg)
msm_dmov_stop_cmd(host->dma.channel,
&host->dma.hdr, 0);
else {
msmsdcc_reset_and_restore(host);
if (host->curr.data)
msmsdcc_stop_data(host);
if (!data->stop)
msmsdcc_request_end(host, data->mrq);
else
msmsdcc_start_command(host, data->stop, 0);
}
}
/* Check for data done */
if (!host->curr.got_dataend && (status & MCI_DATAEND))
host->curr.got_dataend = 1;
/*
* If DMA is still in progress, we complete via the completion handler
*/
if (host->curr.got_dataend && !host->dma.busy) {
/*
* There appears to be an issue in the controller where
* if you request a small block transfer (< fifo size),
* you may get your DATAEND/DATABLKEND irq without the
* PIO data irq.
*
* Check to see if there is still data to be read,
* and simulate a PIO irq.
*/
if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
msmsdcc_pio_irq(1, host);
msmsdcc_stop_data(host);
if (!data->error)
host->curr.data_xfered = host->curr.xfer_size;
if (!data->stop)
msmsdcc_request_end(host, data->mrq);
else
msmsdcc_start_command(host, data->stop, 0);
}
}
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
void __iomem *base = host->base;
u32 status;
int ret = 0;
int cardint = 0;
spin_lock(&host->lock);
do {
status = msmsdcc_readl(host, MMCISTATUS);
status &= msmsdcc_readl(host, MMCIMASK0);
if ((status & (~MCI_IRQ_PIO)) == 0)
break;
msmsdcc_writel(host, status, MMCICLEAR);
if (status & MCI_SDIOINTR)
status &= ~MCI_SDIOINTR;
if (!status)
break;
msmsdcc_handle_irq_data(host, status, base);
if (status & MCI_SDIOINTOPER) {
cardint = 1;
status &= ~MCI_SDIOINTOPER;
}
ret = 1;
} while (status);
spin_unlock(&host->lock);
/*
* We have to delay handling the card interrupt as it calls
* back into the driver.
*/
if (cardint)
mmc_signal_sdio_irq(host->mmc);
return IRQ_RETVAL(ret);
}
static void
msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct msmsdcc_host *host = mmc_priv(mmc);
unsigned long flags;
WARN_ON(host->curr.mrq != NULL);
WARN_ON(host->pwr == 0);
spin_lock_irqsave(&host->lock, flags);
host->stats.reqs++;
if (host->eject) {
if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
mrq->cmd->error = 0;
mrq->data->bytes_xfered = mrq->data->blksz *
mrq->data->blocks;
} else
mrq->cmd->error = -ENOMEDIUM;
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(mmc, mrq);
return;
}
msmsdcc_enable_clocks(host);
host->curr.mrq = mrq;
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
/* Queue/read data, daisy-chain command when data starts */
msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
else
msmsdcc_start_command(host, mrq->cmd, 0);
if (host->cmdpoll && !msmsdcc_spin_on_status(host,
MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
CMD_SPINMAX)) {
uint32_t status = msmsdcc_readl(host, MMCISTATUS);
msmsdcc_do_cmdirq(host, status);
msmsdcc_writel(host,
MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
MMCICLEAR);
host->stats.cmdpoll_hits++;
} else {
host->stats.cmdpoll_misses++;
}
spin_unlock_irqrestore(&host->lock, flags);
}
static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable)
{
struct msm_mmc_gpio_data *curr;
int i, rc = 0;
if (!host->plat->gpio_data || host->gpio_config_status == enable)
return;
curr = host->plat->gpio_data;
for (i = 0; i < curr->size; i++) {
if (enable) {
rc = gpio_request(curr->gpio[i].no,
curr->gpio[i].name);
if (rc) {
pr_err("%s: gpio_request(%d, %s) failed %d\n",
mmc_hostname(host->mmc),
curr->gpio[i].no,
curr->gpio[i].name, rc);
goto free_gpios;
}
} else {
gpio_free(curr->gpio[i].no);
}
}
host->gpio_config_status = enable;
return;
free_gpios:
for (i--; i >= 0; i--)
gpio_free(curr->gpio[i].no);
}
static void
msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msmsdcc_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0;
int rc;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
msmsdcc_enable_clocks(host);
spin_unlock_irqrestore(&host->lock, flags);
if (ios->clock) {
if (ios->clock != host->clk_rate) {
rc = clk_set_rate(host->clk, ios->clock);
if (rc < 0)
pr_err("%s: Error setting clock rate (%d)\n",
mmc_hostname(host->mmc), rc);
else
host->clk_rate = ios->clock;
}
clk |= MCI_CLK_ENABLE;
}
if (ios->bus_width == MMC_BUS_WIDTH_4)
clk |= (2 << 10); /* Set WIDEBUS */
if (ios->clock > 400000 && msmsdcc_pwrsave)
clk |= (1 << 9); /* PWRSAVE */
clk |= (1 << 12); /* FLOW_ENA */
clk |= (1 << 15); /* feedback clock */
if (host->plat->translate_vdd)
pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
switch (ios->power_mode) {
case MMC_POWER_OFF:
msmsdcc_setup_gpio(host, false);
break;
case MMC_POWER_UP:
pwr |= MCI_PWR_UP;
msmsdcc_setup_gpio(host, true);
break;
case MMC_POWER_ON:
pwr |= MCI_PWR_ON;
break;
}
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
pwr |= MCI_OD;
msmsdcc_writel(host, clk, MMCICLOCK);
if (host->pwr != pwr) {
host->pwr = pwr;
msmsdcc_writel(host, pwr, MMCIPOWER);
}
#if BUSCLK_PWRSAVE
spin_lock_irqsave(&host->lock, flags);
msmsdcc_disable_clocks(host, 1);
spin_unlock_irqrestore(&host->lock, flags);
#endif
}
static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct msmsdcc_host *host = mmc_priv(mmc);
unsigned long flags;
u32 status;
spin_lock_irqsave(&host->lock, flags);
if (msmsdcc_sdioirq == 1) {
status = msmsdcc_readl(host, MMCIMASK0);
if (enable)
status |= MCI_SDIOINTOPERMASK;
else
status &= ~MCI_SDIOINTOPERMASK;
host->saved_irq0mask = status;
msmsdcc_writel(host, status, MMCIMASK0);
}
spin_unlock_irqrestore(&host->lock, flags);
}
static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct msmsdcc_host *host = mmc_priv(mmc);
if (host->plat->init_card)
host->plat->init_card(card);
}
static const struct mmc_host_ops msmsdcc_ops = {
.request = msmsdcc_request,
.set_ios = msmsdcc_set_ios,
.enable_sdio_irq = msmsdcc_enable_sdio_irq,
.init_card = msmsdcc_init_card,
};
static void
msmsdcc_check_status(unsigned long data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *)data;
unsigned int status;
if (!host->plat->status) {
mmc_detect_change(host->mmc, 0);
goto out;
}
status = host->plat->status(mmc_dev(host->mmc));
host->eject = !status;
if (status ^ host->oldstat) {
pr_info("%s: Slot status change detected (%d -> %d)\n",
mmc_hostname(host->mmc), host->oldstat, status);
if (status)
mmc_detect_change(host->mmc, (5 * HZ) / 2);
else
mmc_detect_change(host->mmc, 0);
}
host->oldstat = status;
out:
if (host->timer.function)
mod_timer(&host->timer, jiffies + HZ);
}
static irqreturn_t
msmsdcc_platform_status_irq(int irq, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
pr_debug("%s: %d\n", __func__, irq);
msmsdcc_check_status((unsigned long) host);
return IRQ_HANDLED;
}
static void
msmsdcc_status_notify_cb(int card_present, void *dev_id)
{
struct msmsdcc_host *host = dev_id;
pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),
card_present);
msmsdcc_check_status((unsigned long) host);
}
static void
msmsdcc_busclk_expired(unsigned long _data)
{
struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
if (host->clks_on)
msmsdcc_disable_clocks(host, 0);
}
static int
msmsdcc_init_dma(struct msmsdcc_host *host)
{
memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
host->dma.host = host;
host->dma.channel = -1;
if (!host->dmares)
return -ENODEV;
host->dma.nc = dma_alloc_coherent(NULL,
sizeof(struct msmsdcc_nc_dmadata),
&host->dma.nc_busaddr,
GFP_KERNEL);
if (host->dma.nc == NULL) {
pr_err("Unable to allocate DMA buffer\n");
return -ENOMEM;
}
memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
host->dma.cmd_busaddr = host->dma.nc_busaddr;
host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
offsetof(struct msmsdcc_nc_dmadata, cmdptr);
host->dma.channel = host->dmares->start;
return 0;
}
static int
msmsdcc_probe(struct platform_device *pdev)
{
struct msm_mmc_platform_data *plat = pdev->dev.platform_data;
struct msmsdcc_host *host;
struct mmc_host *mmc;
struct resource *cmd_irqres = NULL;
struct resource *stat_irqres = NULL;
struct resource *memres = NULL;
struct resource *dmares = NULL;
int ret;
/* must have platform data */
if (!plat) {
pr_err("%s: Platform data not available\n", __func__);
ret = -EINVAL;
goto out;
}
if (pdev->id < 1 || pdev->id > 4)
return -EINVAL;
if (pdev->resource == NULL || pdev->num_resources < 2) {
pr_err("%s: Invalid resource\n", __func__);
return -ENXIO;
}
memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"cmd_irq");
stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"status_irq");
if (!cmd_irqres || !memres) {
pr_err("%s: Invalid resource\n", __func__);
return -ENXIO;
}
/*
* Setup our host structure
*/
mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
}
host = mmc_priv(mmc);
host->pdev_id = pdev->id;
host->plat = plat;
host->mmc = mmc;
host->curr.cmd = NULL;
init_timer(&host->busclk_timer);
host->busclk_timer.data = (unsigned long) host;
host->busclk_timer.function = msmsdcc_busclk_expired;
host->cmdpoll = 1;
host->base = ioremap(memres->start, PAGE_SIZE);
if (!host->base) {
ret = -ENOMEM;
goto host_free;
}
host->cmd_irqres = cmd_irqres;
host->memres = memres;
host->dmares = dmares;
spin_lock_init(&host->lock);
tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
(unsigned long)host);
/*
* Setup DMA
*/
if (host->dmares) {
ret = msmsdcc_init_dma(host);
if (ret)
goto ioremap_free;
} else {
host->dma.channel = -1;
}
/* Get our clocks */
host->pclk = clk_get(&pdev->dev, "sdc_pclk");
if (IS_ERR(host->pclk)) {
ret = PTR_ERR(host->pclk);
goto dma_free;
}
host->clk = clk_get(&pdev->dev, "sdc_clk");
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
goto pclk_put;
}
ret = clk_set_rate(host->clk, msmsdcc_fmin);
if (ret) {
pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
goto clk_put;
}
ret = clk_prepare(host->pclk);
if (ret)
goto clk_put;
ret = clk_prepare(host->clk);
if (ret)
goto clk_unprepare_p;
/* Enable clocks */
ret = msmsdcc_enable_clocks(host);
if (ret)
goto clk_unprepare;
host->pclk_rate = clk_get_rate(host->pclk);
host->clk_rate = clk_get_rate(host->clk);
/*
* Setup MMC host structure
*/
mmc->ops = &msmsdcc_ops;
mmc->f_min = msmsdcc_fmin;
mmc->f_max = msmsdcc_fmax;
mmc->ocr_avail = plat->ocr_mask;
if (msmsdcc_4bit)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (msmsdcc_sdioirq)
mmc->caps |= MMC_CAP_SDIO_IRQ;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
mmc->max_segs = NR_SG;
mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
mmc->max_blk_count = 65536;
mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
mmc->max_seg_size = mmc->max_req_size;
msmsdcc_writel(host, 0, MMCIMASK0);
msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
host->saved_irq0mask = MCI_IRQENABLE;
/*
* Setup card detect change
*/
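/*
 * Card detection is resolved in priority order: a dedicated status
 * IRQ if the platform provides one, else a platform status-notify
 * callback, else a one-second polling timer that re-reads
 * plat->status(); boards supplying none of these only get the
 * error message below.
 */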
memset(&host->timer, 0, sizeof(host->timer));
if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
unsigned long irqflags = IRQF_SHARED |
(stat_irqres->flags & IRQF_TRIGGER_MASK);
host->stat_irq = stat_irqres->start;
ret = request_irq(host->stat_irq,
msmsdcc_platform_status_irq,
irqflags,
DRIVER_NAME " (slot)",
host);
if (ret) {
pr_err("%s: Unable to get slot IRQ %d (%d)\n",
mmc_hostname(mmc), host->stat_irq, ret);
goto clk_disable;
}
} else if (plat->register_status_notify) {
plat->register_status_notify(msmsdcc_status_notify_cb, host);
} else if (!plat->status) {
pr_err("%s: No card detect facilities available\n",
mmc_hostname(mmc));
} else {
init_timer(&host->timer);
host->timer.data = (unsigned long)host;
host->timer.function = msmsdcc_check_status;
host->timer.expires = jiffies + HZ;
add_timer(&host->timer);
}
if (plat->status) {
host->oldstat = host->plat->status(mmc_dev(host->mmc));
host->eject = !host->oldstat;
}
ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
DRIVER_NAME " (cmd)", host);
if (ret)
goto stat_irq_free;
ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
DRIVER_NAME " (pio)", host);
if (ret)
goto cmd_irq_free;
mmc_set_drvdata(pdev, mmc);
mmc_add_host(mmc);
pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
mmc_hostname(mmc), (unsigned long long)memres->start,
(unsigned int) cmd_irqres->start,
(unsigned int) host->stat_irq, host->dma.channel);
pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
(mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
pr_info("%s: Power save feature enable = %d\n",
mmc_hostname(mmc), msmsdcc_pwrsave);
if (host->dma.channel != -1) {
pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
mmc_hostname(mmc), host->dma.cmd_busaddr,
host->dma.cmdptr_busaddr);
} else {
pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
}
if (host->timer.function)
pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
return 0;
cmd_irq_free:
free_irq(cmd_irqres->start, host);
stat_irq_free:
if (host->stat_irq)
free_irq(host->stat_irq, host);
clk_disable:
msmsdcc_disable_clocks(host, 0);
clk_unprepare:
clk_unprepare(host->clk);
clk_unprepare_p:
clk_unprepare(host->pclk);
clk_put:
clk_put(host->clk);
pclk_put:
clk_put(host->pclk);
dma_free:
if (host->dmares)
dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata),
host->dma.nc, host->dma.nc_busaddr);
ioremap_free:
tasklet_kill(&host->dma_tlet);
iounmap(host->base);
host_free:
mmc_free_host(mmc);
out:
return ret;
}
#ifdef CONFIG_PM
static int
msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
if (host->stat_irq)
disable_irq(host->stat_irq);
msmsdcc_writel(host, 0, MMCIMASK0);
if (host->clks_on)
msmsdcc_disable_clocks(host, 0);
}
return 0;
}
static int
msmsdcc_resume(struct platform_device *dev)
{
struct mmc_host *mmc = mmc_get_drvdata(dev);
if (mmc) {
struct msmsdcc_host *host = mmc_priv(mmc);
msmsdcc_enable_clocks(host);
msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
if (host->stat_irq)
enable_irq(host->stat_irq);
#if BUSCLK_PWRSAVE
msmsdcc_disable_clocks(host, 1);
#endif
}
return 0;
}
#else
#define msmsdcc_suspend 0
#define msmsdcc_resume 0
#endif
static struct platform_driver msmsdcc_driver = {
.probe = msmsdcc_probe,
.suspend = msmsdcc_suspend,
.resume = msmsdcc_resume,
.driver = {
.name = "msm_sdcc",
},
};
module_platform_driver(msmsdcc_driver);
MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
raphui/linux | drivers/media/pci/cx23885/cx23885-vbi.c | 845 | 7992 | /*
* Driver for the Conexant CX23885 PCIe bridge
*
* Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include "cx23885.h"
static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
#define dprintk(level, fmt, arg...)\
do { if (vbi_debug >= level)\
printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
} while (0)
/* ------------------------------------------------------------------ */
#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18
int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx23885_dev *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 27000000;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
f->fmt.vbi.offset = 0;
f->fmt.vbi.flags = 0;
if (dev->tvnorm & V4L2_STD_525_60) {
/* ntsc */
f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
} else if (dev->tvnorm & V4L2_STD_625_50) {
/* pal */
f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
}
return 0;
}
/* We're given the Video Interrupt status register.
* The cx23885_video_irq() func has already validated
* the potential error bits, we just need to
* deal with vbi payload and return indication if
* we actually processed any payload.
*/
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
u32 count;
int handled = 0;
if (status & VID_BC_MSK_VBI_RISCI1) {
dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
spin_lock(&dev->slock);
count = cx_read(VID_A_GPCNT);
cx23885_video_wakeup(dev, &dev->vbiq, count);
spin_unlock(&dev->slock);
handled++;
}
return handled;
}
static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
struct cx23885_dmaqueue *q,
struct cx23885_buffer *buf)
{
dprintk(1, "%s()\n", __func__);
/* setup fifo + format */
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
VBI_LINE_LENGTH, buf->risc.dma);
/* reset counter */
cx_write(VID_A_GPCNT_CTL, 3);
cx_write(VID_A_VBI_CTRL, 3);
cx_write(VBI_A_GPCNT_CTL, 3);
q->count = 0;
/* enable irq */
cx23885_irq_add_enable(dev, 0x01);
cx_set(VID_A_INT_MSK, 0x000022);
/* start dma */
cx_set(DEV_CNTRL2, (1<<5));
cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
return 0;
}
/* ------------------------------------------------------------------ */
static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], void *alloc_ctxs[])
{
struct cx23885_dev *dev = q->drv_priv;
unsigned lines = VBI_PAL_LINE_COUNT;
if (dev->tvnorm & V4L2_STD_525_60)
lines = VBI_NTSC_LINE_COUNT;
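/*
 * A single plane sized for both fields: count[0] + count[1] lines
 * of VBI_LINE_LENGTH bytes each, matching what cx23885_vbi_fmt()
 * reports to userspace.
 */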
*num_planes = 1;
sizes[0] = lines * VBI_LINE_LENGTH * 2;
alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
static int buffer_prepare(struct vb2_buffer *vb)
{
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
unsigned lines = VBI_PAL_LINE_COUNT;
if (dev->tvnorm & V4L2_STD_525_60)
lines = VBI_NTSC_LINE_COUNT;
if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
return -EINVAL;
vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
cx23885_risc_vbibuffer(dev->pci, &buf->risc,
sgt->sgl,
0, VBI_LINE_LENGTH * lines,
VBI_LINE_LENGTH, 0,
lines);
return 0;
}
static void buffer_finish(struct vb2_buffer *vb)
{
struct cx23885_buffer *buf = container_of(vb,
struct cx23885_buffer, vb);
cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}
/*
* The risc program for each buffer works as follows: it starts with a simple
* 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
* buffer follows and at the end we have a JUMP back to the start + 12 (skipping
* the initial JUMP).
*
* This is the risc program of the first buffer to be queued if the active list
* is empty and it just keeps DMAing this buffer without generating any
* interrupts.
*
* If a new buffer is added then the initial JUMP in the code for that buffer
* will generate an interrupt which signals that the previous buffer has been
* DMAed successfully and that it can be returned to userspace.
*
* It also sets the final jump of the previous buffer to the start of the new
* buffer, thus chaining the new buffer into the DMA chain. This is a single
* atomic u32 write, so there is no race condition.
*
* The end-result of all this that you only get an interrupt when a buffer
* is ready, so the control flow is very easy.
*/
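/*
 * A sketch of the chain for two queued buffers A and B (the
 * addresses are hypothetical):
 *
 *   A: JUMP A+12 ; DMA A ; JUMP A+12         (loops on itself)
 *
 * after buffer_queue(B) patches A's final jump:
 *
 *   A: JUMP A+12 ; DMA A ; JUMP B
 *   B: JUMP B+12 ; DMA B ; JUMP B+12         (B's first JUMP has
 *                                             RISC_IRQ1 set)
 */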
static void buffer_queue(struct vb2_buffer *vb)
{
struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb);
struct cx23885_buffer *prev;
struct cx23885_dmaqueue *q = &dev->vbiq;
unsigned long flags;
buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
if (list_empty(&q->active)) {
spin_lock_irqsave(&dev->slock, flags);
list_add_tail(&buf->queue, &q->active);
spin_unlock_irqrestore(&dev->slock, flags);
dprintk(2, "[%p/%d] vbi_queue - first active\n",
buf, buf->vb.v4l2_buf.index);
} else {
buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
prev = list_entry(q->active.prev, struct cx23885_buffer,
queue);
spin_lock_irqsave(&dev->slock, flags);
list_add_tail(&buf->queue, &q->active);
spin_unlock_irqrestore(&dev->slock, flags);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
dprintk(2, "[%p/%d] buffer_queue - append to active\n",
buf, buf->vb.v4l2_buf.index);
}
}
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct cx23885_dev *dev = q->drv_priv;
struct cx23885_dmaqueue *dmaq = &dev->vbiq;
struct cx23885_buffer *buf = list_entry(dmaq->active.next,
struct cx23885_buffer, queue);
cx23885_start_vbi_dma(dev, dmaq, buf);
return 0;
}
static void cx23885_stop_streaming(struct vb2_queue *q)
{
struct cx23885_dev *dev = q->drv_priv;
struct cx23885_dmaqueue *dmaq = &dev->vbiq;
unsigned long flags;
cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
spin_lock_irqsave(&dev->slock, flags);
while (!list_empty(&dmaq->active)) {
struct cx23885_buffer *buf = list_entry(dmaq->active.next,
struct cx23885_buffer, queue);
list_del(&buf->queue);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->slock, flags);
}
struct vb2_ops cx23885_vbi_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
.buf_queue = buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = cx23885_start_streaming,
.stop_streaming = cx23885_stop_streaming,
};
| gpl-2.0 |
garwynn/L720_MDL_Kernel | arch/arm/kernel/fiq.c | 1869 | 3187 | /*
* linux/arch/arm/kernel/fiq.c
*
* Copyright (C) 1998 Russell King
* Copyright (C) 1998, 1999 Phil Blundell
*
* FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
*
* FIQ support re-written by Russell King to be more generic
*
* We now properly support a method by which the FIQ handlers can
* be stacked onto the vector. We still do not support sharing
* the FIQ vector itself.
*
* Operation is as follows:
* 1. Owner A claims FIQ:
* - default_fiq relinquishes control.
* 2. Owner A:
* - inserts code.
* - sets any registers,
* - enables FIQ.
* 3. Owner B claims FIQ:
* - if owner A has a relinquish function.
* - disable FIQs.
* - saves any registers.
* - returns zero.
* 4. Owner B:
* - inserts code.
* - sets any registers,
* - enables FIQ.
* 5. Owner B releases FIQ:
* - Owner A is asked to reacquire FIQ:
* - inserts code.
* - restores saved registers.
* - enables FIQ.
* 6. Goto 3
*/
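/*
 * A minimal claim sequence following the protocol above; the my_*
 * names are hypothetical, the calls are declared in <asm/fiq.h>:
 *
 *	static struct fiq_handler my_fh = {
 *		.name	= "my-fiq",
 *		.fiq_op	= my_fiq_op,
 *	};
 *
 *	if (!claim_fiq(&my_fh)) {
 *		set_fiq_handler(&my_fiq_start, my_fiq_length);
 *		set_fiq_regs(&my_regs);
 *		enable_fiq(my_fiq);
 *	}
 */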
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/traps.h>
static unsigned long no_fiq_insn;
/* Default reacquire function
* - we always relinquish FIQ control
* - we always reacquire FIQ control
*/
static int fiq_def_op(void *ref, int relinquish)
{
if (!relinquish)
set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
return 0;
}
static struct fiq_handler default_owner = {
.name = "default",
.fiq_op = fiq_def_op,
};
static struct fiq_handler *current_fiq = &default_owner;
int show_fiq_list(struct seq_file *p, int prec)
{
if (current_fiq != &default_owner)
seq_printf(p, "%*s: %s\n", prec, "FIQ",
current_fiq->name);
return 0;
}
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
memcpy((void *)0xffff001c, start, length);
#else
memcpy(vectors_page + 0x1c, start, length);
#endif
flush_icache_range(0xffff001c, 0xffff001c + length);
if (!vectors_high())
flush_icache_range(0x1c, 0x1c + length);
}
int claim_fiq(struct fiq_handler *f)
{
int ret = 0;
if (current_fiq) {
ret = -EBUSY;
if (current_fiq->fiq_op != NULL)
ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
}
if (!ret) {
f->next = current_fiq;
current_fiq = f;
}
return ret;
}
void release_fiq(struct fiq_handler *f)
{
if (current_fiq != f) {
printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
f->name, current_fiq->name);
dump_stack();
return;
}
do
current_fiq = current_fiq->next;
while (current_fiq->fiq_op(current_fiq->dev_id, 0));
}
void enable_fiq(int fiq)
{
enable_irq(fiq + FIQ_START);
}
void disable_fiq(int fiq)
{
disable_irq(fiq + FIQ_START);
}
EXPORT_SYMBOL(set_fiq_handler);
EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(claim_fiq);
EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(void)
{
no_fiq_insn = *(unsigned long *)0xffff001c;
}
| gpl-2.0 |
ARMP/ARMP-i9300 | drivers/pci/hotplug/rpadlpar_core.c | 3661 | 10351 | /*
* Interface for Dynamic Logical Partitioning of I/O Slots on
* RPA-compliant PPC64 platform.
*
* John Rose <johnrose@austin.ibm.com>
* Linda Xie <lxie@us.ibm.com>
*
* October 2003
*
* Copyright (C) 2003 IBM.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/pci-bridge.h>
#include <linux/mutex.h>
#include <asm/rtas.h>
#include <asm/vio.h>
#include "../pci.h"
#include "rpaphp.h"
#include "rpadlpar.h"
static DEFINE_MUTEX(rpadlpar_mutex);
#define DLPAR_MODULE_NAME "rpadlpar_io"
#define NODE_TYPE_VIO 1
#define NODE_TYPE_SLOT 2
#define NODE_TYPE_PHB 3
static struct device_node *find_vio_slot_node(char *drc_name)
{
struct device_node *parent = of_find_node_by_name(NULL, "vdevice");
struct device_node *dn = NULL;
char *name;
int rc;
if (!parent)
return NULL;
while ((dn = of_get_next_child(parent, dn))) {
rc = rpaphp_get_drc_props(dn, NULL, &name, NULL, NULL);
if ((rc == 0) && (!strcmp(drc_name, name)))
break;
}
return dn;
}
/* Find dlpar-capable pci node that contains the specified name and type */
static struct device_node *find_php_slot_pci_node(char *drc_name,
char *drc_type)
{
struct device_node *np = NULL;
char *name;
char *type;
int rc;
while ((np = of_find_node_by_name(np, "pci"))) {
rc = rpaphp_get_drc_props(np, NULL, &name, &type, NULL);
if (rc == 0)
if (!strcmp(drc_name, name) && !strcmp(drc_type, type))
break;
}
return np;
}
static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
{
struct device_node *dn;
dn = find_php_slot_pci_node(drc_name, "SLOT");
if (dn) {
*node_type = NODE_TYPE_SLOT;
return dn;
}
dn = find_php_slot_pci_node(drc_name, "PHB");
if (dn) {
*node_type = NODE_TYPE_PHB;
return dn;
}
dn = find_vio_slot_node(drc_name);
if (dn) {
*node_type = NODE_TYPE_VIO;
return dn;
}
return NULL;
}
/**
* find_php_slot - return hotplug slot structure for device node
* @dn: target &device_node
*
* This routine will return the hotplug slot structure
* for a given device node. Note that built-in PCI slots
* may be dlpar-able, but not hot-pluggable, so this routine
* will return NULL for built-in PCI slots.
*/
static struct slot *find_php_slot(struct device_node *dn)
{
struct list_head *tmp, *n;
struct slot *slot;
list_for_each_safe(tmp, n, &rpaphp_slot_head) {
slot = list_entry(tmp, struct slot, rpaphp_slot_list);
if (slot->dn == dn)
return slot;
}
return NULL;
}
static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent,
struct device_node *dev_dn)
{
struct pci_dev *tmp = NULL;
struct device_node *child_dn;
list_for_each_entry(tmp, &parent->devices, bus_list) {
child_dn = pci_device_to_OF_node(tmp);
if (child_dn == dev_dn)
return tmp;
}
return NULL;
}
static void dlpar_pci_add_bus(struct device_node *dn)
{
struct pci_dn *pdn = PCI_DN(dn);
struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
eeh_add_device_tree_early(dn);
/* Add EADS device to PHB bus, adding new entry to bus->devices */
dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
if (!dev) {
printk(KERN_ERR "%s: failed to create pci dev for %s\n",
__func__, dn->full_name);
return;
}
/* Scan below the new bridge */
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
of_scan_pci_bridge(dn, dev);
/* Map IO space for child bus, which may or may not succeed */
pcibios_map_io_space(dev->subordinate);
/* Finish adding it: resource allocation, adding devices, etc...
* Note that we need to perform the finish pass on the -parent-
* bus of the EADS bridge so the bridge device itself gets
* properly added
*/
pcibios_finish_adding_to_bus(phb->bus);
}
static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
{
struct pci_dev *dev;
struct pci_controller *phb;
if (pcibios_find_pci_bus(dn))
return -EINVAL;
/* Add pci bus */
dlpar_pci_add_bus(dn);
/* Confirm new bridge dev was created */
phb = PCI_DN(dn)->phb;
dev = dlpar_find_new_dev(phb->bus, dn);
if (!dev) {
printk(KERN_ERR "%s: unable to add bus %s\n", __func__,
drc_name);
return -EIO;
}
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
printk(KERN_ERR "%s: unexpected header type %d, unable to add bus %s\n",
__func__, dev->hdr_type, drc_name);
return -EIO;
}
/* Add hotplug slot */
if (rpaphp_add_slot(dn)) {
printk(KERN_ERR "%s: unable to add hotplug slot %s\n",
__func__, drc_name);
return -EIO;
}
return 0;
}
static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
{
struct slot *slot;
struct pci_dn *pdn;
int rc = 0;
if (!pcibios_find_pci_bus(dn))
return -EINVAL;
/* If the PCI slot is hotpluggable, use hotplug to remove it */
slot = find_php_slot(dn);
if (slot && rpaphp_deregister_slot(slot)) {
printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
__func__, drc_name);
return -EIO;
}
pdn = dn->data;
BUG_ON(!pdn || !pdn->phb);
rc = remove_phb_dynamic(pdn->phb);
if (rc < 0)
return rc;
pdn->phb = NULL;
return 0;
}
static int dlpar_add_phb(char *drc_name, struct device_node *dn)
{
struct pci_controller *phb;
if (PCI_DN(dn) && PCI_DN(dn)->phb) {
/* PHB already exists */
return -EINVAL;
}
phb = init_phb_dynamic(dn);
if (!phb)
return -EIO;
if (rpaphp_add_slot(dn)) {
printk(KERN_ERR "%s: unable to add hotplug slot %s\n",
__func__, drc_name);
return -EIO;
}
return 0;
}
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
if (vio_find_node(dn))
return -EINVAL;
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
"%s: failed to register vio node %s\n",
__func__, drc_name);
return -EIO;
}
return 0;
}
/**
* dlpar_add_slot - DLPAR add an I/O Slot
* @drc_name: drc-name of newly added slot
*
* Make the hotplug module and the kernel aware of a newly added I/O Slot.
* Return Codes:
* 0 Success
* -ENODEV Not a valid drc_name
* -EINVAL Slot already added
* -ERESTARTSYS Signalled before obtaining lock
* -EIO Internal PCI Error
*/
int dlpar_add_slot(char *drc_name)
{
struct device_node *dn = NULL;
int node_type;
int rc = -EIO;
if (mutex_lock_interruptible(&rpadlpar_mutex))
return -ERESTARTSYS;
/* Find newly added node */
dn = find_dlpar_node(drc_name, &node_type);
if (!dn) {
rc = -ENODEV;
goto exit;
}
switch (node_type) {
case NODE_TYPE_VIO:
rc = dlpar_add_vio_slot(drc_name, dn);
break;
case NODE_TYPE_SLOT:
rc = dlpar_add_pci_slot(drc_name, dn);
break;
case NODE_TYPE_PHB:
rc = dlpar_add_phb(drc_name, dn);
break;
}
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
mutex_unlock(&rpadlpar_mutex);
return rc;
}
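/*
 * In practice these entry points are driven from the sysfs files set
 * up by dlpar_sysfs_init(), e.g. (the drc-name shown is just an
 * illustrative platform-specific location code):
 *
 *	echo "U7879.001.DQD014E-P1-C3" > /sys/bus/pci/slots/control/add_slot
 */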
/**
* dlpar_remove_vio_slot - DLPAR remove a virtual I/O Slot
* @drc_name: drc-name of the slot to be removed
* @dn: &device_node
*
* Remove the kernel and hotplug representations of an I/O Slot.
* Return Codes:
* 0 Success
* -EINVAL Vio dev doesn't exist
*/
static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
{
struct vio_dev *vio_dev;
vio_dev = vio_find_node(dn);
if (!vio_dev)
return -EINVAL;
vio_unregister_device(vio_dev);
return 0;
}
/**
* dlpar_remove_pci_slot - DLPAR remove a PCI I/O Slot
* @drc_name: drc-name of the slot to be removed
* @dn: &device_node
*
* Remove the kernel and hotplug representations of a PCI I/O Slot.
* Return Codes:
* 0 Success
* -ENODEV Not a valid drc_name
* -EIO Internal PCI Error
*/
int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
{
struct pci_bus *bus;
struct slot *slot;
bus = pcibios_find_pci_bus(dn);
if (!bus)
return -EINVAL;
pr_debug("PCI: Removing PCI slot below EADS bridge %s\n",
bus->self ? pci_name(bus->self) : "<!PHB!>");
slot = find_php_slot(dn);
if (slot) {
pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n",
pci_domain_nr(bus), bus->number);
if (rpaphp_deregister_slot(slot)) {
printk(KERN_ERR
"%s: unable to remove hotplug slot %s\n",
__func__, drc_name);
return -EIO;
}
}
/* Remove all devices below slot */
pcibios_remove_pci_devices(bus);
/* Unmap PCI IO space */
if (pcibios_unmap_io_space(bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__func__);
return -ERANGE;
}
/* Remove the EADS bridge device itself */
BUG_ON(!bus->self);
pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
eeh_remove_bus_device(bus->self);
pci_remove_bus_device(bus->self);
return 0;
}
/**
* dlpar_remove_slot - DLPAR remove an I/O Slot
* @drc_name: drc-name of the slot to be removed
*
* Remove the kernel and hotplug representations of an I/O Slot.
* Return Codes:
* 0 Success
* -ENODEV Not a valid drc_name
* -EINVAL Slot already removed
* -ERESTARTSYS Signalled before obtaining lock
* -EIO Internal Error
*/
int dlpar_remove_slot(char *drc_name)
{
struct device_node *dn;
int node_type;
int rc = 0;
if (mutex_lock_interruptible(&rpadlpar_mutex))
return -ERESTARTSYS;
dn = find_dlpar_node(drc_name, &node_type);
if (!dn) {
rc = -ENODEV;
goto exit;
}
switch (node_type) {
case NODE_TYPE_VIO:
rc = dlpar_remove_vio_slot(drc_name, dn);
break;
case NODE_TYPE_PHB:
rc = dlpar_remove_phb(drc_name, dn);
break;
case NODE_TYPE_SLOT:
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
vm_unmap_aliases();
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
exit:
mutex_unlock(&rpadlpar_mutex);
return rc;
}
static inline int is_dlpar_capable(void)
{
int rc = rtas_token("ibm,configure-connector");
return (int) (rc != RTAS_UNKNOWN_SERVICE);
}
int __init rpadlpar_io_init(void)
{
int rc = 0;
if (!is_dlpar_capable()) {
printk(KERN_WARNING "%s: partition not DLPAR capable\n",
__func__);
return -EPERM;
}
rc = dlpar_sysfs_init();
return rc;
}
void rpadlpar_io_exit(void)
{
dlpar_sysfs_exit();
return;
}
module_init(rpadlpar_io_init);
module_exit(rpadlpar_io_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AlmightyMegadeth00/kernel_oneplus_msm8974 | drivers/input/misc/twl4030-vibra.c | 4941 | 7113 | /*
* twl4030-vibra.c - TWL4030 Vibrator driver
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Written by Henrik Saari <henrik.saari@nokia.com>
* Updates by Felipe Balbi <felipe.balbi@nokia.com>
* Input by Jari Vanhala <ext-jari.vanhala@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl4030-audio.h>
#include <linux/input.h>
#include <linux/slab.h>
/* MODULE ID2 */
#define LEDEN 0x00
/* ForceFeedback */
#define EFFECT_DIR_180_DEG 0x8000 /* range is 0 - 0xFFFF */
struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
struct workqueue_struct *workqueue;
struct work_struct play_work;
bool enabled;
int speed;
int direction;
bool coexist;
};
static void vibra_disable_leds(void)
{
u8 reg;
/* Disable LEDA & LEDB, cannot be used with vibra (PWM) */
twl_i2c_read_u8(TWL4030_MODULE_LED, ®, LEDEN);
reg &= ~0x03;
twl_i2c_write_u8(TWL4030_MODULE_LED, LEDEN, reg);
}
/* Powers H-Bridge and enables audio clk */
static void vibra_enable(struct vibra_info *info)
{
u8 reg;
twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
/* turn H-Bridge on */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
®, TWL4030_REG_VIBRA_CTL);
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);
info->enabled = true;
}
static void vibra_disable(struct vibra_info *info)
{
u8 reg;
/* Power down H-Bridge */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
®, TWL4030_REG_VIBRA_CTL);
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
twl4030_audio_disable_resource(TWL4030_AUDIO_RES_APLL);
twl4030_audio_disable_resource(TWL4030_AUDIO_RES_POWER);
info->enabled = false;
}
static void vibra_play_work(struct work_struct *work)
{
struct vibra_info *info = container_of(work,
struct vibra_info, play_work);
int dir;
int pwm;
u8 reg;
dir = info->direction;
pwm = info->speed;
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
®, TWL4030_REG_VIBRA_CTL);
if (pwm && (!info->coexist || !(reg & TWL4030_VIBRA_SEL))) {
if (!info->enabled)
vibra_enable(info);
/* set vibra rotation direction */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
®, TWL4030_REG_VIBRA_CTL);
reg = (dir) ? (reg | TWL4030_VIBRA_DIR) :
(reg & ~TWL4030_VIBRA_DIR);
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
reg, TWL4030_REG_VIBRA_CTL);
/* set PWM, 1 = max, 255 = min */
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
256 - pwm, TWL4030_REG_VIBRA_SET);
} else {
if (info->enabled)
vibra_disable(info);
}
}
/*** Input/ForceFeedback ***/
static int vibra_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct vibra_info *info = input_get_drvdata(input);
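/*
 * Scale the 16-bit rumble magnitude down to the 8-bit PWM range; a
 * weak-only effect runs at half strength (>> 9). Directions in the
 * upper half of the 0-0xFFFF circle reverse the motor.
 */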
info->speed = effect->u.rumble.strong_magnitude >> 8;
if (!info->speed)
info->speed = effect->u.rumble.weak_magnitude >> 9;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
queue_work(info->workqueue, &info->play_work);
return 0;
}
static int twl4030_vibra_open(struct input_dev *input)
{
struct vibra_info *info = input_get_drvdata(input);
info->workqueue = create_singlethread_workqueue("vibra");
if (info->workqueue == NULL) {
dev_err(&input->dev, "couldn't create workqueue\n");
return -ENOMEM;
}
return 0;
}
static void twl4030_vibra_close(struct input_dev *input)
{
struct vibra_info *info = input_get_drvdata(input);
cancel_work_sync(&info->play_work);
INIT_WORK(&info->play_work, vibra_play_work); /* cleanup */
destroy_workqueue(info->workqueue);
info->workqueue = NULL;
if (info->enabled)
vibra_disable(info);
}
/*** Module ***/
#ifdef CONFIG_PM_SLEEP
static int twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
if (info->enabled)
vibra_disable(info);
return 0;
}
static int twl4030_vibra_resume(struct device *dev)
{
vibra_disable_leds();
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct vibra_info *info;
int ret;
if (!pdata) {
dev_dbg(&pdev->dev, "platform_data not available\n");
return -EINVAL;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = &pdev->dev;
info->coexist = pdata->coexist;
INIT_WORK(&info->play_work, vibra_play_work);
info->input_dev = input_allocate_device();
if (info->input_dev == NULL) {
dev_err(&pdev->dev, "couldn't allocate input device\n");
ret = -ENOMEM;
goto err_kzalloc;
}
input_set_drvdata(info->input_dev, info);
info->input_dev->name = "twl4030:vibrator";
info->input_dev->id.version = 1;
info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->open = twl4030_vibra_open;
info->input_dev->close = twl4030_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
goto err_ialloc;
}
ret = input_register_device(info->input_dev);
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't register input device\n");
goto err_iff;
}
vibra_disable_leds();
platform_set_drvdata(pdev, info);
return 0;
err_iff:
input_ff_destroy(info->input_dev);
err_ialloc:
input_free_device(info->input_dev);
err_kzalloc:
kfree(info);
return ret;
}
static int __devexit twl4030_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
/* this also frees ff-memless and calls close if needed */
input_unregister_device(info->input_dev);
kfree(info);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver twl4030_vibra_driver = {
.probe = twl4030_vibra_probe,
.remove = __devexit_p(twl4030_vibra_remove),
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
.pm = &twl4030_vibra_pm_ops,
},
};
module_platform_driver(twl4030_vibra_driver);
MODULE_ALIAS("platform:twl4030-vibra");
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nokia Corporation");
| gpl-2.0 |
tommytarts/QuantumKernelM8-Sense | drivers/staging/omapdrm/omap_plane.c | 4941 | 13049 | /*
* drivers/staging/omapdrm/omap_plane.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kfifo.h>
#include "omap_drv.h"
/* some hackery because omapdss has an 'enum omap_plane' (which would be
 * better named omap_plane_id).. and the compiler seems unhappy about
 * having both a 'struct omap_plane' and an 'enum omap_plane'
 */
#define omap_plane _omap_plane
/*
* plane funcs
*/
struct callback {
void (*fxn)(void *);
void *arg;
};
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
struct omap_plane {
struct drm_plane base;
struct omap_overlay *ovl;
struct omap_overlay_info info;
/* Source values, converted to integers because we don't support
* fractional positions:
*/
unsigned int src_x, src_y;
/* last fb that we pinned: */
struct drm_framebuffer *pinned_fb;
uint32_t nformats;
uint32_t formats[32];
/* for synchronizing access to unpins fifo */
struct mutex unpin_mutex;
/* set of bo's pending unpin until next END_WIN irq */
DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
int num_unpins, pending_num_unpins;
/* for deferred unpin when we need to wait for scanout complete irq */
struct work_struct work;
/* callback on next endwin irq */
struct callback endwin;
};
/* map from ovl->id to the irq we are interested in for scanout-done */
static const uint32_t id2irq[] = {
[OMAP_DSS_GFX] = DISPC_IRQ_GFX_END_WIN,
[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
};
static void dispc_isr(void *arg, uint32_t mask)
{
struct drm_plane *plane = arg;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_drm_private *priv = plane->dev->dev_private;
omap_dispc_unregister_isr(dispc_isr, plane,
id2irq[omap_plane->ovl->id]);
queue_work(priv->wq, &omap_plane->work);
}
static void unpin_worker(struct work_struct *work)
{
struct omap_plane *omap_plane =
container_of(work, struct omap_plane, work);
struct callback endwin;
mutex_lock(&omap_plane->unpin_mutex);
DBG("unpinning %d of %d", omap_plane->num_unpins,
omap_plane->num_unpins + omap_plane->pending_num_unpins);
while (omap_plane->num_unpins > 0) {
struct drm_gem_object *bo = NULL;
int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
WARN_ON(!ret);
omap_gem_put_paddr(bo);
drm_gem_object_unreference_unlocked(bo);
omap_plane->num_unpins--;
}
endwin = omap_plane->endwin;
omap_plane->endwin.fxn = NULL;
mutex_unlock(&omap_plane->unpin_mutex);
if (endwin.fxn)
endwin.fxn(endwin.arg);
}
static void install_irq(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_overlay *ovl = omap_plane->ovl;
int ret;
ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
/*
* omapdss has upper limit on # of registered irq handlers,
* which we shouldn't hit.. but if we do the limit should
* be raised or bad things happen:
*/
WARN_ON(ret == -EBUSY);
}
/* push changes down to dss2 */
static int commit(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_overlay *ovl = omap_plane->ovl;
struct omap_overlay_info *info = &omap_plane->info;
int ret;
DBG("%s", ovl->name);
DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
info->out_height, info->screen_width);
DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
info->paddr, info->p_uv_addr);
/* NOTE: do we want to do this at all here, or just wait
* for dpms(ON) since other CRTC's may not have their mode
* set yet, so fb dimensions may still change..
*/
ret = ovl->set_overlay_info(ovl, info);
if (ret) {
dev_err(dev->dev, "could not set overlay info\n");
return ret;
}
mutex_lock(&omap_plane->unpin_mutex);
omap_plane->num_unpins += omap_plane->pending_num_unpins;
omap_plane->pending_num_unpins = 0;
mutex_unlock(&omap_plane->unpin_mutex);
/* our encoder doesn't necessarily get a commit() after this, in
* particular in the dpms() and mode_set_base() cases, so force the
* manager to update:
*
* could this be in the encoder somehow?
*/
if (ovl->manager) {
ret = ovl->manager->apply(ovl->manager);
if (ret) {
dev_err(dev->dev, "could not apply settings\n");
return ret;
}
/*
* NOTE: really this should be atomic w/ mgr->apply() but
* omapdss does not expose such an API
*/
if (omap_plane->num_unpins > 0)
install_irq(plane);
} else {
struct omap_drm_private *priv = dev->dev_private;
queue_work(priv->wq, &omap_plane->work);
}
if (ovl->is_enabled(ovl)) {
omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
info->out_width, info->out_height);
}
return 0;
}
/* when CRTC that we are attached to has potentially changed, this checks
* if we are attached to proper manager, and if necessary updates.
*/
static void update_manager(struct drm_plane *plane)
{
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_overlay *ovl = omap_plane->ovl;
struct omap_overlay_manager *mgr = NULL;
int i;
if (plane->crtc) {
for (i = 0; i < priv->num_encoders; i++) {
struct drm_encoder *encoder = priv->encoders[i];
if (encoder->crtc == plane->crtc) {
mgr = omap_encoder_get_manager(encoder);
break;
}
}
}
if (ovl->manager != mgr) {
bool enabled = ovl->is_enabled(ovl);
/* don't switch things around with enabled overlays: */
if (enabled)
omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
if (ovl->manager) {
DBG("disconnecting %s from %s", ovl->name,
ovl->manager->name);
ovl->unset_manager(ovl);
}
if (mgr) {
DBG("connecting %s to %s", ovl->name, mgr->name);
ovl->set_manager(ovl, mgr);
}
if (enabled && mgr)
omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
}
}
static void unpin(void *arg, struct drm_gem_object *bo)
{
struct drm_plane *plane = arg;
struct omap_plane *omap_plane = to_omap_plane(plane);
if (kfifo_put(&omap_plane->unpin_fifo,
(const struct drm_gem_object **)&bo)) {
omap_plane->pending_num_unpins++;
/* also hold a ref so it isn't free'd while pinned */
drm_gem_object_reference(bo);
} else {
dev_err(plane->dev->dev, "unpin fifo full!\n");
omap_gem_put_paddr(bo);
}
}
/* update which fb (if any) is pinned for scanout */
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
if (pinned_fb != fb) {
int ret;
DBG("%p -> %p", pinned_fb, fb);
mutex_lock(&omap_plane->unpin_mutex);
ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
mutex_unlock(&omap_plane->unpin_mutex);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
omap_plane->pinned_fb = NULL;
return ret;
}
omap_plane->pinned_fb = fb;
}
return 0;
}
/* update parameters that are dependent on the framebuffer dimensions and
* position within the fb that this plane scans out from. This is called
* when framebuffer or x,y base may have changed.
*/
static void update_scanout(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_overlay_info *info = &omap_plane->info;
int ret;
ret = update_pin(plane, plane->fb);
if (ret) {
dev_err(plane->dev->dev,
"could not pin fb: %d\n", ret);
omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
return;
}
omap_framebuffer_update_scanout(plane->fb,
omap_plane->src_x, omap_plane->src_y, info);
DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
omap_plane->src_x, omap_plane->src_y,
(u32)info->paddr, (u32)info->p_uv_addr,
info->screen_width);
}
int omap_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
/* src values are in Q16 fixed point, convert to integer: */
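/* (e.g. a 1280-pixel source width arrives from userspace as 1280 << 16) */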
src_x = src_x >> 16;
src_y = src_y >> 16;
src_w = src_w >> 16;
src_h = src_h >> 16;
omap_plane->info.pos_x = crtc_x;
omap_plane->info.pos_y = crtc_y;
omap_plane->info.out_width = crtc_w;
omap_plane->info.out_height = crtc_h;
omap_plane->info.width = src_w;
omap_plane->info.height = src_h;
omap_plane->src_x = src_x;
omap_plane->src_y = src_y;
/* note: these are normally assigned after this fxn returns, but if
 * we need to do a commit/update_scanout, etc. before that happens,
 * we need the current values, so set them here:
 */
plane->fb = fb;
plane->crtc = crtc;
update_scanout(plane);
update_manager(plane);
return 0;
}
static int omap_plane_update(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
}
static int omap_plane_disable(struct drm_plane *plane)
{
return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
}
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
DBG("%s", omap_plane->ovl->name);
omap_plane_disable(plane);
drm_plane_cleanup(plane);
WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
kfifo_free(&omap_plane->unpin_fifo);
kfree(omap_plane);
}
int omap_plane_dpms(struct drm_plane *plane, int mode)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
struct omap_overlay *ovl = omap_plane->ovl;
int r;
DBG("%s: %d", omap_plane->ovl->name, mode);
if (mode == DRM_MODE_DPMS_ON) {
update_scanout(plane);
r = commit(plane);
if (!r)
r = ovl->enable(ovl);
} else {
struct omap_drm_private *priv = plane->dev->dev_private;
r = ovl->disable(ovl);
update_pin(plane, NULL);
queue_work(priv->wq, &omap_plane->work);
}
return r;
}
void omap_plane_on_endwin(struct drm_plane *plane,
void (*fxn)(void *), void *arg)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
mutex_lock(&omap_plane->unpin_mutex);
omap_plane->endwin.fxn = fxn;
omap_plane->endwin.arg = arg;
mutex_unlock(&omap_plane->unpin_mutex);
install_irq(plane);
}
static const struct drm_plane_funcs omap_plane_funcs = {
.update_plane = omap_plane_update,
.disable_plane = omap_plane_disable,
.destroy = omap_plane_destroy,
};
/* initialize plane */
struct drm_plane *omap_plane_init(struct drm_device *dev,
struct omap_overlay *ovl, unsigned int possible_crtcs,
bool priv)
{
struct drm_plane *plane = NULL;
struct omap_plane *omap_plane;
int ret;
DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
possible_crtcs, priv);
/* friendly reminder to update table for future hw: */
WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
if (!omap_plane) {
dev_err(dev->dev, "could not allocate plane\n");
goto fail;
}
mutex_init(&omap_plane->unpin_mutex);
ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
if (ret) {
dev_err(dev->dev, "could not allocate unpin FIFO\n");
goto fail;
}
INIT_WORK(&omap_plane->work, unpin_worker);
omap_plane->nformats = omap_framebuffer_get_formats(
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
ovl->supported_modes);
omap_plane->ovl = ovl;
plane = &omap_plane->base;
drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
omap_plane->formats, omap_plane->nformats, priv);
/* get our starting configuration, set defaults for parameters
* we don't currently use, etc:
*/
ovl->get_overlay_info(ovl, &omap_plane->info);
omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
omap_plane->info.rotation = OMAP_DSS_ROT_0;
omap_plane->info.global_alpha = 0xff;
omap_plane->info.mirror = 0;
/* Set defaults depending on whether we are a CRTC or overlay
* layer.
* TODO add ioctl to give userspace an API to change this.. this
* will come in a subsequent patch.
*/
if (priv)
omap_plane->info.zorder = 0;
else
omap_plane->info.zorder = ovl->id;
update_manager(plane);
return plane;
fail:
if (plane) {
omap_plane_destroy(plane);
}
return NULL;
}
| gpl-2.0 |
SlimKat-U8950/chil360-kernel | drivers/staging/rtl8192e/rtl819x_TSProc.c | 4941 | 15523 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
static void TsSetupTimeOut(unsigned long data)
{
}
static void TsInactTimeout(unsigned long data)
{
}
static void RxPktPendingTimeout(unsigned long data)
{
struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
struct rx_reorder_entry *pReorderEntry = NULL;
unsigned long flags = 0;
u8 index = 0;
bool bPktInBuf = false;
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
if (pRxTs->RxTimeoutIndicateSeq != 0xffff) {
while (!list_empty(&pRxTs->RxPendingPktList)) {
pReorderEntry = (struct rx_reorder_entry *)
list_entry(pRxTs->RxPendingPktList.prev,
struct rx_reorder_entry, List);
if (index == 0)
pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
if (SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) {
list_del_init(&pReorderEntry->List);
if (SN_EQUAL(pReorderEntry->SeqNum,
pRxTs->RxIndicateSeq))
pRxTs->RxIndicateSeq =
(pRxTs->RxIndicateSeq + 1) % 4096;
RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate"
" SeqNum: %d\n", __func__,
pReorderEntry->SeqNum);
ieee->stats_IndicateArray[index] =
pReorderEntry->prxb;
index++;
list_add_tail(&pReorderEntry->List,
&ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
break;
}
}
}
if (index > 0) {
pRxTs->RxTimeoutIndicateSeq = 0xffff;
if (index > REORDER_WIN_SIZE) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
" Rx Reorer struct buffer full!!\n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock),
flags);
return;
}
rtllib_indicate_packets(ieee, ieee->stats_IndicateArray, index);
bPktInBuf = false;
}
if (bPktInBuf && (pRxTs->RxTimeoutIndicateSeq == 0xffff)) {
pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
mod_timer(&pRxTs->RxPktPendingTimer, jiffies +
MSECS(ieee->pHTInfo->RxReorderPendingTime));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
static void TsAddBaProcess(unsigned long data)
{
struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
u8 num = pTxTs->num;
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[num]);
TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsAddBaProcess(): ADDBA Req is "
"started!!\n");
}
static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
memset(pTsCommonInfo->Addr, 0, 6);
memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM);
pTsCommonInfo->TClasProc = 0;
pTsCommonInfo->TClasNum = 0;
}
static void ResetTxTsEntry(struct tx_ts_record *pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->TxCurSeq = 0;
pTS->bAddBaReqInProgress = false;
pTS->bAddBaReqDelayed = false;
pTS->bUsingBa = false;
pTS->bDisable_AddBa = false;
ResetBaEntry(&pTS->TxAdmittedBARecord);
ResetBaEntry(&pTS->TxPendingBARecord);
}
static void ResetRxTsEntry(struct rx_ts_record *pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->RxIndicateSeq = 0xffff;
pTS->RxTimeoutIndicateSeq = 0xffff;
ResetBaEntry(&pTS->RxAdmittedBARecord);
}
void TSInitialize(struct rtllib_device *ieee)
{
struct tx_ts_record *pTxTS = ieee->TxTsRecord;
struct rx_ts_record *pRxTS = ieee->RxTsRecord;
struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
RTLLIB_DEBUG(RTLLIB_DL_TS, "==========>%s()\n", __func__);
INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
_setup_timer(&pTxTS->TsCommonInfo.SetupTimer,
TsSetupTimeOut,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TsCommonInfo.InactTimer,
TsInactTimeout,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TsAddBaTimer,
TsAddBaProcess,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TxPendingBARecord.Timer,
BaSetupTimeOut,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
TxBaInactTimeout,
(unsigned long) pTxTS);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List,
&ieee->Tx_TS_Unused_List);
pTxTS++;
}
INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
for (count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
_setup_timer(&pRxTS->TsCommonInfo.SetupTimer,
TsSetupTimeOut,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->TsCommonInfo.InactTimer,
TsInactTimeout,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
RxBaInactTimeout,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->RxPktPendingTimer,
RxPktPendingTimeout,
(unsigned long) pRxTS);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List,
&ieee->Rx_TS_Unused_List);
pRxTS++;
}
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
list_add_tail(&pRxReorderEntry->List,
&ieee->RxReorder_Unused_List);
if (count == (REORDER_ENTRY_NUM-1))
break;
pRxReorderEntry = &ieee->RxReorderEntry[count+1];
}
}
static void AdmitTS(struct rtllib_device *ieee,
struct ts_common_info *pTsCommonInfo, u32 InactTime)
{
del_timer_sync(&pTsCommonInfo->SetupTimer);
del_timer_sync(&pTsCommonInfo->InactTimer);
if (InactTime != 0)
mod_timer(&pTsCommonInfo->InactTimer, jiffies +
MSECS(InactTime));
}
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
u8 *Addr, u8 TID,
enum tr_select TxRxSelect)
{
u8 dir;
bool search_dir[4] = {0};
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
if (ieee->iw_mode == IW_MODE_MASTER) {
if (TxRxSelect == TX_DIR) {
search_dir[DIR_DOWN] = true;
search_dir[DIR_BI_DIR] = true;
} else {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
}
} else if (ieee->iw_mode == IW_MODE_ADHOC) {
if (TxRxSelect == TX_DIR)
search_dir[DIR_UP] = true;
else
search_dir[DIR_DOWN] = true;
} else {
if (TxRxSelect == TX_DIR) {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
} else {
search_dir[DIR_DOWN] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
}
}
if (TxRxSelect == TX_DIR)
psearch_list = &ieee->Tx_TS_Admit_List;
else
psearch_list = &ieee->Rx_TS_Admit_List;
for (dir = 0; dir <= DIR_BI_DIR; dir++) {
if (search_dir[dir] == false)
continue;
list_for_each_entry(pRet, psearch_list, List) {
if (memcmp(pRet->Addr, Addr, 6) == 0)
if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
if (pRet->TSpec.f.TSInfo.field.ucDirection == dir)
break;
}
if (&pRet->List != psearch_list)
break;
}
if (pRet && &pRet->List != psearch_list)
return pRet;
else
return NULL;
}
static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
union tspec_body *pTSPEC, union qos_tclas *pTCLAS,
u8 TCLAS_Num, u8 TCLAS_Proc)
{
u8 count;
if (pTsCommonInfo == NULL)
return;
memcpy(pTsCommonInfo->Addr, Addr, 6);
if (pTSPEC != NULL)
memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
sizeof(union tspec_body));
for (count = 0; count < TCLAS_Num; count++)
memcpy((u8 *)(&(pTsCommonInfo->TClass[count])),
(u8 *)pTCLAS, sizeof(union qos_tclas));
pTsCommonInfo->TClasProc = TCLAS_Proc;
pTsCommonInfo->TClasNum = TCLAS_Num;
}
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
u8 UP = 0;
if (is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr)) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! get TS for Broadcast or "
"Multicast\n");
return false;
}
if (ieee->current_network.qos_data.supported == 0) {
UP = 0;
} else {
if (!IsACValid(TID)) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! in %s(), TID(%d) is "
"not valid\n", __func__, TID);
return false;
}
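/*
 * Collapse the eight 802.11e TIDs onto one user priority per WMM
 * access category: BE {0,3} -> 0, BK {1,2} -> 2, VI {4,5} -> 5,
 * VO {6,7} -> 7.
 */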
switch (TID) {
case 0:
case 3:
UP = 0;
break;
case 1:
case 2:
UP = 2;
break;
case 4:
case 5:
UP = 5;
break;
case 6:
case 7:
UP = 7;
break;
}
}
*ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect);
if (*ppTS != NULL) {
return true;
} else {
if (bAddNewTs == false) {
RTLLIB_DEBUG(RTLLIB_DL_TS, "add new TS failed"
"(tid:%d)\n", UP);
return false;
} else {
union tspec_body TSpec;
union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo;
struct list_head *pUnusedList =
(TxRxSelect == TX_DIR) ?
(&ieee->Tx_TS_Unused_List) :
(&ieee->Rx_TS_Unused_List);
struct list_head *pAddmitList =
(TxRxSelect == TX_DIR) ?
(&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
enum direction_value Dir =
(ieee->iw_mode == IW_MODE_MASTER) ?
((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
RTLLIB_DEBUG(RTLLIB_DL_TS, "to add Ts\n");
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
struct ts_common_info, List);
list_del_init(&(*ppTS)->List);
if (TxRxSelect == TX_DIR) {
struct tx_ts_record *tmp =
container_of(*ppTS,
struct tx_ts_record,
TsCommonInfo);
ResetTxTsEntry(tmp);
} else {
struct rx_ts_record *tmp =
container_of(*ppTS,
struct rx_ts_record,
TsCommonInfo);
ResetRxTsEntry(tmp);
}
RTLLIB_DEBUG(RTLLIB_DL_TS, "to init current TS"
", UP:%d, Dir:%d, addr: %pM"
" ppTs=%p\n", UP, Dir,
Addr, *ppTS);
pTSInfo->field.ucTrafficType = 0;
pTSInfo->field.ucTSID = UP;
pTSInfo->field.ucDirection = Dir;
pTSInfo->field.ucAccessPolicy = 1;
pTSInfo->field.ucAggregation = 0;
pTSInfo->field.ucPSB = 0;
pTSInfo->field.ucUP = UP;
pTSInfo->field.ucTSInfoAckPolicy = 0;
pTSInfo->field.ucSchedule = 0;
MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
AdmitTS(ieee, *ppTS, 0);
list_add_tail(&((*ppTS)->List), pAddmitList);
return true;
} else {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!in function "
"%s() There is not enough dir=%d"
"(0=up down=1) TS record to be "
"used!!", __func__, Dir);
return false;
}
}
}
}
static void RemoveTsEntry(struct rtllib_device *ieee, struct ts_common_info *pTs,
enum tr_select TxRxSelect)
{
del_timer_sync(&pTs->SetupTimer);
del_timer_sync(&pTs->InactTimer);
TsInitDelBA(ieee, pTs, TxRxSelect);
if (TxRxSelect == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
if (timer_pending(&pRxTS->RxPktPendingTimer))
del_timer_sync(&pRxTS->RxPktPendingTimer);
while (!list_empty(&pRxTS->RxPendingPktList)) {
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(pRxTS->RxPendingPktList.prev,
struct rx_reorder_entry, List);
RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Delete SeqNum "
"%d!\n", __func__,
pRxReorderEntry->SeqNum);
list_del_init(&pRxReorderEntry->List);
{
int i = 0;
struct rtllib_rxb *prxb = pRxReorderEntry->prxb;
if (unlikely(!prxb))
return;
for (i = 0; i < prxb->nr_subframes; i++)
dev_kfree_skb(prxb->subframes[i]);
kfree(prxb);
prxb = NULL;
}
list_add_tail(&pRxReorderEntry->List,
&ieee->RxReorder_Unused_List);
}
} else {
struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
del_timer_sync(&pTxTS->TsAddBaTimer);
}
}
void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
{
struct ts_common_info *pTS, *pTmpTS;
printk(KERN_INFO "===========>RemovePeerTS, %pM\n", Addr);
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
printk(KERN_INFO "====>remove Tx_TS_admin_list\n");
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
}
EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
struct ts_common_info *pTS, *pTmpTS;
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
{
if (pTxTS->bAddBaReqInProgress == false) {
pTxTS->bAddBaReqInProgress = true;
if (pTxTS->bAddBaReqDelayed) {
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
"Delayed Start ADDBA after 60 sec!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies +
MSECS(TS_ADDBA_DELAY));
} else {
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
"Immediately Start ADDBA now!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies+10);
}
} else
RTLLIB_DEBUG(RTLLIB_DL_BA, "%s()==>BA timer is already added\n",
__func__);
}
| gpl-2.0 |
Tepira/linux-sunxi | sound/pci/au88x0/au88x0_game.c | 5197 | 3681 | /*
* Manuel Jander.
*
* Based on the work of:
* Vojtech Pavlik
* Raymond Ingles
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
* Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*
* Based 90% on Vojtech Pavlik pcigame driver.
* Merged and modified by Manuel Jander, for the OpenVortex
* driver. (email: mjander@embedded.cl).
*/
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <sound/core.h>
#include "au88x0.h"
#include <linux/gameport.h>
#include <linux/export.h>
#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
#define VORTEX_GAME_DWAIT 20 /* 20 ms */
static unsigned char vortex_game_read(struct gameport *gameport)
{
vortex_t *vortex = gameport_get_port_data(gameport);
return hwread(vortex->mmio, VORTEX_GAME_LEGACY);
}
static void vortex_game_trigger(struct gameport *gameport)
{
vortex_t *vortex = gameport_get_port_data(gameport);
hwwrite(vortex->mmio, VORTEX_GAME_LEGACY, 0xff);
}
static int
vortex_game_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
vortex_t *vortex = gameport_get_port_data(gameport);
int i;
*buttons = (~hwread(vortex->mmio, VORTEX_GAME_LEGACY) >> 4) & 0xf;
for (i = 0; i < 4; i++) {
axes[i] =
hwread(vortex->mmio, VORTEX_GAME_AXIS + (i * AXIS_SIZE));
if (axes[i] == AXIS_RANGE)
axes[i] = -1;
}
return 0;
}
static int vortex_game_open(struct gameport *gameport, int mode)
{
vortex_t *vortex = gameport_get_port_data(gameport);
switch (mode) {
case GAMEPORT_MODE_COOKED:
hwwrite(vortex->mmio, VORTEX_CTRL2,
hwread(vortex->mmio,
VORTEX_CTRL2) | CTRL2_GAME_ADCMODE);
msleep(VORTEX_GAME_DWAIT);
return 0;
case GAMEPORT_MODE_RAW:
hwwrite(vortex->mmio, VORTEX_CTRL2,
hwread(vortex->mmio,
VORTEX_CTRL2) & ~CTRL2_GAME_ADCMODE);
return 0;
default:
return -1;
}
return 0;
}
static int __devinit vortex_gameport_register(vortex_t * vortex)
{
struct gameport *gp;
vortex->gameport = gp = gameport_allocate_port();
if (!gp) {
printk(KERN_ERR "vortex: cannot allocate memory for gameport\n");
return -ENOMEM;
}
gameport_set_name(gp, "AU88x0 Gameport");
gameport_set_phys(gp, "pci%s/gameport0", pci_name(vortex->pci_dev));
gameport_set_dev_parent(gp, &vortex->pci_dev->dev);
gp->read = vortex_game_read;
gp->trigger = vortex_game_trigger;
gp->cooked_read = vortex_game_cooked_read;
gp->open = vortex_game_open;
gameport_set_port_data(gp, vortex);
gp->fuzz = 64;
gameport_register_port(gp);
return 0;
}
static void vortex_gameport_unregister(vortex_t * vortex)
{
if (vortex->gameport) {
gameport_unregister_port(vortex->gameport);
vortex->gameport = NULL;
}
}
#else
static inline int vortex_gameport_register(vortex_t * vortex) { return -ENOSYS; }
static inline void vortex_gameport_unregister(vortex_t * vortex) { }
#endif
| gpl-2.0 |
pekaka/N900-Exynos-kernel-4.3 | drivers/staging/comedi/drivers/usbdux.c | 5453 | 78743 | #define DRIVER_VERSION "v2.4"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"
/*
comedi/drivers/usbdux.c
Copyright (C) 2003-2007 Bernd Porr, Bernd.Porr@f2s.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: usbdux
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbdux.o)
Author: Bernd Porr <BerndPorr@f2s.com>
Updated: 8 Dec 2008
Status: Stable
Configuration options:
You have to upload firmware with the -i option. The
firmware is usually installed under /usr/share/usb or
/usr/local/share/usb or /lib/firmware.
Connection scheme for the counter at the digital port:
0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1.
The sampling rate of the counter is approximately 500Hz.
Please note that under USB2.0 the length of the channel list determines
the max sampling rate. If you sample only one channel you get 8kHz
sampling rate. If you sample two channels you get 4kHz and so on.
*/
/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
* driver. I also must give credits to David Brownell
* who supported me with the USB development.
*
* Bernd Porr
*
*
* Revision history:
* 0.94: D/A output should work now with any channel list combinations
* 0.95: .owner commented out for kernel vers below 2.4.19
* sanity checks in ai/ao_cmd
* 0.96: trying to get it working with 2.6, moved all memory alloc to comedi's
* attach final USB IDs
* moved memory allocation completely to the corresponding comedi
* functions; firmware upload is by fxload and no longer by comedi (due
* to enumeration)
* 0.97: USB IDs received, adjusted table
* 0.98: SMP, locking, memory alloc: moved all usb memory alloc
* to the usb subsystem and moved all comedi related memory
* alloc to comedi.
* | kernel | registration | usbdux-usb | usbdux-comedi | comedi |
* 0.99: USB 2.0: changed protocol to isochronous transfer
* IRQ transfer is too buggy and too risky in 2.0
* a working version of the high speed ISO transfer is now
* available
* 0.99b: Increased the iso transfer buffer for high speed to 10 buffers. Some VIA
* chipsets miss out IRQs. Deeper buffering is needed.
* 1.00: full USB 2.0 support for the A/D converter. Now: max 8kHz sampling
* rate.
* Firmware vers 1.00 is needed for this.
* Two 16 bit up/down/reset counter with a sampling rate of 1kHz
* And loads of cleaning up, in particular streamlining the
* bulk transfers.
* 1.1: moved EP4 transfers to EP1 to make space for a PWM output on EP4
* 1.2: added PWM support via EP4
* 2.0: PWM seems to be stable and is not interfering with the other functions
* 2.1: changed PWM API
* 2.2: added firmware kernel request to fix an udev problem
* 2.3: corrected a bug in bulk timeouts which were far too short
* 2.4: fixed a bug which caused the driver to hang when it ran out of data.
* Thanks to Jan-Matthias Braun and Ian for spotting the bug and fixing it.
*
*/
/* generates loads of debug info */
/* #define NOISY_DUX_DEBUGBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
#include <linux/firmware.h>
#include "../comedidev.h"
#define BOARDNAME "usbdux"
/* timeout for the USB-transfer in ms*/
#define BULK_TIMEOUT 1000
/* constants for "firmware" upload and download */
#define USBDUXSUB_FIRMWARE 0xA0
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
/* internal addresses of the 8051 processor */
#define USBDUXSUB_CPUCS 0xE600
/*
* the minor device number, major is 180 only for debugging purposes and to
* upload special firmware (programming the eeprom etc) which is not compatible
* with the comedi framework
*/
#define USBDUXSUB_MINOR 32
/* max length of the transfer-buffer for software upload */
#define TB_LEN 0x2000
/* Input endpoint number: ISO/IRQ */
#define ISOINEP 6
/* Output endpoint number: ISO/IRQ */
#define ISOOUTEP 2
/* This EP sends DUX commands to USBDUX */
#define COMMAND_OUT_EP 1
/* This EP receives the DUX commands from USBDUX */
#define COMMAND_IN_EP 8
/* Output endpoint for PWM */
#define PWM_EP 4
/* 300Hz max frequency under PWM */
#define MIN_PWM_PERIOD ((long)(1E9/300))
/* Default PWM frequency */
#define PWM_DEFAULT_PERIOD ((long)(1E9/100))
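/* e.g. the default period of 1E9/100 ns = 10 ms corresponds to 100Hz */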
/* Number of channels */
#define NUMCHANNELS 8
/* Size of one A/D value */
#define SIZEADIN ((sizeof(int16_t)))
/*
* Size of the input-buffer IN BYTES
* Always multiple of 8 for 8 microframes which is needed in the highspeed mode
*/
#define SIZEINBUF ((8*SIZEADIN))
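/* i.e. 8 * sizeof(int16_t) = 16 bytes */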
/* 16 bytes. */
#define SIZEINSNBUF 16
/* Number of DA channels */
#define NUMOUTCHANNELS 8
/* size of one value for the D/A converter: channel and value */
#define SIZEDAOUT ((sizeof(int8_t)+sizeof(int16_t)))
/*
* Size of the output-buffer in bytes
* Actually only the first 4 triplets are used but for the
* high speed mode we need to pad it to 8 (microframes).
*/
#define SIZEOUTBUF ((8*SIZEDAOUT))
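/* i.e. 8 * 3 = 24 bytes */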
/*
* Size of the buffer for the dux commands: just now max size is determined
* by the analogue out + command byte + panic bytes...
*/
#define SIZEOFDUXBUFFER ((8*SIZEDAOUT+2))
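/* i.e. 8 * 3 + 2 = 26 bytes */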
/* Number of in-URBs which receive the data: min=2 */
#define NUMOFINBUFFERSFULL 5
/* Number of out-URBs which send the data: min=2 */
#define NUMOFOUTBUFFERSFULL 5
/* Number of in-URBs which receive the data: min=5 */
/* must have more buffers due to buggy USB controllers */
#define NUMOFINBUFFERSHIGH 10
/* Number of out-URBs which send the data: min=5 */
/* must have more buffers due to buggy USB controllers */
#define NUMOFOUTBUFFERSHIGH 10
/* Total number of usbdux devices */
#define NUMUSBDUX 16
/* Analogue in subdevice */
#define SUBDEV_AD 0
/* Analogue out subdevice */
#define SUBDEV_DA 1
/* Digital I/O */
#define SUBDEV_DIO 2
/* counter */
#define SUBDEV_COUNTER 3
/* timer aka pwm output */
#define SUBDEV_PWM 4
/* number of retries to get the right dux command */
#define RETRIES 10
/**************************************************/
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 4, {
	BIP_RANGE(4.096),
	BIP_RANGE(4.096 / 2),
	UNI_RANGE(4.096),
	UNI_RANGE(4.096 / 2)
} };
static const struct comedi_lrange range_usbdux_ao_range = { 2, {
	BIP_RANGE(4.096),
	UNI_RANGE(4.096)
} };
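/*
 * BIP_RANGE denotes a bipolar range (here +/-4.096V and +/-2.048V),
 * UNI_RANGE a unipolar one (0..4.096V and 0..2.048V).
 */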
/*
* private structure of one subdevice
*/
/*
* This is the structure which holds all the data of
* this driver; only one subdevice just now: A/D
*/
struct usbduxsub {
/* attached? */
int attached;
/* is it associated with a subdevice? */
int probed;
/* pointer to the usb-device */
struct usb_device *usbdev;
/* actual number of in-buffers */
int numOfInBuffers;
/* actual number of out-buffers */
int numOfOutBuffers;
/* ISO-transfer handling: buffers */
struct urb **urbIn;
struct urb **urbOut;
/* pwm-transfer handling */
struct urb *urbPwm;
/* PWM period */
unsigned int pwmPeriod;
/* PWM internal delay for the GPIF in the FX2 */
int8_t pwmDelay;
/* size of the PWM buffer which holds the bit pattern */
int sizePwmBuf;
/* input buffer for the ISO-transfer */
int16_t *inBuffer;
/* input buffer for single insn */
int16_t *insnBuffer;
/* output buffer for single DA outputs */
int16_t *outBuffer;
/* interface number */
int ifnum;
/* interface structure in 2.6 */
struct usb_interface *interface;
/* comedi device for the interrupt context */
struct comedi_device *comedidev;
/* is it USB_SPEED_HIGH or not? */
short int high_speed;
/* asynchronous command is running */
short int ai_cmd_running;
short int ao_cmd_running;
/* pwm is running */
short int pwm_cmd_running;
/* continuous acquisition */
short int ai_continous;
short int ao_continous;
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
/* time between samples in units of the timer */
unsigned int ai_timer;
unsigned int ao_timer;
/* counter between acquisitions */
unsigned int ai_counter;
unsigned int ao_counter;
/* interval in frames/uframes */
unsigned int ai_interval;
/* D/A commands */
int8_t *dac_commands;
/* commands */
int8_t *dux_commands;
struct semaphore sem;
};
/*
* The pointer to the private usb-data of the driver is also the private data
* for the comedi-device. This has to be global as the usb subsystem needs
* global variables. The other reason is that this structure must be there
* _before_ any comedi command is issued. The usb subsystem must be initialised
* before comedi can access it.
*/
static struct usbduxsub usbduxsub[NUMUSBDUX];
static DEFINE_SEMAPHORE(start_stop_sem);
/*
* Stops the data acquisition
* It should be safe to call this function from any context
*/
static int usbduxsub_unlink_InURBs(struct usbduxsub *usbduxsub_tmp)
{
int i = 0;
int err = 0;
if (usbduxsub_tmp && usbduxsub_tmp->urbIn) {
for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
if (usbduxsub_tmp->urbIn[i]) {
/* We wait here until all transfers have been
* cancelled. */
usb_kill_urb(usbduxsub_tmp->urbIn[i]);
}
dev_dbg(&usbduxsub_tmp->interface->dev,
"comedi: usbdux: unlinked InURB %d, err=%d\n",
i, err);
}
}
return err;
}
/*
* This will stop a running acquisition operation
* Is called from within this driver from both the
* interrupt context and from comedi
*/
static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
{
int ret = 0;
if (!this_usbduxsub) {
pr_err("comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n");
return -EFAULT;
}
dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n");
if (do_unlink) {
/* stop acquisition */
ret = usbduxsub_unlink_InURBs(this_usbduxsub);
}
this_usbduxsub->ai_cmd_running = 0;
return ret;
}
/*
* This will cancel a running acquisition operation.
* This is called by comedi but never from inside the driver.
*/
static int usbdux_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct usbduxsub *this_usbduxsub;
int res = 0;
/* force unlink of all urbs */
this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n");
/* prevent other CPUs from submitting new commands just now */
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
/* unlink only if the urb really has been submitted */
res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running);
up(&this_usbduxsub->sem);
return res;
}
/* analogue IN - interrupt service routine */
static void usbduxsub_ai_IsocIrq(struct urb *urb)
{
int i, err, n;
struct usbduxsub *this_usbduxsub;
struct comedi_device *this_comedidev;
struct comedi_subdevice *s;
/* the context variable points to the subdevice */
this_comedidev = urb->context;
/* the private structure of the subdevice is struct usbduxsub */
this_usbduxsub = this_comedidev->private;
/* subdevice which is the AD converter */
s = this_comedidev->subdevices + SUBDEV_AD;
/* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
memcpy(this_usbduxsub->inBuffer,
urb->transfer_buffer, SIZEINBUF);
break;
case -EILSEQ:
/* error in the ISOchronous data */
/* we don't copy the data into the transfer buffer */
/* and recycle the last data byte */
dev_dbg(&urb->dev->dev,
"comedi%d: usbdux: CRC error in ISO IN stream.\n",
this_usbduxsub->comedidev->minor);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
if (this_usbduxsub->ai_cmd_running) {
/* we are still running a command */
/* tell this comedi */
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
comedi_event(this_usbduxsub->comedidev, s);
/* stop the transfer w/o unlink */
usbdux_ai_stop(this_usbduxsub, 0);
}
return;
default:
/* a real error on the bus */
/* pass error to comedi if we are really running a command */
if (this_usbduxsub->ai_cmd_running) {
dev_err(&urb->dev->dev,
"Non-zero urb status received in ai intr "
"context: %d\n", urb->status);
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
comedi_event(this_usbduxsub->comedidev, s);
/* don't do an unlink here */
usbdux_ai_stop(this_usbduxsub, 0);
}
return;
}
/*
* at this point we are reasonably sure that nothing dodgy has happened
* are we running a command?
*/
if (unlikely((!(this_usbduxsub->ai_cmd_running)))) {
/*
* not running a command, do not continue execution if no
* asynchronous command is running in particular not resubmit
*/
return;
}
urb->dev = this_usbduxsub->usbdev;
/* resubmit the urb */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err < 0)) {
dev_err(&urb->dev->dev,
"comedi_: urb resubmit failed in int-context! err=%d\n",
err);
if (err == -EL2NSYNC)
dev_err(&urb->dev->dev,
"buggy USB host controller or bug in IRQ "
"handler!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
comedi_event(this_usbduxsub->comedidev, s);
/* don't do an unlink here */
usbdux_ai_stop(this_usbduxsub, 0);
return;
}
this_usbduxsub->ai_counter--;
if (likely(this_usbduxsub->ai_counter > 0))
return;
/* timer zero, transfer measurements to comedi */
this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
/* test, if we transmit only a fixed number of samples */
if (!(this_usbduxsub->ai_continous)) {
/* not continuous, fixed number of samples */
this_usbduxsub->ai_sample_count--;
/* all samples received? */
if (this_usbduxsub->ai_sample_count < 0) {
/* prevent a resubmit next time */
usbdux_ai_stop(this_usbduxsub, 0);
/* tell comedi that the acquisition is over */
s->async->events |= COMEDI_CB_EOA;
comedi_event(this_usbduxsub->comedidev, s);
return;
}
}
/* get the data from the USB bus and hand it over to comedi */
n = s->async->cmd.chanlist_len;
for (i = 0; i < n; i++) {
/* transfer data */
if (CR_RANGE(s->async->cmd.chanlist[i]) <= 1) {
err = comedi_buf_put
(s->async,
le16_to_cpu(this_usbduxsub->inBuffer[i]) ^ 0x800);
} else {
err = comedi_buf_put
(s->async,
le16_to_cpu(this_usbduxsub->inBuffer[i]));
}
if (unlikely(err == 0)) {
/* buffer overflow */
usbdux_ai_stop(this_usbduxsub, 0);
return;
}
}
/* tell comedi that data is there */
s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
comedi_event(this_usbduxsub->comedidev, s);
}
static int usbduxsub_unlink_OutURBs(struct usbduxsub *usbduxsub_tmp)
{
int i = 0;
int err = 0;
if (usbduxsub_tmp && usbduxsub_tmp->urbOut) {
for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
if (usbduxsub_tmp->urbOut[i])
usb_kill_urb(usbduxsub_tmp->urbOut[i]);
dev_dbg(&usbduxsub_tmp->interface->dev,
"comedi: usbdux: unlinked OutURB %d: res=%d\n",
i, err);
}
}
return err;
}
/* This will cancel a running acquisition operation
* in any context.
*/
static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
{
int ret = 0;
if (!this_usbduxsub)
return -EFAULT;
dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n");
if (do_unlink)
ret = usbduxsub_unlink_OutURBs(this_usbduxsub);
this_usbduxsub->ao_cmd_running = 0;
return ret;
}
/* force unlink, is called by comedi */
static int usbdux_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct usbduxsub *this_usbduxsub = dev->private;
int res = 0;
if (!this_usbduxsub)
return -EFAULT;
/* prevent other CPUs from submitting a command just now */
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
/* unlink only if it is really running */
res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running);
up(&this_usbduxsub->sem);
return res;
}
static void usbduxsub_ao_IsocIrq(struct urb *urb)
{
int i, ret;
int8_t *datap;
struct usbduxsub *this_usbduxsub;
struct comedi_device *this_comedidev;
struct comedi_subdevice *s;
/* the context variable points to the subdevice */
this_comedidev = urb->context;
/* the private structure of the subdevice is struct usbduxsub */
this_usbduxsub = this_comedidev->private;
s = this_comedidev->subdevices + SUBDEV_DA;
switch (urb->status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
/* after an unlink command, unplug, ... etc */
/* no unlink needed here. Already shutting down. */
if (this_usbduxsub->ao_cmd_running) {
s->async->events |= COMEDI_CB_EOA;
comedi_event(this_usbduxsub->comedidev, s);
usbdux_ao_stop(this_usbduxsub, 0);
}
return;
default:
/* a real error */
if (this_usbduxsub->ao_cmd_running) {
dev_err(&urb->dev->dev,
"comedi_: Non-zero urb status received in ao "
"intr context: %d\n", urb->status);
s->async->events |= COMEDI_CB_ERROR;
s->async->events |= COMEDI_CB_EOA;
comedi_event(this_usbduxsub->comedidev, s);
/* we do an unlink if we are in the high speed mode */
usbdux_ao_stop(this_usbduxsub, 0);
}
return;
}
/* are we actually running? */
if (!(this_usbduxsub->ao_cmd_running))
return;
/* normal operation: executing a command in this subdevice */
this_usbduxsub->ao_counter--;
if ((int)this_usbduxsub->ao_counter <= 0) {
/* timer zero */
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
/* handle non-continuous acquisition */
if (!(this_usbduxsub->ao_continous)) {
/* fixed number of samples */
this_usbduxsub->ao_sample_count--;
if (this_usbduxsub->ao_sample_count < 0) {
/* all samples transmitted */
usbdux_ao_stop(this_usbduxsub, 0);
s->async->events |= COMEDI_CB_EOA;
comedi_event(this_usbduxsub->comedidev, s);
/* no resubmit of the urb */
return;
}
}
/* transmit data to the USB bus */
((uint8_t *) (urb->transfer_buffer))[0] =
s->async->cmd.chanlist_len;
for (i = 0; i < s->async->cmd.chanlist_len; i++) {
short temp;
if (i >= NUMOUTCHANNELS)
break;
/* pointer to the DA */
datap =
(&(((int8_t *) urb->transfer_buffer)[i * 3 + 1]));
/* get the data from comedi */
ret = comedi_buf_get(s->async, &temp);
datap[0] = temp;
datap[1] = temp >> 8;
datap[2] = this_usbduxsub->dac_commands[i];
/* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */
/* datap[0],datap[1],datap[2]); */
if (ret < 0) {
dev_err(&urb->dev->dev,
"comedi: buffer underflow\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_OVERFLOW;
}
/* transmit data to comedi */
s->async->events |= COMEDI_CB_BLOCK;
comedi_event(this_usbduxsub->comedidev, s);
}
}
urb->transfer_buffer_length = SIZEOUTBUF;
urb->dev = this_usbduxsub->usbdev;
urb->status = 0;
if (this_usbduxsub->ao_cmd_running) {
if (this_usbduxsub->high_speed) {
/* uframes */
urb->interval = 8;
} else {
/* frames */
urb->interval = 1;
}
urb->number_of_packets = 1;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = SIZEOUTBUF;
urb->iso_frame_desc[0].status = 0;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
dev_err(&urb->dev->dev,
"comedi_: ao urb resubm failed in int-cont. "
"ret=%d", ret);
if (ret == -EL2NSYNC)
dev_err(&urb->dev->dev,
"buggy USB host controller or bug in "
"IRQ handling!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
comedi_event(this_usbduxsub->comedidev, s);
/* don't do an unlink here */
usbdux_ao_stop(this_usbduxsub, 0);
}
}
}
static int usbduxsub_start(struct usbduxsub *usbduxsub)
{
int errcode = 0;
uint8_t local_transfer_buffer[16];
/* 7f92 to zero */
local_transfer_buffer[0] = 0;
errcode = usb_control_msg(usbduxsub->usbdev,
/* create a pipe for a control transfer */
usb_sndctrlpipe(usbduxsub->usbdev, 0),
/* bRequest, "Firmware" */
USBDUXSUB_FIRMWARE,
/* bmRequestType */
VENDOR_DIR_OUT,
/* Value */
USBDUXSUB_CPUCS,
/* Index */
0x0000,
/* address of the transfer buffer */
local_transfer_buffer,
/* Length */
1,
/* Timeout */
BULK_TIMEOUT);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: control msg failed (start)\n");
return errcode;
}
return 0;
}
static int usbduxsub_stop(struct usbduxsub *usbduxsub)
{
int errcode = 0;
uint8_t local_transfer_buffer[16];
/* 7f92 to one */
local_transfer_buffer[0] = 1;
errcode = usb_control_msg(usbduxsub->usbdev,
usb_sndctrlpipe(usbduxsub->usbdev, 0),
/* bRequest, "Firmware" */
USBDUXSUB_FIRMWARE,
/* bmRequestType */
VENDOR_DIR_OUT,
/* Value */
USBDUXSUB_CPUCS,
/* Index */
0x0000, local_transfer_buffer,
/* Length */
1,
/* Timeout */
BULK_TIMEOUT);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: control msg failed (stop)\n");
return errcode;
}
return 0;
}
static int usbduxsub_upload(struct usbduxsub *usbduxsub,
uint8_t *local_transfer_buffer,
unsigned int startAddr, unsigned int len)
{
int errcode;
errcode = usb_control_msg(usbduxsub->usbdev,
usb_sndctrlpipe(usbduxsub->usbdev, 0),
/* brequest, firmware */
USBDUXSUB_FIRMWARE,
/* bmRequestType */
VENDOR_DIR_OUT,
/* value */
startAddr,
/* index */
0x0000,
/* our local safe buffer */
local_transfer_buffer,
/* length */
len,
/* timeout */
BULK_TIMEOUT);
dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
if (errcode < 0) {
dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n");
return errcode;
}
return 0;
}
#define FIRMWARE_MAX_LEN 0x2000
static int firmwareUpload(struct usbduxsub *usbduxsub,
const u8 *firmwareBinary, int sizeFirmware)
{
int ret;
uint8_t *fwBuf;
if (!firmwareBinary)
return 0;
if (sizeFirmware > FIRMWARE_MAX_LEN) {
dev_err(&usbduxsub->interface->dev,
"usbdux firmware binary it too large for FX2.\n");
return -ENOMEM;
}
/* we generate a local buffer for the firmware */
fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
if (!fwBuf) {
dev_err(&usbduxsub->interface->dev,
"comedi_: mem alloc for firmware failed\n");
return -ENOMEM;
}
ret = usbduxsub_stop(usbduxsub);
if (ret < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: can not stop firmware\n");
kfree(fwBuf);
return ret;
}
ret = usbduxsub_upload(usbduxsub, fwBuf, 0, sizeFirmware);
if (ret < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: firmware upload failed\n");
kfree(fwBuf);
return ret;
}
ret = usbduxsub_start(usbduxsub);
if (ret < 0) {
dev_err(&usbduxsub->interface->dev,
"comedi_: can not start firmware\n");
kfree(fwBuf);
return ret;
}
kfree(fwBuf);
return 0;
}
static int usbduxsub_submit_InURBs(struct usbduxsub *usbduxsub)
{
int i, errFlag;
if (!usbduxsub)
return -EFAULT;
/* Submit all URBs and start the transfer on the bus */
for (i = 0; i < usbduxsub->numOfInBuffers; i++) {
/* in case of a resubmission after an unlink... */
usbduxsub->urbIn[i]->interval = usbduxsub->ai_interval;
usbduxsub->urbIn[i]->context = usbduxsub->comedidev;
usbduxsub->urbIn[i]->dev = usbduxsub->usbdev;
usbduxsub->urbIn[i]->status = 0;
usbduxsub->urbIn[i]->transfer_flags = URB_ISO_ASAP;
dev_dbg(&usbduxsub->interface->dev,
"comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
usbduxsub->comedidev->minor, i,
(usbduxsub->urbIn[i]->context),
(usbduxsub->urbIn[i]->dev),
(usbduxsub->urbIn[i]->interval));
errFlag = usb_submit_urb(usbduxsub->urbIn[i], GFP_ATOMIC);
if (errFlag) {
dev_err(&usbduxsub->interface->dev,
"comedi_: ai: usb_submit_urb(%d) error %d\n",
i, errFlag);
return errFlag;
}
}
return 0;
}
static int usbduxsub_submit_OutURBs(struct usbduxsub *usbduxsub)
{
int i, errFlag;
if (!usbduxsub)
return -EFAULT;
for (i = 0; i < usbduxsub->numOfOutBuffers; i++) {
dev_dbg(&usbduxsub->interface->dev,
"comedi_: submitting out-urb[%d]\n", i);
/* in case of a resubmission after an unlink... */
usbduxsub->urbOut[i]->context = usbduxsub->comedidev;
usbduxsub->urbOut[i]->dev = usbduxsub->usbdev;
usbduxsub->urbOut[i]->status = 0;
usbduxsub->urbOut[i]->transfer_flags = URB_ISO_ASAP;
errFlag = usb_submit_urb(usbduxsub->urbOut[i], GFP_ATOMIC);
if (errFlag) {
dev_err(&usbduxsub->interface->dev,
"comedi_: ao: usb_submit_urb(%d) error %d\n",
i, errFlag);
return errFlag;
}
}
return 0;
}
static int usbdux_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0, tmp, i;
unsigned int tmpTimer;
struct usbduxsub *this_usbduxsub = dev->private;
if (!(this_usbduxsub->probed))
return -ENODEV;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ai_cmdtest\n", dev->minor);
/* make sure triggers are valid */
/* Only immediate triggers are allowed */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW | TRIG_INT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
/* trigger should happen timed */
tmp = cmd->scan_begin_src;
/* start a new _scan_ with a timer */
cmd->scan_begin_src &= TRIG_TIMER;
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
/* scanning is continuous */
tmp = cmd->convert_src;
cmd->convert_src &= TRIG_NOW;
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
/* issue a trigger when scan is finished and start a new scan */
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
/* trigger at the end of count events or not, stop condition or not */
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/*
* step 2: make sure trigger sources are unique and mutually compatible
* note that mutual compatibility is not an issue here
*/
if (cmd->scan_begin_src != TRIG_FOLLOW &&
cmd->scan_begin_src != TRIG_EXT &&
cmd->scan_begin_src != TRIG_TIMER)
err++;
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
if (cmd->scan_begin_src == TRIG_FOLLOW) {
/* internal trigger */
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
}
if (cmd->scan_begin_src == TRIG_TIMER) {
if (this_usbduxsub->high_speed) {
/*
* In high speed mode microframes are possible.
* However, during one microframe we can roughly
* sample one channel. Thus, the more channels
* are in the channel list the more time we need.
*/
i = 1;
/* find a power of 2 for the number of channels */
while (i < (cmd->chanlist_len))
i = i * 2;
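/*
 * e.g. chanlist_len = 3 rounds i up to 4, limiting the scan
 * interval to 4 * 125000 ns = 500000 ns (2kHz)
 */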
if (cmd->scan_begin_arg < (1000000 / 8 * i)) {
cmd->scan_begin_arg = 1000000 / 8 * i;
err++;
}
/* now calc the real sampling rate with all the
* rounding errors */
tmpTimer =
((unsigned int)(cmd->scan_begin_arg / 125000)) *
125000;
if (cmd->scan_begin_arg != tmpTimer) {
cmd->scan_begin_arg = tmpTimer;
err++;
}
} else {
/* full speed */
/* 1kHz scans every USB frame */
if (cmd->scan_begin_arg < 1000000) {
cmd->scan_begin_arg = 1000000;
err++;
}
/*
* calc the real sampling rate with the rounding errors
*/
tmpTimer = ((unsigned int)(cmd->scan_begin_arg /
1000000)) * 1000000;
if (cmd->scan_begin_arg != tmpTimer) {
cmd->scan_begin_arg = tmpTimer;
err++;
}
}
}
/* the same argument */
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) {
/* any count is allowed */
} else {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
if (err)
return 3;
return 0;
}
/*
* creates the ADC command for the MAX1271
* range is the range value from comedi
*/
static int8_t create_adc_command(unsigned int chan, int range)
{
int8_t p = (range <= 1);
int8_t r = ((range % 2) == 0);
return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
}
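/*
 * Illustrative example: chan = 2 with range 0 gives p = 1 and r = 1,
 * i.e. the command byte (2 << 4) | (1 << 2) | (1 << 3) = 0x2c.
 */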
/* bulk transfers to usbdux */
#define SENDADCOMMANDS 0
#define SENDDACOMMANDS 1
#define SENDDIOCONFIGCOMMAND 2
#define SENDDIOBITSCOMMAND 3
#define SENDSINGLEAD 4
#define READCOUNTERCOMMAND 5
#define WRITECOUNTERCOMMAND 6
#define SENDPWMON 7
#define SENDPWMOFF 8
static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
{
int result, nsent;
this_usbduxsub->dux_commands[0] = cmd_type;
#ifdef NOISY_DUX_DEBUGBUG
printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
this_usbduxsub->comedidev->minor);
for (result = 0; result < SIZEOFDUXBUFFER; result++)
printk(" %02x", this_usbduxsub->dux_commands[result]);
printk("\n");
#endif
result = usb_bulk_msg(this_usbduxsub->usbdev,
usb_sndbulkpipe(this_usbduxsub->usbdev,
COMMAND_OUT_EP),
this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
&nsent, BULK_TIMEOUT);
if (result < 0)
dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
"could not transmit dux_command to the usb-device, "
"err=%d\n", this_usbduxsub->comedidev->minor, result);
return result;
}
static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
{
int result = (-EFAULT);
int nrec;
int i;
for (i = 0; i < RETRIES; i++) {
result = usb_bulk_msg(this_usbduxsub->usbdev,
usb_rcvbulkpipe(this_usbduxsub->usbdev,
COMMAND_IN_EP),
this_usbduxsub->insnBuffer, SIZEINSNBUF,
&nrec, BULK_TIMEOUT);
if (result < 0) {
dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
"insn: USB error %d while receiving DUX command"
"\n", this_usbduxsub->comedidev->minor, result);
return result;
}
if (le16_to_cpu(this_usbduxsub->insnBuffer[0]) == command)
return result;
}
/* this is only reached if the data has been requested a couple of
* times */
dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
"wrong data returned from firmware: want cmd %d, got cmd %d.\n",
this_usbduxsub->comedidev->minor, command,
le16_to_cpu(this_usbduxsub->insnBuffer[0]));
return -EFAULT;
}
static int usbdux_ai_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int trignum)
{
int ret;
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ai_inttrig\n", dev->minor);
if (trignum != 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ai_inttrig: invalid trignum\n",
dev->minor);
up(&this_usbduxsub->sem);
return -EINVAL;
}
if (!(this_usbduxsub->ai_cmd_running)) {
this_usbduxsub->ai_cmd_running = 1;
ret = usbduxsub_submit_InURBs(this_usbduxsub);
if (ret < 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ai_inttrig: "
"urbSubmit: err=%d\n", dev->minor, ret);
this_usbduxsub->ai_cmd_running = 0;
up(&this_usbduxsub->sem);
return ret;
}
s->async->inttrig = NULL;
} else {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: ai_inttrig but acqu is already running\n",
dev->minor);
}
up(&this_usbduxsub->sem);
return 1;
}
static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int chan, range;
int i, ret;
struct usbduxsub *this_usbduxsub = dev->private;
int result;
if (!this_usbduxsub)
return -EFAULT;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ai_cmd\n", dev->minor);
/* block other CPUs from starting an ai_cmd */
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
if (this_usbduxsub->ai_cmd_running) {
dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
"ai_cmd not possible. Another ai_cmd is running.\n",
dev->minor);
up(&this_usbduxsub->sem);
return -EBUSY;
}
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
for (i = 0; i < cmd->chanlist_len; ++i) {
chan = CR_CHAN(cmd->chanlist[i]);
range = CR_RANGE(cmd->chanlist[i]);
if (i >= NUMCHANNELS) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: channel list too long\n",
dev->minor);
break;
}
this_usbduxsub->dux_commands[i + 2] =
create_adc_command(chan, range);
}
dev_dbg(&this_usbduxsub->interface->dev,
"comedi %d: sending commands to the usb device: size=%u\n",
dev->minor, NUMCHANNELS);
result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
if (result < 0) {
up(&this_usbduxsub->sem);
return result;
}
if (this_usbduxsub->high_speed) {
/*
* every channel gets a time window of 125us. Thus, if we
* sample all 8 channels we need 1ms. If we sample only one
* channel we need only 125us
*/
this_usbduxsub->ai_interval = 1;
/* find a power of 2 for the interval */
while ((this_usbduxsub->ai_interval) < (cmd->chanlist_len)) {
this_usbduxsub->ai_interval =
(this_usbduxsub->ai_interval) * 2;
}
this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
(this_usbduxsub->
ai_interval));
} else {
/* interval always 1ms */
this_usbduxsub->ai_interval = 1;
this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
}
if (this_usbduxsub->ai_timer < 1) {
dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
"timer=%d, scan_begin_arg=%d. "
"Not properly tested by cmdtest?\n", dev->minor,
this_usbduxsub->ai_timer, cmd->scan_begin_arg);
up(&this_usbduxsub->sem);
return -EINVAL;
}
this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* data arrives as one packet */
this_usbduxsub->ai_sample_count = cmd->stop_arg;
this_usbduxsub->ai_continous = 0;
} else {
/* continuous acquisition */
this_usbduxsub->ai_continous = 1;
this_usbduxsub->ai_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
this_usbduxsub->ai_cmd_running = 1;
ret = usbduxsub_submit_InURBs(this_usbduxsub);
if (ret < 0) {
this_usbduxsub->ai_cmd_running = 0;
/* fixme: unlink here?? */
up(&this_usbduxsub->sem);
return ret;
}
s->async->inttrig = NULL;
} else {
/* TRIG_INT */
/* don't enable the acquisition operation */
/* wait for an internal signal */
s->async->inttrig = usbdux_ai_inttrig;
}
up(&this_usbduxsub->sem);
return 0;
}
/* Mode 0 is used to get a single conversion on demand */
static int usbdux_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i;
unsigned int one = 0;
int chan, range;
int err;
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return 0;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
dev->minor, insn->n, insn->subdev);
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
if (this_usbduxsub->ai_cmd_running) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: ai_insn_read not possible. "
"Async Command is running.\n", dev->minor);
up(&this_usbduxsub->sem);
return 0;
}
/* sample one channel */
chan = CR_CHAN(insn->chanspec);
range = CR_RANGE(insn->chanspec);
/* set command for the first channel */
this_usbduxsub->dux_commands[1] = create_adc_command(chan, range);
/* adc commands */
err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
for (i = 0; i < insn->n; i++) {
err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
if (err < 0) {
up(&this_usbduxsub->sem);
return 0;
}
one = le16_to_cpu(this_usbduxsub->insnBuffer[1]);
if (CR_RANGE(insn->chanspec) <= 1)
one = one ^ 0x800;
data[i] = one;
}
up(&this_usbduxsub->sem);
return i;
}
/************************************/
/* analog out */
static int usbdux_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i;
int chan = CR_CHAN(insn->chanspec);
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
for (i = 0; i < insn->n; i++)
data[i] = this_usbduxsub->outBuffer[chan];
up(&this_usbduxsub->sem);
return i;
}
static int usbdux_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int i, err;
int chan = CR_CHAN(insn->chanspec);
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: ao_insn_write\n", dev->minor);
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
if (this_usbduxsub->ao_cmd_running) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: ao_insn_write: "
"ERROR: asynchronous ao_cmd is running\n", dev->minor);
up(&this_usbduxsub->sem);
return 0;
}
for (i = 0; i < insn->n; i++) {
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n",
dev->minor, chan, i, data[i]);
/* number of channels: 1 */
this_usbduxsub->dux_commands[1] = 1;
/* one 16 bit value */
*((int16_t *) (this_usbduxsub->dux_commands + 2)) =
cpu_to_le16(data[i]);
this_usbduxsub->outBuffer[chan] = data[i];
/* channel number */
this_usbduxsub->dux_commands[4] = (chan << 6);
err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
}
up(&this_usbduxsub->sem);
return i;
}
static int usbdux_ao_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int trignum)
{
int ret;
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
if (trignum != 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: invalid trignum\n",
dev->minor);
up(&this_usbduxsub->sem);
return -EINVAL;
}
if (!(this_usbduxsub->ao_cmd_running)) {
this_usbduxsub->ao_cmd_running = 1;
ret = usbduxsub_submit_OutURBs(this_usbduxsub);
if (ret < 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: submitURB: "
"err=%d\n", dev->minor, ret);
this_usbduxsub->ao_cmd_running = 0;
up(&this_usbduxsub->sem);
return ret;
}
s->async->inttrig = NULL;
} else {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: ao_inttrig but acqu is already running.\n",
dev->minor);
}
up(&this_usbduxsub->sem);
return 1;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
int err = 0, tmp;
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
if (!(this_usbduxsub->probed))
return -ENODEV;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_cmdtest\n", dev->minor);
/* make sure triggers are valid */
/* Only immediate triggers are allowed */
tmp = cmd->start_src;
cmd->start_src &= TRIG_NOW | TRIG_INT;
if (!cmd->start_src || tmp != cmd->start_src)
err++;
/* trigger should happen timed */
tmp = cmd->scan_begin_src;
/* just now we scan every frame even in the high speed mode */
/* this is due to ehci driver limitations */
if (0) { /* (this_usbduxsub->high_speed) */
/* start immediately a new scan */
/* the sampling rate is set by the conversion rate */
cmd->scan_begin_src &= TRIG_FOLLOW;
} else {
/* start a new scan (output at once) with a timer */
cmd->scan_begin_src &= TRIG_TIMER;
}
if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
err++;
/* scanning is continuous */
tmp = cmd->convert_src;
/* just now we always output all channels at once at 1kHz */
if (0) { /* (this_usbduxsub->high_speed) */
/*
* in usb-2.0 only one conversion is transmitted but with 8kHz/n
*/
cmd->convert_src &= TRIG_TIMER;
} else {
/* all conversion events happen simultaneously with a rate of
* 1kHz/n */
cmd->convert_src &= TRIG_NOW;
}
if (!cmd->convert_src || tmp != cmd->convert_src)
err++;
/* issue a trigger when scan is finished and start a new scan */
tmp = cmd->scan_end_src;
cmd->scan_end_src &= TRIG_COUNT;
if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
err++;
/* trigger at the end of count events or not, stop condition or not */
tmp = cmd->stop_src;
cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
if (err)
return 1;
/*
* step 2: make sure trigger sources are unique and mutually compatible
* note that mutual compatibility is not an issue here
*/
if (cmd->scan_begin_src != TRIG_FOLLOW &&
cmd->scan_begin_src != TRIG_EXT &&
cmd->scan_begin_src != TRIG_TIMER)
err++;
if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
err++;
if (err)
return 2;
/* step 3: make sure arguments are trivially compatible */
if (cmd->start_arg != 0) {
cmd->start_arg = 0;
err++;
}
if (cmd->scan_begin_src == TRIG_FOLLOW) {
/* internal trigger */
if (cmd->scan_begin_arg != 0) {
cmd->scan_begin_arg = 0;
err++;
}
}
if (cmd->scan_begin_src == TRIG_TIMER) {
/* timer */
if (cmd->scan_begin_arg < 1000000) {
cmd->scan_begin_arg = 1000000;
err++;
}
}
/* not used now, is for later use */
if (cmd->convert_src == TRIG_TIMER) {
if (cmd->convert_arg < 125000) {
cmd->convert_arg = 125000;
err++;
}
}
/* the same argument */
if (cmd->scan_end_arg != cmd->chanlist_len) {
cmd->scan_end_arg = cmd->chanlist_len;
err++;
}
if (cmd->stop_src == TRIG_COUNT) {
/* any count is allowed */
} else {
/* TRIG_NONE */
if (cmd->stop_arg != 0) {
cmd->stop_arg = 0;
err++;
}
}
dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: err=%d, "
"scan_begin_src=%d, scan_begin_arg=%d, convert_src=%d, "
"convert_arg=%d\n", dev->minor, err, cmd->scan_begin_src,
cmd->scan_begin_arg, cmd->convert_src, cmd->convert_arg);
if (err)
return 3;
return 0;
}
static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int chan, gain;
int i, ret;
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s\n", dev->minor, __func__);
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
for (i = 0; i < cmd->chanlist_len; ++i) {
chan = CR_CHAN(cmd->chanlist[i]);
gain = CR_RANGE(cmd->chanlist[i]);
if (i >= NUMOUTCHANNELS) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: %s: channel list too long\n",
dev->minor, __func__);
break;
}
this_usbduxsub->dac_commands[i] = (chan << 6);
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: dac command for ch %d is %x\n",
dev->minor, i, this_usbduxsub->dac_commands[i]);
}
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
if (0) { /* (this_usbduxsub->high_speed) */
/* 125us */
/* timing of the conversion itself: every 125 us */
this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
} else {
/* 1ms */
/* timing of the scan: we get all channels at once */
this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
"convert_src=%d, convert_arg=%d\n", dev->minor,
cmd->scan_begin_src, cmd->scan_begin_arg,
cmd->convert_src, cmd->convert_arg);
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: ao_timer=%d (ms)\n",
dev->minor, this_usbduxsub->ao_timer);
if (this_usbduxsub->ao_timer < 1) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux: ao_timer=%d, "
"scan_begin_arg=%d. "
"Not properly tested by cmdtest?\n",
dev->minor, this_usbduxsub->ao_timer,
cmd->scan_begin_arg);
up(&this_usbduxsub->sem);
return -EINVAL;
}
}
this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* not continuous */
/* counter */
/* high speed also scans everything at once */
if (0) { /* (this_usbduxsub->high_speed) */
this_usbduxsub->ao_sample_count =
(cmd->stop_arg) * (cmd->scan_end_arg);
} else {
/* there's no scan as the scan has been */
/* performed inside the FX2 */
/* data arrives as one packet */
this_usbduxsub->ao_sample_count = cmd->stop_arg;
}
this_usbduxsub->ao_continous = 0;
} else {
/* continuous acquisition */
this_usbduxsub->ao_continous = 1;
this_usbduxsub->ao_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
this_usbduxsub->ao_cmd_running = 1;
ret = usbduxsub_submit_OutURBs(this_usbduxsub);
if (ret < 0) {
this_usbduxsub->ao_cmd_running = 0;
/* fixme: unlink here?? */
up(&this_usbduxsub->sem);
return ret;
}
s->async->inttrig = NULL;
} else {
/* TRIG_INT */
/* submit the urbs later */
/* wait for an internal signal */
s->async->inttrig = usbdux_ao_inttrig;
}
up(&this_usbduxsub->sem);
return 0;
}
static int usbdux_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
int chan = CR_CHAN(insn->chanspec);
/* The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
* value COMEDI_INPUT or COMEDI_OUTPUT. */
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
s->io_bits |= 1 << chan; /* 1 means Out */
break;
case INSN_CONFIG_DIO_INPUT:
s->io_bits &= ~(1 << chan);
break;
case INSN_CONFIG_DIO_QUERY:
data[1] =
(s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
break;
default:
return -EINVAL;
}
/* we don't tell the firmware here as it would take 8 frames */
/* to submit the information. We do it in the insn_bits. */
return insn->n;
}
static int usbdux_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct usbduxsub *this_usbduxsub = dev->private;
int err;
if (!this_usbduxsub)
return -EFAULT;
if (insn->n != 2)
return -EINVAL;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
/* The insn data is a mask in data[0] and the new data
* in data[1], each channel corresponding to a bit. */
s->state &= ~data[0];
s->state |= data[0] & data[1];
this_usbduxsub->dux_commands[1] = s->io_bits;
this_usbduxsub->dux_commands[2] = s->state;
/* This command also tells the firmware to return */
/* the digital input lines */
err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
data[1] = le16_to_cpu(this_usbduxsub->insnBuffer[1]);
up(&this_usbduxsub->sem);
return 2;
}
/* reads the 4 counters, only two are used just now */
static int usbdux_counter_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct usbduxsub *this_usbduxsub = dev->private;
int chan = insn->chanspec;
int err;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
err = send_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
err = receive_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
data[0] = le16_to_cpu(this_usbduxsub->insnBuffer[chan + 1]);
up(&this_usbduxsub->sem);
return 1;
}
static int usbdux_counter_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct usbduxsub *this_usbduxsub = dev->private;
int err;
if (!this_usbduxsub)
return -EFAULT;
down(&this_usbduxsub->sem);
if (!(this_usbduxsub->probed)) {
up(&this_usbduxsub->sem);
return -ENODEV;
}
this_usbduxsub->dux_commands[1] = insn->chanspec;
*((int16_t *) (this_usbduxsub->dux_commands + 2)) = cpu_to_le16(*data);
err = send_dux_commands(this_usbduxsub, WRITECOUNTERCOMMAND);
if (err < 0) {
up(&this_usbduxsub->sem);
return err;
}
up(&this_usbduxsub->sem);
return 1;
}
static int usbdux_counter_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
/* nothing to do so far */
return 2;
}
/***********************************/
/* PWM */
static int usbduxsub_unlink_PwmURBs(struct usbduxsub *usbduxsub_tmp)
{
int err = 0;
if (usbduxsub_tmp && usbduxsub_tmp->urbPwm) {
if (usbduxsub_tmp->urbPwm)
usb_kill_urb(usbduxsub_tmp->urbPwm);
dev_dbg(&usbduxsub_tmp->interface->dev,
"comedi: unlinked PwmURB: res=%d\n", err);
}
return err;
}
/* This cancels a running acquisition operation
* in any context.
*/
static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
{
int ret = 0;
if (!this_usbduxsub)
return -EFAULT;
dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
if (do_unlink)
ret = usbduxsub_unlink_PwmURBs(this_usbduxsub);
this_usbduxsub->pwm_cmd_running = 0;
return ret;
}
/* force unlink - is called by comedi */
static int usbdux_pwm_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct usbduxsub *this_usbduxsub = dev->private;
int res = 0;
/* unlink only if it is really running */
res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);
dev_dbg(&this_usbduxsub->interface->dev,
"comedi %d: sending pwm off command to the usb device.\n",
dev->minor);
return send_dux_commands(this_usbduxsub, SENDPWMOFF);
}
static void usbduxsub_pwm_irq(struct urb *urb)
{
int ret;
struct usbduxsub *this_usbduxsub;
struct comedi_device *this_comedidev;
struct comedi_subdevice *s;
/* printk(KERN_DEBUG "PWM: IRQ\n"); */
/* the context variable points to the subdevice */
this_comedidev = urb->context;
/* the private structure of the subdevice is struct usbduxsub */
this_usbduxsub = this_comedidev->private;
s = this_comedidev->subdevices + SUBDEV_DA;
switch (urb->status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
/*
* after an unlink command, unplug, ... etc
* no unlink needed here. Already shutting down.
*/
if (this_usbduxsub->pwm_cmd_running)
usbdux_pwm_stop(this_usbduxsub, 0);
return;
default:
/* a real error */
if (this_usbduxsub->pwm_cmd_running) {
dev_err(&this_usbduxsub->interface->dev,
"comedi_: Non-zero urb status received in "
"pwm intr context: %d\n", urb->status);
usbdux_pwm_stop(this_usbduxsub, 0);
}
return;
}
/* are we actually running? */
if (!(this_usbduxsub->pwm_cmd_running))
return;
urb->transfer_buffer_length = this_usbduxsub->sizePwmBuf;
urb->dev = this_usbduxsub->usbdev;
urb->status = 0;
if (this_usbduxsub->pwm_cmd_running) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi_: pwm urb resubm failed in int-cont. "
"ret=%d", ret);
if (ret == -EL2NSYNC)
dev_err(&this_usbduxsub->interface->dev,
"buggy USB host controller or bug in "
"IRQ handling!\n");
/* don't do an unlink here */
usbdux_pwm_stop(this_usbduxsub, 0);
}
}
}
static int usbduxsub_submit_PwmURBs(struct usbduxsub *usbduxsub)
{
int errFlag;
if (!usbduxsub)
return -EFAULT;
dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n");
/* in case of a resubmission after an unlink... */
usb_fill_bulk_urb(usbduxsub->urbPwm,
usbduxsub->usbdev,
usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP),
usbduxsub->urbPwm->transfer_buffer,
usbduxsub->sizePwmBuf, usbduxsub_pwm_irq,
usbduxsub->comedidev);
errFlag = usb_submit_urb(usbduxsub->urbPwm, GFP_ATOMIC);
if (errFlag) {
dev_err(&usbduxsub->interface->dev,
"comedi_: usbdux: pwm: usb_submit_urb error %d\n",
errFlag);
return errFlag;
}
return 0;
}
static int usbdux_pwm_period(struct comedi_device *dev,
struct comedi_subdevice *s, unsigned int period)
{
struct usbduxsub *this_usbduxsub = dev->private;
int fx2delay = 255;
if (period < MIN_PWM_PERIOD) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: illegal period setting for pwm.\n",
dev->minor);
return -EAGAIN;
}
/*
* One FX2 delay step is roughly 6 * 512 / 0.033 time units; kernel
* code must not use floating point, so the constant is written as
* 6 * 512 * 1000 / 33 in integer arithmetic.
*/
fx2delay = period / (6 * 512 * 1000 / 33) - 6;
if (fx2delay > 255) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: period %d for pwm is too large.\n",
dev->minor, period);
return -EAGAIN;
}
this_usbduxsub->pwmDelay = fx2delay;
this_usbduxsub->pwmPeriod = period;
dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n",
__func__, period, fx2delay);
return 0;
}
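/*
* Worked example (illustrative numbers): with the integer divisor
* 6 * 512 * 1000 / 33 = 93090, a requested period of 9309000 (in the
* same time unit as MIN_PWM_PERIOD) yields 9309000 / 93090 - 6 = 94,
* which fits the 8-bit FX2 delay value that usbdux_pwm_start()
* programs into dux_commands[1].
*/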
/* is called from insn so there's no need to do all the sanity checks */
static int usbdux_pwm_start(struct comedi_device *dev,
struct comedi_subdevice *s)
{
int ret;
struct usbduxsub *this_usbduxsub = dev->private;
dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n",
dev->minor, __func__);
if (this_usbduxsub->pwm_cmd_running) {
/* already running */
return 0;
}
this_usbduxsub->dux_commands[1] = ((int8_t) this_usbduxsub->pwmDelay);
ret = send_dux_commands(this_usbduxsub, SENDPWMON);
if (ret < 0)
return ret;
/* initialise the buffer */
memset(this_usbduxsub->urbPwm->transfer_buffer, 0,
this_usbduxsub->sizePwmBuf);
this_usbduxsub->pwm_cmd_running = 1;
ret = usbduxsub_submit_PwmURBs(this_usbduxsub);
if (ret < 0) {
this_usbduxsub->pwm_cmd_running = 0;
return ret;
}
return 0;
}
/* generates the bit pattern for PWM with the optional sign bit */
static int usbdux_pwm_pattern(struct comedi_device *dev,
struct comedi_subdevice *s, int channel,
unsigned int value, unsigned int sign)
{
struct usbduxsub *this_usbduxsub = dev->private;
int i, szbuf;
char *pBuf;
char pwm_mask;
char sgn_mask;
char c;
if (!this_usbduxsub)
return -EFAULT;
/* this is the DIO bit which carries the PWM data */
pwm_mask = (1 << channel);
/* this is the DIO bit which carries the optional direction bit */
sgn_mask = (16 << channel);
/* this is the buffer which will be filled with the bit */
/* pattern for one period */
szbuf = this_usbduxsub->sizePwmBuf;
pBuf = (char *)(this_usbduxsub->urbPwm->transfer_buffer);
for (i = 0; i < szbuf; i++) {
c = *pBuf;
/* reset bits */
c = c & (~pwm_mask);
/* set the bit as long as the index is lower than the value */
if (i < value)
c = c | pwm_mask;
/* set the optional sign bit for a relay */
if (!sign) {
/* positive value */
c = c & (~sgn_mask);
} else {
/* negative value */
c = c | sgn_mask;
}
*(pBuf++) = c;
}
return 1;
}
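/*
* Worked example (illustrative values only; the real buffer is
* sizePwmBuf bytes long): for channel 0, value 3 and sign 1 over a
* hypothetical 8-sample buffer, pwm_mask is 0x01 and sgn_mask is 0x10,
* so the bytes become 0x11 0x11 0x11 0x10 0x10 0x10 0x10 0x10 - the
* PWM bit is high for the first "value" samples of each period while
* the sign bit stays set throughout.
*/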
static int usbdux_pwm_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct usbduxsub *this_usbduxsub = dev->private;
if (!this_usbduxsub)
return -EFAULT;
if ((insn->n) != 1) {
/*
* doesn't make sense to have more than one value here because
* it would just overwrite the PWM buffer a couple of times
*/
return -EINVAL;
}
/*
* the sign is set via a special INSN only, this gives us 8 bits for
* normal operation
* relay sign 0 by default
*/
return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0);
}
static int usbdux_pwm_read(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn,
unsigned int *data)
{
/* reading back the PWM state is not supported */
return -EINVAL;
}
/* switches on/off PWM */
static int usbdux_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct usbduxsub *this_usbduxsub = dev->private;
switch (data[0]) {
case INSN_CONFIG_ARM:
/* switch it on */
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s: pwm on\n", dev->minor, __func__);
/*
* if not zero the PWM is limited to a certain time which is
* not supported here
*/
if (data[1] != 0)
return -EINVAL;
return usbdux_pwm_start(dev, s);
case INSN_CONFIG_DISARM:
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s: pwm off\n", dev->minor, __func__);
return usbdux_pwm_cancel(dev, s);
case INSN_CONFIG_GET_PWM_STATUS:
/*
* to check if the USB transmission has failed or in case PWM
* was limited to n cycles to check if it has terminated
*/
data[1] = this_usbduxsub->pwm_cmd_running;
return 0;
case INSN_CONFIG_PWM_SET_PERIOD:
dev_dbg(&this_usbduxsub->interface->dev,
"comedi%d: %s: setting period\n", dev->minor, __func__);
return usbdux_pwm_period(dev, s, data[1]);
case INSN_CONFIG_PWM_GET_PERIOD:
data[1] = this_usbduxsub->pwmPeriod;
return 0;
case INSN_CONFIG_PWM_SET_H_BRIDGE:
/* value in the first byte and the sign in the second for a relay */
return usbdux_pwm_pattern(dev, s,
/* the channel number */
CR_CHAN(insn->chanspec),
/* actual PWM data */
data[1],
/* just a sign */
(data[2] != 0));
case INSN_CONFIG_PWM_GET_H_BRIDGE:
/* values are not kept in this driver, nothing to return here */
return -EINVAL;
}
return -EINVAL;
}
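/*
* Sketch of the INSN_CONFIG sequence a caller would issue against this
* subdevice (illustrative only; "period_value" is a placeholder and
* error handling is omitted):
*
* data[0] = INSN_CONFIG_PWM_SET_PERIOD;
* data[1] = period_value;
* (send the insn, then arm for an unlimited run)
* data[0] = INSN_CONFIG_ARM;
* data[1] = 0;
* (send the insn)
*/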
/* end of PWM */
/*****************************************************************/
static void tidy_up(struct usbduxsub *usbduxsub_tmp)
{
int i;
if (!usbduxsub_tmp)
return;
dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n");
/* shows the usb subsystem that the driver is down */
if (usbduxsub_tmp->interface)
usb_set_intfdata(usbduxsub_tmp->interface, NULL);
usbduxsub_tmp->probed = 0;
if (usbduxsub_tmp->urbIn) {
if (usbduxsub_tmp->ai_cmd_running) {
usbduxsub_tmp->ai_cmd_running = 0;
usbduxsub_unlink_InURBs(usbduxsub_tmp);
}
for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
/* entries may be NULL if probe failed half-way through */
if (usbduxsub_tmp->urbIn[i]) {
kfree(usbduxsub_tmp->urbIn[i]->transfer_buffer);
usbduxsub_tmp->urbIn[i]->transfer_buffer = NULL;
usb_kill_urb(usbduxsub_tmp->urbIn[i]);
usb_free_urb(usbduxsub_tmp->urbIn[i]);
usbduxsub_tmp->urbIn[i] = NULL;
}
}
kfree(usbduxsub_tmp->urbIn);
usbduxsub_tmp->urbIn = NULL;
}
if (usbduxsub_tmp->urbOut) {
if (usbduxsub_tmp->ao_cmd_running) {
usbduxsub_tmp->ao_cmd_running = 0;
usbduxsub_unlink_OutURBs(usbduxsub_tmp);
}
for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
/* check before dereferencing; entries may be NULL */
if (usbduxsub_tmp->urbOut[i]) {
kfree(usbduxsub_tmp->urbOut[i]->transfer_buffer);
usbduxsub_tmp->urbOut[i]->transfer_buffer = NULL;
usb_kill_urb(usbduxsub_tmp->urbOut[i]);
usb_free_urb(usbduxsub_tmp->urbOut[i]);
usbduxsub_tmp->urbOut[i] = NULL;
}
}
kfree(usbduxsub_tmp->urbOut);
usbduxsub_tmp->urbOut = NULL;
}
if (usbduxsub_tmp->urbPwm) {
if (usbduxsub_tmp->pwm_cmd_running) {
usbduxsub_tmp->pwm_cmd_running = 0;
usbduxsub_unlink_PwmURBs(usbduxsub_tmp);
}
kfree(usbduxsub_tmp->urbPwm->transfer_buffer);
usbduxsub_tmp->urbPwm->transfer_buffer = NULL;
usb_kill_urb(usbduxsub_tmp->urbPwm);
usb_free_urb(usbduxsub_tmp->urbPwm);
usbduxsub_tmp->urbPwm = NULL;
}
kfree(usbduxsub_tmp->inBuffer);
usbduxsub_tmp->inBuffer = NULL;
kfree(usbduxsub_tmp->insnBuffer);
usbduxsub_tmp->insnBuffer = NULL;
kfree(usbduxsub_tmp->outBuffer);
usbduxsub_tmp->outBuffer = NULL;
kfree(usbduxsub_tmp->dac_commands);
usbduxsub_tmp->dac_commands = NULL;
kfree(usbduxsub_tmp->dux_commands);
usbduxsub_tmp->dux_commands = NULL;
usbduxsub_tmp->ai_cmd_running = 0;
usbduxsub_tmp->ao_cmd_running = 0;
usbduxsub_tmp->pwm_cmd_running = 0;
}
static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
void *context)
{
struct usbduxsub *usbduxsub_tmp = context;
struct usb_device *usbdev = usbduxsub_tmp->usbdev;
int ret;
if (fw == NULL) {
dev_err(&usbdev->dev,
"Firmware complete handler without firmware!\n");
return;
}
/*
* we need to upload the firmware here because fw will be
* freed once we've left this function
*/
ret = firmwareUpload(usbduxsub_tmp, fw->data, fw->size);
if (ret) {
dev_err(&usbdev->dev,
"Could not upload firmware (err=%d)\n", ret);
goto out;
}
comedi_usb_auto_config(usbdev, BOARDNAME);
out:
release_firmware(fw);
}
/* allocate memory for the urbs and initialise them */
static int usbduxsub_probe(struct usb_interface *uinterf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(uinterf);
struct device *dev = &uinterf->dev;
int i;
int index;
int ret;
dev_dbg(dev, "comedi_: usbdux_: "
"finding a free structure for the usb-device\n");
down(&start_stop_sem);
/* look for a free place in the usbdux array */
index = -1;
for (i = 0; i < NUMUSBDUX; i++) {
if (!(usbduxsub[i].probed)) {
index = i;
break;
}
}
/* no more space */
if (index == -1) {
dev_err(dev, "Too many usbdux-devices connected.\n");
up(&start_stop_sem);
return -EMFILE;
}
dev_dbg(dev, "comedi_: usbdux: "
"usbduxsub[%d] is ready to connect to comedi.\n", index);
sema_init(&(usbduxsub[index].sem), 1);
/* save a pointer to the usb device */
usbduxsub[index].usbdev = udev;
/* 2.6: save the interface itself */
usbduxsub[index].interface = uinterf;
/* get the interface number from the interface */
usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
/* hand the private data over to the usb subsystem */
/* will be needed for disconnect */
usb_set_intfdata(uinterf, &(usbduxsub[index]));
dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum);
/* test if it is high speed (USB 2.0) */
usbduxsub[index].high_speed =
(usbduxsub[index].usbdev->speed == USB_SPEED_HIGH);
/* create space for the commands of the DA converter */
usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
if (!usbduxsub[index].dac_commands) {
dev_err(dev, "comedi_: usbdux: "
"error alloc space for dac commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
/* create space for the commands going to the usb device */
usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
if (!usbduxsub[index].dux_commands) {
dev_err(dev, "comedi_: usbdux: "
"error alloc space for dux commands\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
/* create space for the in buffer and set it to zero */
usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].inBuffer)) {
dev_err(dev, "comedi_: usbdux: "
"could not alloc space for inBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
/* create space for the instruction buffer */
usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
if (!(usbduxsub[index].insnBuffer)) {
dev_err(dev, "comedi_: usbdux: "
"could not alloc space for insnBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
/* create space for the outbuffer */
usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].outBuffer)) {
dev_err(dev, "comedi_: usbdux: "
"could not alloc space for outBuffer\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
/* setting to alternate setting 3: enabling iso ep and bulk ep. */
i = usb_set_interface(usbduxsub[index].usbdev,
usbduxsub[index].ifnum, 3);
if (i < 0) {
dev_err(dev, "comedi_: usbdux%d: "
"could not set alternate setting 3 in high speed.\n",
index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENODEV;
}
if (usbduxsub[index].high_speed)
usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSHIGH;
else
usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL;
usbduxsub[index].urbIn =
kcalloc(usbduxsub[index].numOfInBuffers, sizeof(struct urb *),
GFP_KERNEL);
if (!(usbduxsub[index].urbIn)) {
dev_err(dev, "comedi_: usbdux: Could not alloc. urbIn array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
for (i = 0; i < usbduxsub[index].numOfInBuffers; i++) {
/* one frame: 1ms */
usbduxsub[index].urbIn[i] = usb_alloc_urb(1, GFP_KERNEL);
if (usbduxsub[index].urbIn[i] == NULL) {
dev_err(dev, "comedi_: usbdux%d: "
"Could not alloc. urb(%d)\n", index, i);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
usbduxsub[index].urbIn[i]->dev = usbduxsub[index].usbdev;
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
usbduxsub[index].urbIn[i]->context = NULL;
usbduxsub[index].urbIn[i]->pipe =
usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP);
usbduxsub[index].urbIn[i]->transfer_flags = URB_ISO_ASAP;
usbduxsub[index].urbIn[i]->transfer_buffer =
kzalloc(SIZEINBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) {
dev_err(dev, "comedi_: usbdux%d: "
"could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
usbduxsub[index].urbIn[i]->complete = usbduxsub_ai_IsocIrq;
usbduxsub[index].urbIn[i]->number_of_packets = 1;
usbduxsub[index].urbIn[i]->transfer_buffer_length = SIZEINBUF;
usbduxsub[index].urbIn[i]->iso_frame_desc[0].offset = 0;
usbduxsub[index].urbIn[i]->iso_frame_desc[0].length = SIZEINBUF;
}
/* out */
if (usbduxsub[index].high_speed)
usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSHIGH;
else
usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL;
usbduxsub[index].urbOut =
kcalloc(usbduxsub[index].numOfOutBuffers, sizeof(struct urb *),
GFP_KERNEL);
if (!(usbduxsub[index].urbOut)) {
dev_err(dev, "comedi_: usbdux: "
"Could not alloc. urbOut array\n");
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
for (i = 0; i < usbduxsub[index].numOfOutBuffers; i++) {
/* one frame: 1ms */
usbduxsub[index].urbOut[i] = usb_alloc_urb(1, GFP_KERNEL);
if (usbduxsub[index].urbOut[i] == NULL) {
dev_err(dev, "comedi_: usbdux%d: "
"Could not alloc. urb(%d)\n", index, i);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
usbduxsub[index].urbOut[i]->dev = usbduxsub[index].usbdev;
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
usbduxsub[index].urbOut[i]->context = NULL;
usbduxsub[index].urbOut[i]->pipe =
usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP);
usbduxsub[index].urbOut[i]->transfer_flags = URB_ISO_ASAP;
usbduxsub[index].urbOut[i]->transfer_buffer =
kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) {
dev_err(dev, "comedi_: usbdux%d: "
"could not alloc. transb.\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
usbduxsub[index].urbOut[i]->complete = usbduxsub_ao_IsocIrq;
usbduxsub[index].urbOut[i]->number_of_packets = 1;
usbduxsub[index].urbOut[i]->transfer_buffer_length = SIZEOUTBUF;
usbduxsub[index].urbOut[i]->iso_frame_desc[0].offset = 0;
usbduxsub[index].urbOut[i]->iso_frame_desc[0].length =
SIZEOUTBUF;
if (usbduxsub[index].high_speed) {
/* uframes */
usbduxsub[index].urbOut[i]->interval = 8;
} else {
/* frames */
usbduxsub[index].urbOut[i]->interval = 1;
}
}
/* pwm */
if (usbduxsub[index].high_speed) {
/* max bulk ep size in high speed */
usbduxsub[index].sizePwmBuf = 512;
usbduxsub[index].urbPwm = usb_alloc_urb(0, GFP_KERNEL);
if (usbduxsub[index].urbPwm == NULL) {
dev_err(dev, "comedi_: usbdux%d: "
"Could not alloc. pwm urb\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
usbduxsub[index].urbPwm->transfer_buffer =
kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL);
if (!(usbduxsub[index].urbPwm->transfer_buffer)) {
dev_err(dev, "comedi_: usbdux%d: "
"could not alloc. transb. for pwm\n", index);
tidy_up(&(usbduxsub[index]));
up(&start_stop_sem);
return -ENOMEM;
}
} else {
usbduxsub[index].urbPwm = NULL;
usbduxsub[index].sizePwmBuf = 0;
}
usbduxsub[index].ai_cmd_running = 0;
usbduxsub[index].ao_cmd_running = 0;
usbduxsub[index].pwm_cmd_running = 0;
/* we've reached the bottom of the function */
usbduxsub[index].probed = 1;
up(&start_stop_sem);
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG,
"usbdux_firmware.bin",
&udev->dev,
GFP_KERNEL,
usbduxsub + index,
usbdux_firmware_request_complete_handler);
if (ret) {
dev_err(dev, "Could not load firmware (err=%d)\n", ret);
return ret;
}
dev_info(dev, "comedi_: usbdux%d "
"has been successfully initialised.\n", index);
/* success */
return 0;
}
static void usbduxsub_disconnect(struct usb_interface *intf)
{
struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf);
struct usb_device *udev = interface_to_usbdev(intf);
if (!usbduxsub_tmp) {
dev_err(&intf->dev,
"comedi_: disconnect called with null pointer.\n");
return;
}
if (usbduxsub_tmp->usbdev != udev) {
dev_err(&intf->dev, "comedi_: BUG! called with wrong ptr!!!\n");
return;
}
comedi_usb_auto_unconfig(udev);
down(&start_stop_sem);
down(&usbduxsub_tmp->sem);
tidy_up(usbduxsub_tmp);
up(&usbduxsub_tmp->sem);
up(&start_stop_sem);
dev_dbg(&intf->dev, "comedi_: disconnected from the usb\n");
}
/* is called when comedi-config is called */
static int usbdux_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
int index;
int i;
struct usbduxsub *udev;
struct comedi_subdevice *s = NULL;
dev->private = NULL;
down(&start_stop_sem);
/* find a valid device which has been detected by the probe function of
* the usb driver */
index = -1;
for (i = 0; i < NUMUSBDUX; i++) {
if ((usbduxsub[i].probed) && (!usbduxsub[i].attached)) {
index = i;
break;
}
}
if (index < 0) {
printk(KERN_ERR "comedi%d: usbdux: error: attach failed, no "
"usbdux devs connected to the usb bus.\n", dev->minor);
up(&start_stop_sem);
return -ENODEV;
}
udev = &usbduxsub[index];
down(&udev->sem);
/* pointer back to the corresponding comedi device */
udev->comedidev = dev;
/* trying to upload the firmware into the chip */
if (comedi_aux_data(it->options, 0) &&
it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]) {
firmwareUpload(udev, comedi_aux_data(it->options, 0),
it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]);
}
dev->board_name = BOARDNAME;
/* set number of subdevices */
if (udev->high_speed) {
/* with pwm */
dev->n_subdevices = 5;
} else {
/* without pwm */
dev->n_subdevices = 4;
}
/* allocate space for the subdevices */
ret = alloc_subdevices(dev, dev->n_subdevices);
if (ret < 0) {
dev_err(&udev->interface->dev,
"comedi%d: error alloc space for subdev\n", dev->minor);
up(&udev->sem);
up(&start_stop_sem);
return ret;
}
dev_info(&udev->interface->dev,
"comedi%d: usb-device %d is attached to comedi.\n",
dev->minor, index);
/* private structure is also simply the usb-structure */
dev->private = udev;
/* the first subdevice is the A/D converter */
s = dev->subdevices + SUBDEV_AD;
/* the URBs get the comedi subdevice */
/* which is responsible for reading */
/* this is the subdevice which reads data */
dev->read_subdev = s;
/* the subdevice receives as private structure the */
/* usb-structure */
s->private = NULL;
/* analog input */
s->type = COMEDI_SUBD_AI;
/* readable and ref is to ground */
s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
/* 8 channels */
s->n_chan = 8;
/* length of the channellist */
s->len_chanlist = 8;
/* callback functions */
s->insn_read = usbdux_ai_insn_read;
s->do_cmdtest = usbdux_ai_cmdtest;
s->do_cmd = usbdux_ai_cmd;
s->cancel = usbdux_ai_cancel;
/* max value from the A/D converter (12bit) */
s->maxdata = 0xfff;
/* range table to convert to physical units */
s->range_table = (&range_usbdux_ai_range);
/* analog out */
s = dev->subdevices + SUBDEV_DA;
/* analog out */
s->type = COMEDI_SUBD_AO;
/* backward pointer */
dev->write_subdev = s;
/* the subdevice receives as private structure the */
/* usb-structure */
s->private = NULL;
/* are writable */
s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
/* 4 channels */
s->n_chan = 4;
/* length of the channellist */
s->len_chanlist = 4;
/* 12 bit resolution */
s->maxdata = 0x0fff;
/* bipolar range */
s->range_table = (&range_usbdux_ao_range);
/* callback */
s->do_cmdtest = usbdux_ao_cmdtest;
s->do_cmd = usbdux_ao_cmd;
s->cancel = usbdux_ao_cancel;
s->insn_read = usbdux_ao_insn_read;
s->insn_write = usbdux_ao_insn_write;
/* digital I/O */
s = dev->subdevices + SUBDEV_DIO;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 8;
s->maxdata = 1;
s->range_table = (&range_digital);
s->insn_bits = usbdux_dio_insn_bits;
s->insn_config = usbdux_dio_insn_config;
/* we don't use it */
s->private = NULL;
/* counter */
s = dev->subdevices + SUBDEV_COUNTER;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 4;
s->maxdata = 0xFFFF;
s->insn_read = usbdux_counter_read;
s->insn_write = usbdux_counter_write;
s->insn_config = usbdux_counter_config;
if (udev->high_speed) {
/* timer / pwm */
s = dev->subdevices + SUBDEV_PWM;
s->type = COMEDI_SUBD_PWM;
s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
s->n_chan = 8;
/* this defines the max duty cycle resolution */
s->maxdata = udev->sizePwmBuf;
s->insn_write = usbdux_pwm_write;
s->insn_read = usbdux_pwm_read;
s->insn_config = usbdux_pwm_config;
usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
}
/* finally decide that it's attached */
udev->attached = 1;
up(&udev->sem);
up(&start_stop_sem);
dev_info(&udev->interface->dev, "comedi%d: attached to usbdux.\n",
dev->minor);
return 0;
}
static int usbdux_detach(struct comedi_device *dev)
{
struct usbduxsub *usbduxsub_tmp;
if (!dev) {
printk(KERN_ERR
"comedi?: usbdux: detach without dev variable...\n");
return -EFAULT;
}
usbduxsub_tmp = dev->private;
if (!usbduxsub_tmp) {
printk(KERN_ERR
"comedi?: usbdux: detach without ptr to usbduxsub[]\n");
return -EFAULT;
}
dev_dbg(&usbduxsub_tmp->interface->dev, "comedi%d: detach usb device\n",
dev->minor);
down(&usbduxsub_tmp->sem);
/* Don't allow detach to free the private structure */
/* It's one entry of usbduxsub[] */
dev->private = NULL;
usbduxsub_tmp->attached = 0;
usbduxsub_tmp->comedidev = NULL;
dev_dbg(&usbduxsub_tmp->interface->dev,
"comedi%d: detach: successfully removed\n", dev->minor);
up(&usbduxsub_tmp->sem);
return 0;
}
/* main driver struct */
static struct comedi_driver driver_usbdux = {
.driver_name = "usbdux",
.module = THIS_MODULE,
.attach = usbdux_attach,
.detach = usbdux_detach,
};
/* Table of supported USB devices; for now these are just test IDs */
static const struct usb_device_id usbduxsub_table[] = {
{USB_DEVICE(0x13d8, 0x0001)},
{USB_DEVICE(0x13d8, 0x0002)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbduxsub_table);
/* The usbduxsub-driver */
static struct usb_driver usbduxsub_driver = {
.name = BOARDNAME,
.probe = usbduxsub_probe,
.disconnect = usbduxsub_disconnect,
.id_table = usbduxsub_table,
};
/* Can't use the nice macro as I have also to initialise the USB */
/* subsystem: */
/* registering the usb-system _and_ the comedi-driver */
static int __init init_usbdux(void)
{
int ret;
printk(KERN_INFO KBUILD_MODNAME ": "
DRIVER_VERSION ":" DRIVER_DESC "\n");
ret = usb_register(&usbduxsub_driver);
if (ret < 0)
return ret;
ret = comedi_driver_register(&driver_usbdux);
if (ret < 0)
usb_deregister(&usbduxsub_driver);
return ret;
}
/* deregistering the comedi driver and the usb-subsystem */
static void __exit exit_usbdux(void)
{
comedi_driver_unregister(&driver_usbdux);
usb_deregister(&usbduxsub_driver);
}
module_init(init_usbdux);
module_exit(exit_usbdux);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
lnfamous/Kernel_Htc_Pico_CyanogenMod9 | fs/fscache/operation.c | 8013 | 12096 | /* FS-Cache worker operation management routines
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/filesystems/caching/operations.txt
*/
#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"
atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);
/**
* fscache_enqueue_operation - Enqueue an operation for processing
* @op: The operation to enqueue
*
* Enqueue an operation for processing by the FS-Cache thread pool.
*
* This will get its own ref on the object.
*/
void fscache_enqueue_operation(struct fscache_operation *op)
{
_enter("{OBJ%x OP%x,%u}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
ASSERT(list_empty(&op->pend_link));
ASSERT(op->processor != NULL);
ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
ASSERTCMP(atomic_read(&op->usage), >, 0);
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
case FSCACHE_OP_ASYNC:
_debug("queue async");
atomic_inc(&op->usage);
if (!queue_work(fscache_op_wq, &op->work))
fscache_put_operation(op);
break;
case FSCACHE_OP_MYTHREAD:
_debug("queue for caller's attention");
break;
default:
printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
op->flags);
BUG();
break;
}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
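/*
* Minimal usage sketch (illustrative, not taken from this file): a
* caller must satisfy the assertions above - a processor routine, a
* usage reference and an object that is at least
* FSCACHE_OBJECT_AVAILABLE - before enqueueing an async op:
*
* op->processor = my_processor; (hypothetical callback)
* op->flags = FSCACHE_OP_ASYNC;
* atomic_set(&op->usage, 1);
* fscache_enqueue_operation(op);
*
* The queue takes its own reference; fscache_op_work_func() below
* drops it with fscache_put_operation() after running the processor.
*/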
/*
* start an op running
*/
static void fscache_run_op(struct fscache_object *object,
struct fscache_operation *op)
{
object->n_in_progress++;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
if (op->processor)
fscache_enqueue_operation(op);
fscache_stat(&fscache_n_op_run);
}
/*
* submit an exclusive operation for an object
* - other ops are excluded from running simultaneously with this one
* - this gets any extra refs it needs on an op
*/
int fscache_submit_exclusive_op(struct fscache_object *object,
struct fscache_operation *op)
{
int ret;
_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
spin_lock(&object->lock);
ASSERTCMP(object->n_ops, >=, object->n_in_progress);
ASSERTCMP(object->n_ops, >=, object->n_exclusive);
ASSERT(list_empty(&op->pend_link));
ret = -ENOBUFS;
if (fscache_object_is_active(object)) {
op->object = object;
object->n_ops++;
object->n_exclusive++; /* reads and writes must wait */
if (object->n_ops > 1) {
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
} else if (!list_empty(&object->pending_ops)) {
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
fscache_start_operations(object);
} else {
ASSERTCMP(object->n_in_progress, ==, 0);
fscache_run_op(object, op);
}
/* need to issue a new write op after this */
clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
ret = 0;
} else if (object->state == FSCACHE_OBJECT_CREATING) {
op->object = object;
object->n_ops++;
object->n_exclusive++; /* reads and writes must wait */
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
ret = 0;
} else {
/* not allowed to submit ops in any other state */
BUG();
}
spin_unlock(&object->lock);
return ret;
}
/*
* report an unexpected submission
*/
static void fscache_report_unexpected_submission(struct fscache_object *object,
struct fscache_operation *op,
unsigned long ostate)
{
static bool once_only;
struct fscache_operation *p;
unsigned n;
if (once_only)
return;
once_only = true;
kdebug("unexpected submission OP%x [OBJ%x %s]",
op->debug_id, object->debug_id,
fscache_object_states[object->state]);
kdebug("objstate=%s [%s]",
fscache_object_states[object->state],
fscache_object_states[ostate]);
kdebug("objflags=%lx", object->flags);
kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
kdebug("ops=%u inp=%u exc=%u",
object->n_ops, object->n_in_progress, object->n_exclusive);
if (!list_empty(&object->pending_ops)) {
n = 0;
list_for_each_entry(p, &object->pending_ops, pend_link) {
ASSERTCMP(p->object, ==, object);
kdebug("%p %p", op->processor, op->release);
n++;
}
kdebug("n=%u", n);
}
dump_stack();
}
/*
* submit an operation for an object
* - objects may be submitted only in the following states:
* - during object creation (write ops may be submitted)
* - whilst the object is active
* - after an I/O error incurred in one of the two above states (op rejected)
* - this gets any extra refs it needs on an op
*/
int fscache_submit_op(struct fscache_object *object,
struct fscache_operation *op)
{
unsigned long ostate;
int ret;
_enter("{OBJ%x OP%x},{%u}",
object->debug_id, op->debug_id, atomic_read(&op->usage));
ASSERTCMP(atomic_read(&op->usage), >, 0);
spin_lock(&object->lock);
ASSERTCMP(object->n_ops, >=, object->n_in_progress);
ASSERTCMP(object->n_ops, >=, object->n_exclusive);
ASSERT(list_empty(&op->pend_link));
ostate = object->state;
smp_rmb();
if (fscache_object_is_active(object)) {
op->object = object;
object->n_ops++;
if (object->n_exclusive > 0) {
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
} else if (!list_empty(&object->pending_ops)) {
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
fscache_start_operations(object);
} else {
ASSERTCMP(object->n_exclusive, ==, 0);
fscache_run_op(object, op);
}
ret = 0;
} else if (object->state == FSCACHE_OBJECT_CREATING) {
op->object = object;
object->n_ops++;
atomic_inc(&op->usage);
list_add_tail(&op->pend_link, &object->pending_ops);
fscache_stat(&fscache_n_op_pend);
ret = 0;
} else if (object->state == FSCACHE_OBJECT_DYING ||
object->state == FSCACHE_OBJECT_LC_DYING ||
object->state == FSCACHE_OBJECT_WITHDRAWING) {
fscache_stat(&fscache_n_op_rejected);
ret = -ENOBUFS;
} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
fscache_report_unexpected_submission(object, op, ostate);
ASSERT(!fscache_object_is_active(object));
ret = -ENOBUFS;
} else {
ret = -ENOBUFS;
}
spin_unlock(&object->lock);
return ret;
}
/*
* queue an object for withdrawal on error, aborting all following asynchronous
* operations
*/
void fscache_abort_object(struct fscache_object *object)
{
_enter("{OBJ%x}", object->debug_id);
fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}
/*
* jump start the operation processing on an object
* - caller must hold object->lock
*/
void fscache_start_operations(struct fscache_object *object)
{
struct fscache_operation *op;
bool stop = false;
while (!list_empty(&object->pending_ops) && !stop) {
op = list_entry(object->pending_ops.next,
struct fscache_operation, pend_link);
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
if (object->n_in_progress > 0)
break;
stop = true;
}
list_del_init(&op->pend_link);
fscache_run_op(object, op);
/* the pending queue was holding a ref on the object */
fscache_put_operation(op);
}
ASSERTCMP(object->n_in_progress, <=, object->n_ops);
_debug("woke %d ops on OBJ%x",
object->n_in_progress, object->debug_id);
}
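/*
* Worked example of the gating above (illustrative): with pending_ops
* holding [read1, read2, EXCL, read3] and nothing in progress, the
* loop starts read1 and read2, then reaches EXCL with two ops in
* progress and breaks, leaving it queued. When both reads complete
* and this function runs again, EXCL is started alone and "stop" ends
* the loop, so read3 keeps waiting behind the exclusive op.
*/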
/*
* cancel an operation that's pending on an object
*/
int fscache_cancel_op(struct fscache_operation *op)
{
struct fscache_object *object = op->object;
int ret;
_enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id);
spin_lock(&object->lock);
ret = -EBUSY;
if (!list_empty(&op->pend_link)) {
fscache_stat(&fscache_n_op_cancelled);
list_del_init(&op->pend_link);
object->n_ops--;
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
object->n_exclusive--;
if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
fscache_put_operation(op);
ret = 0;
}
spin_unlock(&object->lock);
_leave(" = %d", ret);
return ret;
}
/*
* release an operation
* - queues pending ops if this is the last in-progress op
*/
void fscache_put_operation(struct fscache_operation *op)
{
struct fscache_object *object;
struct fscache_cache *cache;
_enter("{OBJ%x OP%x,%d}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
ASSERTCMP(atomic_read(&op->usage), >, 0);
if (!atomic_dec_and_test(&op->usage))
return;
_debug("PUT OP");
if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
BUG();
fscache_stat(&fscache_n_op_release);
if (op->release) {
op->release(op);
op->release = NULL;
}
object = op->object;
if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
atomic_dec(&object->n_reads);
/* now... we may get called with the object spinlock held, so we
* complete the cleanup here only if we can immediately acquire the
* lock, and defer it otherwise */
if (!spin_trylock(&object->lock)) {
_debug("defer put");
fscache_stat(&fscache_n_op_deferred_release);
cache = object->cache;
spin_lock(&cache->op_gc_list_lock);
list_add_tail(&op->pend_link, &cache->op_gc_list);
spin_unlock(&cache->op_gc_list_lock);
schedule_work(&cache->op_gc);
_leave(" [defer]");
return;
}
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
ASSERTCMP(object->n_exclusive, >, 0);
object->n_exclusive--;
}
ASSERTCMP(object->n_in_progress, >, 0);
object->n_in_progress--;
if (object->n_in_progress == 0)
fscache_start_operations(object);
ASSERTCMP(object->n_ops, >, 0);
object->n_ops--;
if (object->n_ops == 0)
fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
spin_unlock(&object->lock);
kfree(op);
_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
/*
* garbage collect operations that have had their release deferred
*/
void fscache_operation_gc(struct work_struct *work)
{
struct fscache_operation *op;
struct fscache_object *object;
struct fscache_cache *cache =
container_of(work, struct fscache_cache, op_gc);
int count = 0;
_enter("");
do {
spin_lock(&cache->op_gc_list_lock);
if (list_empty(&cache->op_gc_list)) {
spin_unlock(&cache->op_gc_list_lock);
break;
}
op = list_entry(cache->op_gc_list.next,
struct fscache_operation, pend_link);
list_del(&op->pend_link);
spin_unlock(&cache->op_gc_list_lock);
object = op->object;
_debug("GC DEFERRED REL OBJ%x OP%x",
object->debug_id, op->debug_id);
fscache_stat(&fscache_n_op_gc);
ASSERTCMP(atomic_read(&op->usage), ==, 0);
spin_lock(&object->lock);
if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
ASSERTCMP(object->n_exclusive, >, 0);
object->n_exclusive--;
}
ASSERTCMP(object->n_in_progress, >, 0);
object->n_in_progress--;
if (object->n_in_progress == 0)
fscache_start_operations(object);
ASSERTCMP(object->n_ops, >, 0);
object->n_ops--;
if (object->n_ops == 0)
fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
spin_unlock(&object->lock);
} while (count++ < 20);
if (!list_empty(&cache->op_gc_list))
schedule_work(&cache->op_gc);
_leave("");
}
/*
* execute an operation using fscache_op_wq to provide processing context -
* the caller holds a ref to this object, so we don't need to hold one
*/
void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
ASSERT(op->processor != NULL);
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
fscache_put_operation(op);
_leave("");
}
| gpl-2.0 |
lindsaytheflint/stone | fs/fscache/fsdef.c | 12109 | 4324 | /* Filesystem index definition
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include "internal.h"
static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax);
static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax);
static
enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data,
const void *data,
uint16_t datalen);
/*
* The root index is owned by FS-Cache itself.
*
* When a netfs requests caching facilities, FS-Cache will, if one doesn't
* already exist, create an entry in the root index with the key being the name
* of the netfs ("AFS" for example), and the auxiliary data holding the index
* structure version supplied by the netfs:
*
* FSDEF
* |
* +-----------+
* | |
* NFS AFS
* [v=1] [v=1]
*
* If an entry with the appropriate name does already exist, the version is
* compared. If the version is different, the entire subtree from that entry
* will be discarded and a new entry created.
*
* The new entry will be an index, and a cookie referring to it will be passed
* to the netfs. This is then the root handle by which the netfs accesses the
* cache. It can create whatever objects it likes in that index, including
* further indices.
*/
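/*
* Sketch of the netfs side of the handshake described above
* (illustrative; fscache_register_netfs() is defined elsewhere in
* fs/fscache/):
*
* static struct fscache_netfs my_netfs = {
* .name = "mynfs",
* .version = 1,
* };
* ...
* ret = fscache_register_netfs(&my_netfs);
*
* Registration creates (or revalidates) the "mynfs" entry under FSDEF
* using the get_key/get_aux/check_aux operations defined below.
*/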
static struct fscache_cookie_def fscache_fsdef_index_def = {
.name = ".FS-Cache",
.type = FSCACHE_COOKIE_TYPE_INDEX,
};
struct fscache_cookie fscache_fsdef_index = {
.usage = ATOMIC_INIT(1),
.lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
.backing_objects = HLIST_HEAD_INIT,
.def = &fscache_fsdef_index_def,
};
EXPORT_SYMBOL(fscache_fsdef_index);
/*
* Definition of an entry in the root index. Each entry is an index, keyed to
* a specific netfs and only applicable to a particular version of the index
* structure used by that netfs.
*/
struct fscache_cookie_def fscache_fsdef_netfs_def = {
.name = "FSDEF.netfs",
.type = FSCACHE_COOKIE_TYPE_INDEX,
.get_key = fscache_fsdef_netfs_get_key,
.get_aux = fscache_fsdef_netfs_get_aux,
.check_aux = fscache_fsdef_netfs_check_aux,
};
/*
* get the key data for an FSDEF index record - this is the name of the netfs
* for which this entry is created
*/
static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct fscache_netfs *netfs = cookie_netfs_data;
unsigned klen;
_enter("{%s.%u},", netfs->name, netfs->version);
klen = strlen(netfs->name);
if (klen > bufmax)
return 0;
memcpy(buffer, netfs->name, klen);
return klen;
}
/*
* get the auxiliary data for an FSDEF index record - this is the index
* structure version number of the netfs for which this version is created
*/
static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data,
void *buffer, uint16_t bufmax)
{
const struct fscache_netfs *netfs = cookie_netfs_data;
unsigned dlen;
_enter("{%s.%u},", netfs->name, netfs->version);
dlen = sizeof(uint32_t);
if (dlen > bufmax)
return 0;
memcpy(buffer, &netfs->version, dlen);
return dlen;
}
/*
* check that the index structure version number stored in the auxiliary data
* matches the one the netfs gave us
*/
static enum fscache_checkaux fscache_fsdef_netfs_check_aux(
void *cookie_netfs_data,
const void *data,
uint16_t datalen)
{
struct fscache_netfs *netfs = cookie_netfs_data;
uint32_t version;
_enter("{%s},,%hu", netfs->name, datalen);
if (datalen != sizeof(version)) {
_leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version));
return FSCACHE_CHECKAUX_OBSOLETE;
}
memcpy(&version, data, sizeof(version));
if (version != netfs->version) {
_leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version);
return FSCACHE_CHECKAUX_OBSOLETE;
}
_leave(" = OKAY");
return FSCACHE_CHECKAUX_OKAY;
}
| gpl-2.0 |
Shaky156/TFP-Kernel-2.6.39 | sound/isa/galaxy/azt1605.c | 13133 | 2807 | /*
* Aztech AZT1605 Driver
* Copyright (C) 2007,2010 Rene Herman
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#define AZT1605
#define CRD_NAME "Aztech AZT1605"
#define DRV_NAME "AZT1605"
#define DEV_NAME "azt1605"
#define GALAXY_DSP_MAJOR 2
#define GALAXY_DSP_MINOR 1
#define GALAXY_CONFIG_SIZE 3
/*
* 24-bit config register
*/
#define GALAXY_CONFIG_SBA_220 (0 << 0)
#define GALAXY_CONFIG_SBA_240 (1 << 0)
#define GALAXY_CONFIG_SBA_260 (2 << 0)
#define GALAXY_CONFIG_SBA_280 (3 << 0)
#define GALAXY_CONFIG_SBA_MASK GALAXY_CONFIG_SBA_280
#define GALAXY_CONFIG_MPUA_300 (0 << 2)
#define GALAXY_CONFIG_MPUA_330 (1 << 2)
#define GALAXY_CONFIG_MPU_ENABLE (1 << 3)
#define GALAXY_CONFIG_GAME_ENABLE (1 << 4)
#define GALAXY_CONFIG_CD_PANASONIC (1 << 5)
#define GALAXY_CONFIG_CD_MITSUMI (1 << 6)
#define GALAXY_CONFIG_CD_MASK (\
GALAXY_CONFIG_CD_PANASONIC | GALAXY_CONFIG_CD_MITSUMI)
#define GALAXY_CONFIG_UNUSED (1 << 7)
#define GALAXY_CONFIG_UNUSED_MASK GALAXY_CONFIG_UNUSED
#define GALAXY_CONFIG_SBIRQ_2 (1 << 8)
#define GALAXY_CONFIG_SBIRQ_3 (1 << 9)
#define GALAXY_CONFIG_SBIRQ_5 (1 << 10)
#define GALAXY_CONFIG_SBIRQ_7 (1 << 11)
#define GALAXY_CONFIG_MPUIRQ_2 (1 << 12)
#define GALAXY_CONFIG_MPUIRQ_3 (1 << 13)
#define GALAXY_CONFIG_MPUIRQ_5 (1 << 14)
#define GALAXY_CONFIG_MPUIRQ_7 (1 << 15)
#define GALAXY_CONFIG_WSSA_530 (0 << 16)
#define GALAXY_CONFIG_WSSA_604 (1 << 16)
#define GALAXY_CONFIG_WSSA_E80 (2 << 16)
#define GALAXY_CONFIG_WSSA_F40 (3 << 16)
#define GALAXY_CONFIG_WSS_ENABLE (1 << 18)
#define GALAXY_CONFIG_CDIRQ_11 (1 << 19)
#define GALAXY_CONFIG_CDIRQ_12 (1 << 20)
#define GALAXY_CONFIG_CDIRQ_15 (1 << 21)
#define GALAXY_CONFIG_CDIRQ_MASK (\
GALAXY_CONFIG_CDIRQ_11 | GALAXY_CONFIG_CDIRQ_12 |\
GALAXY_CONFIG_CDIRQ_15)
#define GALAXY_CONFIG_CDDMA_DISABLE (0 << 22)
#define GALAXY_CONFIG_CDDMA_0 (1 << 22)
#define GALAXY_CONFIG_CDDMA_1 (2 << 22)
#define GALAXY_CONFIG_CDDMA_3 (3 << 22)
#define GALAXY_CONFIG_CDDMA_MASK GALAXY_CONFIG_CDDMA_3
#define GALAXY_CONFIG_MASK (\
GALAXY_CONFIG_SBA_MASK | GALAXY_CONFIG_CD_MASK |\
GALAXY_CONFIG_UNUSED_MASK | GALAXY_CONFIG_CDIRQ_MASK |\
GALAXY_CONFIG_CDDMA_MASK)
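/*
* Example (illustrative only) of composing a configuration word from
* the bits above: Sound Blaster base 0x220 on IRQ 5, MPU-401 enabled
* at 0x330 on IRQ 2, WSS codec at 0x530 and the game port enabled:
*
* GALAXY_CONFIG_SBA_220 | GALAXY_CONFIG_SBIRQ_5 |
* GALAXY_CONFIG_MPU_ENABLE | GALAXY_CONFIG_MPUA_330 |
* GALAXY_CONFIG_MPUIRQ_2 | GALAXY_CONFIG_WSS_ENABLE |
* GALAXY_CONFIG_WSSA_530 | GALAXY_CONFIG_GAME_ENABLE
*/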
#include "galaxy.c"
| gpl-2.0 |
nullpo-head/linux | tools/lib/lockdep/preload.c | 590 | 12958 | #define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include "include/liblockdep/mutex.h"
#include "../../../include/linux/rbtree.h"
/**
* struct lock_lookup - liblockdep's view of a single unique lock
* @orig: pointer to the original pthread lock, used for lookups
* @dep_map: lockdep's dep_map structure
* @key: lockdep's key structure
* @node: rb-tree node used to store the lock in a global tree
* @name: a unique name for the lock
*/
struct lock_lookup {
void *orig; /* Original pthread lock, used for lookups */
struct lockdep_map dep_map; /* Since all locks are dynamic, we need
* a dep_map and a key for each lock */
/*
* Wait, there's no support for key classes? Yup :(
* Most big projects wrap the pthread api with their own calls to
* be compatible with different locking methods. This means that
* "classes" will be brokes since the function that creates all
* locks will point to a generic locking function instead of the
* actual code that wants to do the locking.
*/
struct lock_class_key key;
struct rb_node node;
#define LIBLOCKDEP_MAX_LOCK_NAME 22
char name[LIBLOCKDEP_MAX_LOCK_NAME];
};
/* This is where we store our locks */
static struct rb_root locks = RB_ROOT;
static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;
/* pthread mutex API */
#ifdef __GLIBC__
extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
#else
#define __pthread_mutex_init NULL
#define __pthread_mutex_lock NULL
#define __pthread_mutex_trylock NULL
#define __pthread_mutex_unlock NULL
#define __pthread_mutex_destroy NULL
#endif
static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr) = __pthread_mutex_init;
static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;
/* pthread rwlock API */
#ifdef __GLIBC__
extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
#else
#define __pthread_rwlock_init NULL
#define __pthread_rwlock_destroy NULL
#define __pthread_rwlock_wrlock NULL
#define __pthread_rwlock_trywrlock NULL
#define __pthread_rwlock_rdlock NULL
#define __pthread_rwlock_tryrdlock NULL
#define __pthread_rwlock_unlock NULL
#endif
static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;
enum { none, prepare, done, } __init_state;
static void init_preload(void);
static void try_init_preload(void)
{
if (__init_state != done)
init_preload();
}
static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
{
struct rb_node **node = &locks.rb_node;
struct lock_lookup *l;
*parent = NULL;
while (*node) {
l = rb_entry(*node, struct lock_lookup, node);
*parent = *node;
if (lock < l->orig)
node = &l->node.rb_left;
else if (lock > l->orig)
node = &l->node.rb_right;
else
return node;
}
return node;
}
#ifndef LIBLOCKDEP_STATIC_ENTRIES
#define LIBLOCKDEP_STATIC_ENTRIES 1024
#endif
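/*
* The size of the static pool is a compile-time knob; it can be
* raised when building liblockdep, e.g. (illustrative):
*
* cc ... -DLIBLOCKDEP_STATIC_ENTRIES=4096 ...
*/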
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
static int __locks_nr;
static inline bool is_static_lock(struct lock_lookup *lock)
{
return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
}
static struct lock_lookup *alloc_lock(void)
{
if (__init_state != done) {
/*
* Some programs attempt to initialize and use locks in their
* allocation path. This means that a call to malloc() would
* result in locks being initialized and locked.
*
* Why is it an issue for us? dlsym() below will try allocating
* to give us the original function. Since this allocation will
* result in a locking operation, we would have to let pthread
* deal with it, but we can't! We don't have the pointer to the
* original API since we're inside dlsym() trying to get it.
*/
int idx = __locks_nr++;
if (idx >= ARRAY_SIZE(__locks)) {
fprintf(stderr,
"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
exit(EX_UNAVAILABLE);
}
return __locks + idx;
}
return malloc(sizeof(struct lock_lookup));
}
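/*
 * A minimal sketch of the same early-allocation pattern in isolation
 * (illustrative only; all names here are hypothetical):
 *
 *	static struct obj pool[NPOOL];
 *	static int pool_nr;
 *
 *	struct obj *obj_alloc(void)
 *	{
 *		if (!allocator_ready)	// e.g. still inside dlsym()
 *			return pool_nr < NPOOL ? &pool[pool_nr++] : NULL;
 *		return malloc(sizeof(struct obj));
 *	}
 *
 * alloc_lock() above exits instead of returning NULL so that an
 * undersized static pool is caught loudly rather than as a silent
 * allocation failure.
 */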
static inline void free_lock(struct lock_lookup *lock)
{
if (likely(!is_static_lock(lock)))
free(lock);
}
/**
* __get_lock - find or create a lock instance
* @lock: pointer to a pthread lock function
*
* Try to find an existing lock in the rbtree using the provided pointer. If
* one wasn't found - create it.
*/
static struct lock_lookup *__get_lock(void *lock)
{
struct rb_node **node, *parent;
struct lock_lookup *l;
ll_pthread_rwlock_rdlock(&locks_rwlock);
node = __get_lock_node(lock, &parent);
ll_pthread_rwlock_unlock(&locks_rwlock);
if (*node) {
return rb_entry(*node, struct lock_lookup, node);
}
/* We didn't find the lock, let's create it */
l = alloc_lock();
if (l == NULL)
return NULL;
l->orig = lock;
/*
* Currently the name of the lock is the pointer value of the pthread
* lock; while not optimal, it makes debugging a bit easier.
*
* TODO: Get the real name of the lock using libdwarf
*/
sprintf(l->name, "%p", lock);
lockdep_init_map(&l->dep_map, l->name, &l->key, 0);
ll_pthread_rwlock_wrlock(&locks_rwlock);
/* This might have changed since the last time we fetched it */
node = __get_lock_node(lock, &parent);
rb_link_node(&l->node, parent, node);
rb_insert_color(&l->node, &locks);
ll_pthread_rwlock_unlock(&locks_rwlock);
return l;
}
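/*
 * Note on the locking pattern above (illustrative summary): the lookup
 * runs under the read lock, but the link position is computed again
 * under the write lock, because the tree may have been reshaped between
 * dropping one lock and taking the other.
 */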
static void __del_lock(struct lock_lookup *lock)
{
ll_pthread_rwlock_wrlock(&locks_rwlock);
rb_erase(&lock->node, &locks);
ll_pthread_rwlock_unlock(&locks_rwlock);
free_lock(lock);
}
int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr)
{
int r;
/*
* We keep trying to init our preload module because there might be
* code in init sections that tries to touch locks before we are
* initialized, in that case we'll need to manually call preload
* to get us going.
*
* Funny enough, kernel's lockdep had the same issue, and used
* (almost) the same solution. See look_up_lock_class() in
* kernel/locking/lockdep.c for details.
*/
try_init_preload();
r = ll_pthread_mutex_init(mutex, attr);
if (r == 0)
/*
* We do a dummy initialization here so that lockdep could
* warn us if something fishy is going on - such as
* initializing a held lock.
*/
__get_lock(mutex);
return r;
}
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
(unsigned long)_RET_IP_);
/*
* Here's the thing with pthread mutexes: unlike the kernel variant,
* they can fail.
*
* This means that the behaviour here is a bit different from what's
* going on in the kernel: there we just tell lockdep that we took the
* lock before actually taking it, but here we must deal with the case
* that locking failed.
*
* To do that we'll "release" the lock if locking failed - this way
* we'll get lockdep doing the correct checks when we try to take
* the lock, and if that fails - we'll be back to the correct
* state by releasing it.
*/
r = ll_pthread_mutex_lock(mutex);
if (r)
lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_mutex_trylock(mutex);
if (r)
lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
int r;
try_init_preload();
lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
/*
* Just like taking a lock, only in reverse!
*
* If we fail releasing the lock, tell lockdep we're holding it again.
*/
r = ll_pthread_mutex_unlock(mutex);
if (r)
lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
try_init_preload();
/*
* Let's see if we're releasing a lock that's held.
*
* TODO: Hook into free() and add that check there as well.
*/
debug_check_no_locks_freed(mutex, sizeof(*mutex));
__del_lock(__get_lock(mutex));
return ll_pthread_mutex_destroy(mutex);
}
/* This is the rwlock part, very similar to the mutex part above */
int pthread_rwlock_init(pthread_rwlock_t *rwlock,
const pthread_rwlockattr_t *attr)
{
int r;
try_init_preload();
r = ll_pthread_rwlock_init(rwlock, attr);
if (r == 0)
__get_lock(rwlock);
return r;
}
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
try_init_preload();
debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
__del_lock(__get_lock(rwlock));
return ll_pthread_rwlock_destroy(rwlock);
}
int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_rdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_tryrdlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_trywrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
int r;
try_init_preload();
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_wrlock(rwlock);
if (r)
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
return r;
}
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
int r;
try_init_preload();
lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
r = ll_pthread_rwlock_unlock(rwlock);
if (r)
lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
return r;
}
__attribute__((constructor)) static void init_preload(void)
{
if (__init_state == done)
return;
#ifndef __GLIBC__
__init_state = prepare;
ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");
ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif
lockdep_init();
__init_state = done;
}
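/*
 * Illustrative usage note (not part of the original file): this object
 * is meant to be preloaded into an unmodified pthreads program, e.g.
 *
 *	LD_PRELOAD=./liblockdep.so ./myprog
 *
 * after which the wrappers above feed every mutex/rwlock operation into
 * lockdep, which then reports lock-order inversions at runtime.
 */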
| gpl-2.0 |
iamroot12C/linux | drivers/media/pci/cx88/cx88-alsa.c | 846 | 25792 | /*
*
* Support for audio capture
* PCI function #1 of the cx2388x.
*
* (c) 2007 Trent Piepho <xyzzy@speakeasy.org>
* (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org>
* (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
* Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org>
* Based on dummy.c by Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <media/wm8775.h>
#include "cx88.h"
#include "cx88-reg.h"
#define dprintk(level, fmt, arg...) do { \
if (debug + 1 > level) \
printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
} while(0)
#define dprintk_core(level, fmt, arg...) do { \
if (debug + 1 > level) \
printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
} while(0)
/****************************************************************************
Data type declarations - Can be moved to a header file later
****************************************************************************/
struct cx88_audio_buffer {
unsigned int bpl;
struct cx88_riscmem risc;
void *vaddr;
struct scatterlist *sglist;
int sglen;
int nr_pages;
};
struct cx88_audio_dev {
struct cx88_core *core;
struct cx88_dmaqueue q;
/* pci i/o */
struct pci_dev *pci;
/* audio controls */
int irq;
struct snd_card *card;
spinlock_t reg_lock;
atomic_t count;
unsigned int dma_size;
unsigned int period_size;
unsigned int num_periods;
struct cx88_audio_buffer *buf;
struct snd_pcm_substream *substream;
};
typedef struct cx88_audio_dev snd_cx88_card_t;
/****************************************************************************
Module global static vars
****************************************************************************/
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx88x soundcard. Default: enabled.");
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
/****************************************************************************
Module macros
****************************************************************************/
MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
"{{Conexant,23882},"
"{{Conexant,23883}");
static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages");
/****************************************************************************
Module specific functions
****************************************************************************/
/*
* BOARD Specific: Sets audio DMA
*/
static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
{
struct cx88_audio_buffer *buf = chip->buf;
struct cx88_core *core=chip->core;
const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
cx_clear(MO_AUD_DMACNTRL, 0x11);
/* setup fifo + format - out channel */
cx88_sram_channel_setup(chip->core, audio_ch, buf->bpl, buf->risc.dma);
/* sets bpl size */
cx_write(MO_AUDD_LNGTH, buf->bpl);
/* reset counter */
cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET);
atomic_set(&chip->count, 0);
dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
"byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1,
chip->num_periods, buf->bpl * chip->num_periods);
/* Enables corresponding bits at AUD_INT_STAT */
cx_write(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
/* Clean any pending interrupt bits already set */
cx_write(MO_AUD_INTSTAT, ~0);
/* enable audio irqs */
cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | PCI_INT_AUDINT);
/* start dma */
cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */
if (debug)
cx88_sram_channel_dump(chip->core, audio_ch);
return 0;
}
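/*
 * Summary of the start sequence above (descriptive only): stop FIFO and
 * RISC -> program the SRAM channel and line length -> reset the GP
 * counter -> unmask the audio interrupts -> enable the RISC processor
 * and the audio DMA channel. _cx88_stop_audio_dma() below undoes this
 * in roughly reverse order.
 */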
/*
* BOARD Specific: Resets audio DMA
*/
static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
{
struct cx88_core *core=chip->core;
dprintk(1, "Stopping audio DMA\n");
/* stop dma */
cx_clear(MO_AUD_DMACNTRL, 0x11);
/* disable irqs */
cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT);
cx_clear(MO_AUD_INTMSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC |
AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
if (debug)
cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
return 0;
}
#define MAX_IRQ_LOOP 50
/*
* BOARD Specific: IRQ dma bits
*/
static const char *cx88_aud_irqs[32] = {
"dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
NULL, /* reserved */
"dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
NULL, /* reserved */
"dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */
NULL, /* reserved */
"dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */
NULL, /* reserved */
"opc_err", "par_err", "rip_err", /* 16-18 */
"pci_abort", "ber_irq", "mchg_irq" /* 19-21 */
};
/*
* BOARD Specific: Handles audio-specific IRQ calls
*/
static void cx8801_aud_irq(snd_cx88_card_t *chip)
{
struct cx88_core *core = chip->core;
u32 status, mask;
status = cx_read(MO_AUD_INTSTAT);
mask = cx_read(MO_AUD_INTMSK);
if (0 == (status & mask))
return;
cx_write(MO_AUD_INTSTAT, status);
if (debug > 1 || (status & mask & ~0xff))
cx88_print_irqbits(core->name, "irq aud",
cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs),
status, mask);
/* risc op code error */
if (status & AUD_INT_OPC_ERR) {
printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name);
cx_clear(MO_AUD_DMACNTRL, 0x11);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]);
}
if (status & AUD_INT_DN_SYNC) {
dprintk(1, "Downstream sync error\n");
cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET);
return;
}
/* risc1 downstream */
if (status & AUD_INT_DN_RISCI1) {
atomic_set(&chip->count, cx_read(MO_AUDD_GPCNT));
snd_pcm_period_elapsed(chip->substream);
}
/* FIXME: Any other status should deserve a special handling? */
}
/*
* BOARD Specific: Handles IRQ calls
*/
static irqreturn_t cx8801_irq(int irq, void *dev_id)
{
snd_cx88_card_t *chip = dev_id;
struct cx88_core *core = chip->core;
u32 status;
int loop, handled = 0;
for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
status = cx_read(MO_PCI_INTSTAT) &
(core->pci_irqmask | PCI_INT_AUDINT);
if (0 == status)
goto out;
dprintk(3, "cx8801_irq loop %d/%d, status %x\n",
loop, MAX_IRQ_LOOP, status);
handled = 1;
cx_write(MO_PCI_INTSTAT, status);
if (status & core->pci_irqmask)
cx88_core_irq(core, status);
if (status & PCI_INT_AUDINT)
cx8801_aud_irq(chip);
}
if (MAX_IRQ_LOOP == loop) {
printk(KERN_ERR
"%s/1: IRQ loop detected, disabling interrupts\n",
core->name);
cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT);
}
out:
return IRQ_RETVAL(handled);
}
static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
{
struct cx88_audio_buffer *buf = chip->buf;
struct page *pg;
int i;
buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
if (NULL == buf->vaddr) {
dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
return -ENOMEM;
}
dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
(unsigned long)buf->vaddr,
nr_pages << PAGE_SHIFT);
memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
buf->nr_pages = nr_pages;
buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
if (NULL == buf->sglist)
goto vzalloc_err;
sg_init_table(buf->sglist, buf->nr_pages);
for (i = 0; i < buf->nr_pages; i++) {
pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
if (NULL == pg)
goto vmalloc_to_page_err;
sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
}
return 0;
vmalloc_to_page_err:
vfree(buf->sglist);
buf->sglist = NULL;
vzalloc_err:
vfree(buf->vaddr);
buf->vaddr = NULL;
return -ENOMEM;
}
static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
{
struct cx88_audio_buffer *buf = dev->buf;
buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
buf->nr_pages, PCI_DMA_FROMDEVICE);
if (0 == buf->sglen) {
pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
return -ENOMEM;
}
return 0;
}
static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
{
struct cx88_audio_buffer *buf = dev->buf;
if (!buf->sglen)
return 0;
dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
buf->sglen = 0;
return 0;
}
static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
{
vfree(buf->sglist);
buf->sglist = NULL;
vfree(buf->vaddr);
buf->vaddr = NULL;
return 0;
}
static int dsp_buffer_free(snd_cx88_card_t *chip)
{
struct cx88_riscmem *risc = &chip->buf->risc;
BUG_ON(!chip->dma_size);
dprintk(2,"Freeing buffer\n");
cx88_alsa_dma_unmap(chip);
cx88_alsa_dma_free(chip->buf);
if (risc->cpu)
pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
kfree(chip->buf);
chip->buf = NULL;
return 0;
}
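/*
 * Buffer lifecycle sketch (descriptive only): snd_cx88_hw_params()
 * builds the chain
 *
 *	vmalloc_32() -> sg list -> dma_map_sg() -> RISC program
 *
 * and dsp_buffer_free() tears it down in reverse: unmap the sg list,
 * free the pages, free the RISC program memory, then the buffer struct.
 */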
/****************************************************************************
ALSA PCM Interface
****************************************************************************/
/*
* Digital hardware definition
*/
#define DEFAULT_FIFO_SIZE 4096
static const struct snd_pcm_hardware snd_cx88_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
/* Analog audio output will be full of clicks and pops if there
are not exactly four lines in the SRAM FIFO buffer. */
.period_bytes_min = DEFAULT_FIFO_SIZE/4,
.period_bytes_max = DEFAULT_FIFO_SIZE/4,
.periods_min = 1,
.periods_max = 1024,
.buffer_bytes_max = (1024*1024),
};
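/*
 * Worked example (illustrative): with the default 4096-byte FIFO the
 * constraints above pin the period size to 4096/4 = 1024 bytes, i.e.
 * 256 frames of S16_LE stereo, so one period corresponds to exactly
 * one of the four FIFO lines set up in snd_cx88_pcm_open() below.
 */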
/*
* audio pcm capture open callback
*/
static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
if (!chip) {
printk(KERN_ERR "BUG: cx88 can't find device struct."
" Can't proceed with open\n");
return -ENODEV;
}
err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS);
if (err < 0)
goto _error;
chip->substream = substream;
runtime->hw = snd_cx88_digital_hw;
if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) {
unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
bpl &= ~7; /* must be multiple of 8 */
runtime->hw.period_bytes_min = bpl;
runtime->hw.period_bytes_max = bpl;
}
return 0;
_error:
dprintk(1,"Error opening PCM!\n");
return err;
}
/*
* audio close callback
*/
static int snd_cx88_close(struct snd_pcm_substream *substream)
{
return 0;
}
/*
* hw_params callback
*/
static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
struct snd_pcm_hw_params * hw_params)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
struct cx88_audio_buffer *buf;
int ret;
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
substream->runtime->dma_area = NULL;
}
chip->period_size = params_period_bytes(hw_params);
chip->num_periods = params_periods(hw_params);
chip->dma_size = chip->period_size * params_periods(hw_params);
BUG_ON(!chip->dma_size);
BUG_ON(chip->num_periods & (chip->num_periods-1));
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (NULL == buf)
return -ENOMEM;
chip->buf = buf;
buf->bpl = chip->period_size;
ret = cx88_alsa_dma_init(chip,
(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
if (ret < 0)
goto error;
ret = cx88_alsa_dma_map(chip);
if (ret < 0)
goto error;
ret = cx88_risc_databuffer(chip->pci, &buf->risc, buf->sglist,
chip->period_size, chip->num_periods, 1);
if (ret < 0)
goto error;
/* Loop back to start of program */
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
substream->runtime->dma_area = chip->buf->vaddr;
substream->runtime->dma_bytes = chip->dma_size;
substream->runtime->dma_addr = 0;
return 0;
error:
kfree(buf);
return ret;
}
/*
* hw free callback
*/
static int snd_cx88_hw_free(struct snd_pcm_substream * substream)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
if (substream->runtime->dma_area) {
dsp_buffer_free(chip);
substream->runtime->dma_area = NULL;
}
return 0;
}
/*
* prepare callback
*/
static int snd_cx88_prepare(struct snd_pcm_substream *substream)
{
return 0;
}
/*
* trigger callback
*/
static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
int err;
/* Local interrupts are already disabled by ALSA */
spin_lock(&chip->reg_lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
err=_cx88_start_audio_dma(chip);
break;
case SNDRV_PCM_TRIGGER_STOP:
err=_cx88_stop_audio_dma(chip);
break;
default:
err=-EINVAL;
break;
}
spin_unlock(&chip->reg_lock);
return err;
}
/*
* pointer callback
*/
static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
{
snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
u16 count;
count = atomic_read(&chip->count);
// dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__,
// count, new, count & (runtime->periods-1),
// runtime->period_size * (count & (runtime->periods-1)));
return runtime->period_size * (count & (runtime->periods-1));
}
/*
* page callback (needed for mmap)
*/
static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
unsigned long offset)
{
void *pageptr = substream->runtime->dma_area + offset;
return vmalloc_to_page(pageptr);
}
/*
* operators
*/
static struct snd_pcm_ops snd_cx88_pcm_ops = {
.open = snd_cx88_pcm_open,
.close = snd_cx88_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_cx88_hw_params,
.hw_free = snd_cx88_hw_free,
.prepare = snd_cx88_prepare,
.trigger = snd_cx88_card_trigger,
.pointer = snd_cx88_pointer,
.page = snd_cx88_page,
};
/*
* create a PCM device
*/
static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
{
int err;
struct snd_pcm *pcm;
err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
if (err < 0)
return err;
pcm->private_data = chip;
strcpy(pcm->name, name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx88_pcm_ops);
return 0;
}
/****************************************************************************
CONTROL INTERFACE
****************************************************************************/
static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 2;
info->value.integer.min = 0;
info->value.integer.max = 0x3f;
return 0;
}
static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core=chip->core;
int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f),
bal = cx_read(AUD_BAL_CTL);
value->value.integer.value[(bal & 0x40) ? 0 : 1] = vol;
vol -= (bal & 0x3f);
value->value.integer.value[(bal & 0x40) ? 1 : 0] = vol < 0 ? 0 : vol;
return 0;
}
static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
int left = value->value.integer.value[0];
int right = value->value.integer.value[1];
int v, b;
/* Pass volume & balance onto any WM8775 */
if (left >= right) {
v = left << 10;
b = left ? (0x8000 * right) / left : 0x8000;
} else {
v = right << 10;
b = right ? 0xffff - (0x8000 * left) / right : 0x8000;
}
wm8775_s_ctrl(core, V4L2_CID_AUDIO_VOLUME, v);
wm8775_s_ctrl(core, V4L2_CID_AUDIO_BALANCE, b);
}
/* OK - TODO: test it */
static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core=chip->core;
int left, right, v, b;
int changed = 0;
u32 old;
if (core->sd_wm8775)
snd_cx88_wm8775_volume_put(kcontrol, value);
left = value->value.integer.value[0] & 0x3f;
right = value->value.integer.value[1] & 0x3f;
b = right - left;
if (b < 0) {
v = 0x3f - left;
b = (-b) | 0x40;
} else {
v = 0x3f - right;
}
/* Do we really know this will always be called with IRQs on? */
spin_lock_irq(&chip->reg_lock);
old = cx_read(AUD_VOL_CTL);
if (v != (old & 0x3f)) {
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v);
changed = 1;
}
if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) {
cx_write(AUD_BAL_CTL, b);
changed = 1;
}
spin_unlock_irq(&chip->reg_lock);
return changed;
}
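/*
 * Worked example (illustrative numbers): left = 0x30, right = 0x20
 * gives b = -0x10, so v = 0x3f - 0x30 = 0x0f is the attenuation applied
 * to the louder (left) channel, and AUD_BAL_CTL = 0x10 | 0x40
 * (attenuate right by 0x10 relative to left, bit 0x40 marking left as
 * the louder side). snd_cx88_volume_get() above inverts exactly this
 * encoding.
 */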
static const DECLARE_TLV_DB_SCALE(snd_cx88_db_scale, -6300, 100, 0);
static const struct snd_kcontrol_new snd_cx88_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
.name = "Analog-TV Volume",
.info = snd_cx88_volume_info,
.get = snd_cx88_volume_get,
.put = snd_cx88_volume_put,
.tlv.p = snd_cx88_db_scale,
};
static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
value->value.integer.value[0] = !(cx_read(AUD_VOL_CTL) & bit);
return 0;
}
static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
u32 bit = kcontrol->private_value;
int ret = 0;
u32 vol;
spin_lock_irq(&chip->reg_lock);
vol = cx_read(AUD_VOL_CTL);
if (value->value.integer.value[0] != !(vol & bit)) {
vol ^= bit;
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
if (core->sd_wm8775 && ((1<<6) == bit))
wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
return ret;
}
static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Audio-Out Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<8),
};
static const struct snd_kcontrol_new snd_cx88_source_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog-TV Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<6),
};
static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
s32 val;
val = wm8775_g_ctrl(core, V4L2_CID_AUDIO_LOUDNESS);
value->value.integer.value[0] = val ? 1 : 0;
return 0;
}
static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
struct v4l2_control client_ctl;
memset(&client_ctl, 0, sizeof(client_ctl));
client_ctl.value = 0 != value->value.integer.value[0];
client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
return 0;
}
static struct snd_kcontrol_new snd_cx88_alc_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line-In ALC Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_alc_get,
.put = snd_cx88_alc_put,
};
/****************************************************************************
Basic Flow for Sound Devices
****************************************************************************/
/*
* PCI ID Table - 14f1:8801 and 14f1:8811 mean function 1: Audio.
* Only boards with an eeprom, and byte 1 of the eeprom set to 1, have it.
*/
static const struct pci_device_id cx88_audio_pci_tbl[] = {
{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
{0, }
};
MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
/*
* Chip-specific destructor
*/
static int snd_cx88_free(snd_cx88_card_t *chip)
{
if (chip->irq >= 0)
free_irq(chip->irq, chip);
cx88_core_put(chip->core,chip->pci);
pci_disable_device(chip->pci);
return 0;
}
/*
* Component Destructor
*/
static void snd_cx88_dev_free(struct snd_card * card)
{
snd_cx88_card_t *chip = card->private_data;
snd_cx88_free(chip);
}
/*
* Alsa Constructor - Component probe
*/
static int devno;
static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
snd_cx88_card_t **rchip,
struct cx88_core **core_ptr)
{
snd_cx88_card_t *chip;
struct cx88_core *core;
int err;
unsigned char pci_lat;
*rchip = NULL;
err = pci_enable_device(pci);
if (err < 0)
return err;
pci_set_master(pci);
chip = card->private_data;
core = cx88_core_get(pci);
if (NULL == core) {
err = -EINVAL;
return err;
}
if (!pci_dma_supported(pci,DMA_BIT_MASK(32))) {
dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
err = -EIO;
cx88_core_put(core, pci);
return err;
}
/* pci init */
chip->card = card;
chip->pci = pci;
chip->irq = -1;
spin_lock_init(&chip->reg_lock);
chip->core = core;
/* get irq */
err = request_irq(chip->pci->irq, cx8801_irq,
IRQF_SHARED, chip->core->name, chip);
if (err < 0) {
dprintk(0, "%s: can't get IRQ %d\n",
chip->core->name, chip->pci->irq);
return err;
}
/* print pci info */
pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat);
dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
"latency: %d, mmio: 0x%llx\n", core->name, devno,
pci_name(pci), pci->revision, pci->irq,
pci_lat, (unsigned long long)pci_resource_start(pci,0));
chip->irq = pci->irq;
synchronize_irq(chip->irq);
*rchip = chip;
*core_ptr = core;
return 0;
}
static int cx88_audio_initdev(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct snd_card *card;
snd_cx88_card_t *chip;
struct cx88_core *core = NULL;
int err;
if (devno >= SNDRV_CARDS)
return (-ENODEV);
if (!enable[devno]) {
++devno;
return (-ENOENT);
}
err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
sizeof(snd_cx88_card_t), &card);
if (err < 0)
return err;
card->private_free = snd_cx88_dev_free;
err = snd_cx88_create(card, pci, &chip, &core);
if (err < 0)
goto error;
err = snd_cx88_pcm(chip, 0, "CX88 Digital");
if (err < 0)
goto error;
err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_volume, chip));
if (err < 0)
goto error;
err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_dac_switch, chip));
if (err < 0)
goto error;
err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_source_switch, chip));
if (err < 0)
goto error;
/* If there's a wm8775 then add a Line-In ALC switch */
if (core->sd_wm8775)
snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
strcpy (card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",
card->shortname,(unsigned long long)pci_resource_start(pci, 0));
strcpy (card->mixername, "CX88");
dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
card->driver,devno);
err = snd_card_register(card);
if (err < 0)
goto error;
pci_set_drvdata(pci,card);
devno++;
return 0;
error:
snd_card_free(card);
return err;
}
/*
* ALSA destructor
*/
static void cx88_audio_finidev(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
snd_card_free(card);
devno--;
}
/*
* PCI driver definition
*/
static struct pci_driver cx88_audio_pci_driver = {
.name = "cx88_audio",
.id_table = cx88_audio_pci_tbl,
.probe = cx88_audio_initdev,
.remove = cx88_audio_finidev,
};
module_pci_driver(cx88_audio_pci_driver);
| gpl-2.0 |
EPDCenter/android_kernel_archos_97_titan | fs/ubifs/tnc_commit.c | 2382 | 27673 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Adrian Hunter
* Artem Bityutskiy (Битюцкий Артём)
*/
/* This file implements TNC functions for committing */
#include "ubifs.h"
/**
* make_idx_node - make an index node for fill-the-gaps method of TNC commit.
* @c: UBIFS file-system description object
* @idx: buffer in which to place new index node
* @znode: znode from which to make new index node
* @lnum: LEB number where new index node will be written
* @offs: offset where new index node will be written
* @len: length of new index node
*/
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
struct ubifs_znode *znode, int lnum, int offs, int len)
{
struct ubifs_znode *zp;
int i, err;
/* Make index node */
idx->ch.node_type = UBIFS_IDX_NODE;
idx->child_cnt = cpu_to_le16(znode->child_cnt);
idx->level = cpu_to_le16(znode->level);
for (i = 0; i < znode->child_cnt; i++) {
struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
struct ubifs_zbranch *zbr = &znode->zbranch[i];
key_write_idx(c, &zbr->key, &br->key);
br->lnum = cpu_to_le32(zbr->lnum);
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
ubifs_err("bad ref in znode");
dbg_dump_znode(c, znode);
if (zbr->znode)
dbg_dump_znode(c, zbr->znode);
}
}
ubifs_prepare_node(c, idx, len, 0);
#ifdef CONFIG_UBIFS_FS_DEBUG
znode->lnum = lnum;
znode->offs = offs;
znode->len = len;
#endif
err = insert_old_idx_znode(c, znode);
/* Update the parent */
zp = znode->parent;
if (zp) {
struct ubifs_zbranch *zbr;
zbr = &zp->zbranch[znode->iip];
zbr->lnum = lnum;
zbr->offs = offs;
zbr->len = len;
} else {
c->zroot.lnum = lnum;
c->zroot.offs = offs;
c->zroot.len = len;
}
c->calc_idx_sz += ALIGN(len, 8);
atomic_long_dec(&c->dirty_zn_cnt);
ubifs_assert(ubifs_zn_dirty(znode));
ubifs_assert(test_bit(COW_ZNODE, &znode->flags));
__clear_bit(DIRTY_ZNODE, &znode->flags);
__clear_bit(COW_ZNODE, &znode->flags);
return err;
}
/**
* fill_gap - make index nodes in gaps in dirty index LEBs.
* @c: UBIFS file-system description object
* @lnum: LEB number that gap appears in
* @gap_start: offset of start of gap
* @gap_end: offset of end of gap
* @dirt: adds dirty space to this
*
* This function returns the number of index nodes written into the gap.
*/
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
int *dirt)
{
int len, gap_remains, gap_pos, written, pad_len;
ubifs_assert((gap_start & 7) == 0);
ubifs_assert((gap_end & 7) == 0);
ubifs_assert(gap_end >= gap_start);
gap_remains = gap_end - gap_start;
if (!gap_remains)
return 0;
gap_pos = gap_start;
written = 0;
while (c->enext) {
len = ubifs_idx_node_sz(c, c->enext->child_cnt);
if (len < gap_remains) {
struct ubifs_znode *znode = c->enext;
const int alen = ALIGN(len, 8);
int err;
ubifs_assert(alen <= gap_remains);
err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
lnum, gap_pos, len);
if (err)
return err;
gap_remains -= alen;
gap_pos += alen;
c->enext = znode->cnext;
if (c->enext == c->cnext)
c->enext = NULL;
written += 1;
} else
break;
}
if (gap_end == c->leb_size) {
c->ileb_len = ALIGN(gap_pos, c->min_io_size);
/* Pad to end of min_io_size */
pad_len = c->ileb_len - gap_pos;
} else
/* Pad to end of gap */
pad_len = gap_remains;
dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
*dirt += pad_len;
return written;
}
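/*
 * Example (illustrative numbers): for a 160-byte gap and a next index
 * node of len = 120 (alen = 120), the node is written at gap_start,
 * leaving gap_remains = 40. No index node fits in 40 bytes, so the
 * loop stops and those 40 bytes are padded and accounted as dirt.
 */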
/**
* find_old_idx - find an index node obsoleted since the last commit start.
* @c: UBIFS file-system description object
* @lnum: LEB number of obsoleted index node
* @offs: offset of obsoleted index node
*
* Returns %1 if found and %0 otherwise.
*/
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
struct ubifs_old_idx *o;
struct rb_node *p;
p = c->old_idx.rb_node;
while (p) {
o = rb_entry(p, struct ubifs_old_idx, rb);
if (lnum < o->lnum)
p = p->rb_left;
else if (lnum > o->lnum)
p = p->rb_right;
else if (offs < o->offs)
p = p->rb_left;
else if (offs > o->offs)
p = p->rb_right;
else
return 1;
}
return 0;
}
/**
* is_idx_node_in_use - determine if an index node can be overwritten.
* @c: UBIFS file-system description object
* @key: key of index node
* @level: index node level
* @lnum: LEB number of index node
* @offs: offset of index node
*
* If @key / @lnum / @offs identify an index node that was not part of the old
* index, then this function returns %0 (obsolete). Else if the index node was
* part of the old index but is now dirty %1 is returned, else if it is clean %2
* is returned. A negative error code is returned on failure.
*/
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
int level, int lnum, int offs)
{
int ret;
ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
if (ret < 0)
return ret; /* Error code */
if (ret == 0)
if (find_old_idx(c, lnum, offs))
return 1;
return ret;
}
/**
* layout_leb_in_gaps - layout index nodes using in-the-gaps method.
* @c: UBIFS file-system description object
* @p: return LEB number here
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.
* This function merely puts the next znode into the next gap, making no attempt
* to try to maximise the number of znodes that fit.
* This function returns the number of index nodes written into the gaps, or a
* negative error code on failure.
*/
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;
int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;
tot_written = 0;
/* Get an index LEB with lots of obsolete index nodes */
lnum = ubifs_find_dirty_idx_leb(c);
if (lnum < 0)
/*
* There also may be dirt in the index head that could be
* filled, however we do not check there at present.
*/
return lnum; /* Error code */
*p = lnum;
dbg_gc("LEB %d", lnum);
/*
* Scan the index LEB. We use the generic scan for this even though
* it is more comprehensive and less efficient than is needed for this
* purpose.
*/
sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
c->ileb_len = 0;
if (IS_ERR(sleb))
return PTR_ERR(sleb);
gap_start = 0;
list_for_each_entry(snod, &sleb->nodes, list) {
struct ubifs_idx_node *idx;
int in_use, level;
ubifs_assert(snod->type == UBIFS_IDX_NODE);
idx = snod->node;
key_read(c, ubifs_idx_key(c, idx), &snod->key);
level = le16_to_cpu(idx->level);
/* Determine if the index node is in use (not obsolete) */
in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
snod->offs);
if (in_use < 0) {
ubifs_scan_destroy(sleb);
return in_use; /* Error code */
}
if (in_use) {
if (in_use == 1)
dirt += ALIGN(snod->len, 8);
/*
* The obsolete index nodes form gaps that can be
* overwritten. This gap has ended because we have
* found an index node that is still in use
* i.e. not obsolete
*/
gap_end = snod->offs;
/* Try to fill gap */
written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
if (written < 0) {
ubifs_scan_destroy(sleb);
return written; /* Error code */
}
tot_written += written;
gap_start = ALIGN(snod->offs + snod->len, 8);
}
}
ubifs_scan_destroy(sleb);
c->ileb_len = c->leb_size;
gap_end = c->leb_size;
/* Try to fill gap */
written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
if (written < 0)
return written; /* Error code */
tot_written += written;
if (tot_written == 0) {
struct ubifs_lprops lp;
dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
err = ubifs_read_one_lp(c, lnum, &lp);
if (err)
return err;
if (lp.free == c->leb_size) {
/*
* We must have snatched this LEB from the idx_gc list
* so we need to correct the free and dirty space.
*/
err = ubifs_change_one_lp(c, lnum,
c->leb_size - c->ileb_len,
dirt, 0, 0, 0);
if (err)
return err;
}
return 0;
}
err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
0, 0, 0);
if (err)
return err;
err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len,
UBI_SHORTTERM);
if (err)
return err;
dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
return tot_written;
}
/**
* get_leb_cnt - calculate the number of empty LEBs needed to commit.
* @c: UBIFS file-system description object
* @cnt: number of znodes to commit
*
* This function returns the number of empty LEBs needed to commit @cnt znodes
* to the current index head. The number is not exact and may be more than
* needed.
*/
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
int d;
/* Assume maximum index node size (i.e. overestimate space needed) */
cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
if (cnt < 0)
cnt = 0;
d = c->leb_size / c->max_idx_node_sz;
return DIV_ROUND_UP(cnt, d);
}
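/*
 * Worked example (illustrative numbers): with leb_size = 128KiB,
 * max_idx_node_sz = 512, ihead_offs = 120KiB and cnt = 1000 znodes,
 * the index head still takes (128K - 120K) / 512 = 16 nodes, leaving
 * cnt = 984; with d = 128K / 512 = 256 nodes per LEB this rounds up
 * to 4 empty LEBs. The estimate errs towards too many, never too few.
 */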
/**
* layout_in_gaps - in-the-gaps method of committing TNC.
* @c: UBIFS file-system description object
* @cnt: number of dirty znodes to commit.
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.
*
* This function returns %0 on success and a negative error code on failure.
*/
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
int err, leb_needed_cnt, written, *p;
dbg_gc("%d znodes to write", cnt);
c->gap_lebs = kmalloc(sizeof(int) * (c->lst.idx_lebs + 1), GFP_NOFS);
if (!c->gap_lebs)
return -ENOMEM;
p = c->gap_lebs;
do {
ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
if (err != -ENOSPC) {
kfree(c->gap_lebs);
c->gap_lebs = NULL;
return err;
}
if (dbg_force_in_the_gaps_enabled()) {
/*
* Do not print scary warnings if the debugging
* option which forces in-the-gaps is enabled.
*/
ubifs_warn("out of space");
dbg_dump_budg(c, &c->bi);
dbg_dump_lprops(c);
}
/* Try to commit anyway */
err = 0;
break;
}
p++;
cnt -= written;
leb_needed_cnt = get_leb_cnt(c, cnt);
dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
leb_needed_cnt, c->ileb_cnt);
} while (leb_needed_cnt > c->ileb_cnt);
*p = -1;
return 0;
}
/**
* layout_in_empty_space - layout index nodes in empty space.
* @c: UBIFS file-system description object
*
* This function lays out new index nodes for dirty znodes using empty LEBs.
*
* This function returns %0 on success and a negative error code on failure.
*/
static int layout_in_empty_space(struct ubifs_info *c)
{
struct ubifs_znode *znode, *cnext, *zp;
int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
int wlen, blen, err;
cnext = c->enext;
if (!cnext)
return 0;
lnum = c->ihead_lnum;
buf_offs = c->ihead_offs;
buf_len = ubifs_idx_node_sz(c, c->fanout);
buf_len = ALIGN(buf_len, c->min_io_size);
used = 0;
avail = buf_len;
/* Ensure there is enough room for first write */
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
if (buf_offs + next_len > c->leb_size)
lnum = -1;
while (1) {
znode = cnext;
len = ubifs_idx_node_sz(c, znode->child_cnt);
/* Determine the index node position */
if (lnum == -1) {
if (c->ileb_nxt >= c->ileb_cnt) {
ubifs_err("out of space");
return -ENOSPC;
}
lnum = c->ilebs[c->ileb_nxt++];
buf_offs = 0;
used = 0;
avail = buf_len;
}
offs = buf_offs + used;
#ifdef CONFIG_UBIFS_FS_DEBUG
znode->lnum = lnum;
znode->offs = offs;
znode->len = len;
#endif
/* Update the parent */
zp = znode->parent;
if (zp) {
struct ubifs_zbranch *zbr;
int i;
i = znode->iip;
zbr = &zp->zbranch[i];
zbr->lnum = lnum;
zbr->offs = offs;
zbr->len = len;
} else {
c->zroot.lnum = lnum;
c->zroot.offs = offs;
c->zroot.len = len;
}
c->calc_idx_sz += ALIGN(len, 8);
/*
* Once lprops is updated, we can decrease the dirty znode count
* but it is easier to just do it here.
*/
atomic_long_dec(&c->dirty_zn_cnt);
/*
* Calculate the next index node length to see if there is
* enough room for it
*/
cnext = znode->cnext;
if (cnext == c->cnext)
next_len = 0;
else
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
if (c->min_io_size == 1) {
buf_offs += ALIGN(len, 8);
if (next_len) {
if (buf_offs + next_len <= c->leb_size)
continue;
err = ubifs_update_one_lp(c, lnum, 0,
c->leb_size - buf_offs, 0, 0);
if (err)
return err;
lnum = -1;
continue;
}
err = ubifs_update_one_lp(c, lnum,
c->leb_size - buf_offs, 0, 0, 0);
if (err)
return err;
break;
}
/* Update buffer positions */
wlen = used + len;
used += ALIGN(len, 8);
avail -= ALIGN(len, 8);
if (next_len != 0 &&
buf_offs + used + next_len <= c->leb_size &&
avail > 0)
continue;
if (avail <= 0 && next_len &&
buf_offs + used + next_len <= c->leb_size)
blen = buf_len;
else
blen = ALIGN(wlen, c->min_io_size);
/* The buffer is full or there are no more znodes to do */
buf_offs += blen;
if (next_len) {
if (buf_offs + next_len > c->leb_size) {
err = ubifs_update_one_lp(c, lnum,
c->leb_size - buf_offs, blen - used,
0, 0);
if (err)
return err;
lnum = -1;
}
used -= blen;
if (used < 0)
used = 0;
avail = buf_len - used;
continue;
}
err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
blen - used, 0, 0);
if (err)
return err;
break;
}
#ifdef CONFIG_UBIFS_FS_DEBUG
c->dbg->new_ihead_lnum = lnum;
c->dbg->new_ihead_offs = buf_offs;
#endif
return 0;
}
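/*
 * Illustrative walk-through of the buffer accounting above (assumed
 * numbers): let buf_len = 2048 and lay out three nodes of len = 700
 * (ALIGNed to 704). After the third node used = 2112 and avail = -64,
 * so a full buffer is accounted (blen = buf_len = 2048), buf_offs
 * advances by 2048, and used = 64 (the aligned tail of the third node)
 * carries over into the next buffer - exactly the bytes that
 * write_index() below later memmove()s to the front of c->cbuf.
 */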
/**
* layout_commit - determine positions of index nodes to commit.
* @c: UBIFS file-system description object
* @no_space: indicates that insufficient empty LEBs were allocated
* @cnt: number of znodes to commit
*
* Calculate and update the positions of index nodes to commit. If there were
* an insufficient number of empty LEBs allocated, then index nodes are placed
* into the gaps created by obsolete index nodes in non-empty index LEBs. For
* this purpose, an obsolete index node is one that was not in the index as at
* the end of the last commit. To write "in-the-gaps" requires that those index
* LEBs are updated atomically in-place.
*/
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
int err;
if (no_space) {
err = layout_in_gaps(c, cnt);
if (err)
return err;
}
err = layout_in_empty_space(c);
return err;
}
/**
* find_first_dirty - find first dirty znode.
* @znode: znode to begin searching from
*/
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
int i, cont;
if (!znode)
return NULL;
while (1) {
if (znode->level == 0) {
if (ubifs_zn_dirty(znode))
return znode;
return NULL;
}
cont = 0;
for (i = 0; i < znode->child_cnt; i++) {
struct ubifs_zbranch *zbr = &znode->zbranch[i];
if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
znode = zbr->znode;
cont = 1;
break;
}
}
if (!cont) {
if (ubifs_zn_dirty(znode))
return znode;
return NULL;
}
}
}
/**
* find_next_dirty - find next dirty znode.
* @znode: znode to begin searching from
*/
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
int n = znode->iip + 1;
znode = znode->parent;
if (!znode)
return NULL;
for (; n < znode->child_cnt; n++) {
struct ubifs_zbranch *zbr = &znode->zbranch[n];
if (zbr->znode && ubifs_zn_dirty(zbr->znode))
return find_first_dirty(zbr->znode);
}
return znode;
}
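/*
 * Together, find_first_dirty() and find_next_dirty() implement a
 * post-order walk over the dirty part of the tree: every znode appears
 * in the commit list after its dirty children. That ordering is what
 * allows the layout functions to fill in a child's final LEB/offset in
 * the parent's zbranch before the parent itself is serialized.
 */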
/**
* get_znodes_to_commit - create list of dirty znodes to commit.
* @c: UBIFS file-system description object
*
* This function returns the number of znodes to commit.
*/
static int get_znodes_to_commit(struct ubifs_info *c)
{
struct ubifs_znode *znode, *cnext;
int cnt = 0;
c->cnext = find_first_dirty(c->zroot.znode);
znode = c->enext = c->cnext;
if (!znode) {
dbg_cmt("no znodes to commit");
return 0;
}
cnt += 1;
while (1) {
ubifs_assert(!test_bit(COW_ZNODE, &znode->flags));
__set_bit(COW_ZNODE, &znode->flags);
znode->alt = 0;
cnext = find_next_dirty(znode);
if (!cnext) {
znode->cnext = c->cnext;
break;
}
znode->cnext = cnext;
znode = cnext;
cnt += 1;
}
dbg_cmt("committing %d znodes", cnt);
ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt));
return cnt;
}
/**
* alloc_idx_lebs - allocate empty LEBs to be used to commit.
* @c: UBIFS file-system description object
* @cnt: number of znodes to commit
*
* This function returns %-ENOSPC if it cannot allocate a sufficient number of
* empty LEBs. %0 is returned on success, otherwise a negative error code
* is returned.
*/
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
int i, leb_cnt, lnum;
c->ileb_cnt = 0;
c->ileb_nxt = 0;
leb_cnt = get_leb_cnt(c, cnt);
dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
if (!leb_cnt)
return 0;
c->ilebs = kmalloc(leb_cnt * sizeof(int), GFP_NOFS);
if (!c->ilebs)
return -ENOMEM;
for (i = 0; i < leb_cnt; i++) {
lnum = ubifs_find_free_leb_for_idx(c);
if (lnum < 0)
return lnum;
c->ilebs[c->ileb_cnt++] = lnum;
dbg_cmt("LEB %d", lnum);
}
if (dbg_force_in_the_gaps())
return -ENOSPC;
return 0;
}
/**
* free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
* @c: UBIFS file-system description object
*
* It is possible that we allocate more empty LEBs for the commit than we need.
* This function frees the surplus.
*
* This function returns %0 on success and a negative error code on failure.
*/
static int free_unused_idx_lebs(struct ubifs_info *c)
{
int i, err = 0, lnum, er;
for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
lnum = c->ilebs[i];
dbg_cmt("LEB %d", lnum);
er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
LPROPS_INDEX | LPROPS_TAKEN, 0);
if (!err)
err = er;
}
return err;
}
/**
* free_idx_lebs - free unused LEBs after commit end.
* @c: UBIFS file-system description object
*
* This function returns %0 on success and a negative error code on failure.
*/
static int free_idx_lebs(struct ubifs_info *c)
{
int err;
err = free_unused_idx_lebs(c);
kfree(c->ilebs);
c->ilebs = NULL;
return err;
}
/**
* ubifs_tnc_start_commit - start TNC commit.
* @c: UBIFS file-system description object
* @zroot: new index root position is returned here
*
* This function prepares the list of indexing nodes to commit and lays out
* their positions on flash. If there is not enough free space it uses the
* in-gap commit method. Returns zero in case of success and a negative error
* code in case of failure.
*/
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
int err = 0, cnt;
mutex_lock(&c->tnc_mutex);
err = dbg_check_tnc(c, 1);
if (err)
goto out;
cnt = get_znodes_to_commit(c);
if (cnt != 0) {
int no_space = 0;
err = alloc_idx_lebs(c, cnt);
if (err == -ENOSPC)
no_space = 1;
else if (err)
goto out_free;
err = layout_commit(c, no_space, cnt);
if (err)
goto out_free;
ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
err = free_unused_idx_lebs(c);
if (err)
goto out;
}
destroy_old_idx(c);
memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));
err = ubifs_save_dirty_idx_lnums(c);
if (err)
goto out;
spin_lock(&c->space_lock);
/*
* Although we have not finished committing yet, update size of the
* committed index ('c->bi.old_idx_sz') and zero out the index growth
* budget. It is OK to do this now, because we've reserved all the
* space which is needed to commit the index, and it is safe for the
* budgeting subsystem to assume the index is already committed,
* even though it is not.
*/
ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
c->bi.old_idx_sz = c->calc_idx_sz;
c->bi.uncommitted_idx = 0;
c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
spin_unlock(&c->space_lock);
mutex_unlock(&c->tnc_mutex);
dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
dbg_cmt("size of index %llu", c->calc_idx_sz);
return err;
out_free:
free_idx_lebs(c);
out:
mutex_unlock(&c->tnc_mutex);
return err;
}
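/*
 * Commit flow sketch (descriptive only): ubifs_tnc_start_commit()
 * above picks the dirty znodes and decides where they will live;
 * ubifs_tnc_end_commit() below actually writes them via write_index().
 * In between, the TNC stays usable because znodes that are part of the
 * commit are flagged %COW_ZNODE and copied before being modified.
 */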
/**
* write_index - write index nodes.
* @c: UBIFS file-system description object
*
* This function writes the index nodes whose positions were laid out in the
* layout_in_empty_space function.
*/
static int write_index(struct ubifs_info *c)
{
struct ubifs_idx_node *idx;
struct ubifs_znode *znode, *cnext;
int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
int avail, wlen, err, lnum_pos = 0;
cnext = c->enext;
if (!cnext)
return 0;
/*
* Always write index nodes to the index head so that index nodes and
* other types of nodes are never mixed in the same erase block.
*/
lnum = c->ihead_lnum;
buf_offs = c->ihead_offs;
/* Allocate commit buffer */
buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
used = 0;
avail = buf_len;
/* Ensure there is enough room for first write */
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
if (buf_offs + next_len > c->leb_size) {
err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
LPROPS_TAKEN);
if (err)
return err;
lnum = -1;
}
while (1) {
cond_resched();
znode = cnext;
idx = c->cbuf + used;
/* Make index node */
idx->ch.node_type = UBIFS_IDX_NODE;
idx->child_cnt = cpu_to_le16(znode->child_cnt);
idx->level = cpu_to_le16(znode->level);
for (i = 0; i < znode->child_cnt; i++) {
struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
struct ubifs_zbranch *zbr = &znode->zbranch[i];
key_write_idx(c, &zbr->key, &br->key);
br->lnum = cpu_to_le32(zbr->lnum);
br->offs = cpu_to_le32(zbr->offs);
br->len = cpu_to_le32(zbr->len);
if (!zbr->lnum || !zbr->len) {
ubifs_err("bad ref in znode");
dbg_dump_znode(c, znode);
if (zbr->znode)
dbg_dump_znode(c, zbr->znode);
}
}
len = ubifs_idx_node_sz(c, znode->child_cnt);
ubifs_prepare_node(c, idx, len, 0);
/* Determine the index node position */
if (lnum == -1) {
lnum = c->ilebs[lnum_pos++];
buf_offs = 0;
used = 0;
avail = buf_len;
}
offs = buf_offs + used;
#ifdef CONFIG_UBIFS_FS_DEBUG
if (lnum != znode->lnum || offs != znode->offs ||
len != znode->len) {
ubifs_err("inconsistent znode posn");
return -EINVAL;
}
#endif
/* Grab some stuff from znode while we still can */
cnext = znode->cnext;
ubifs_assert(ubifs_zn_dirty(znode));
ubifs_assert(test_bit(COW_ZNODE, &znode->flags));
/*
* It is important that other threads should see %DIRTY_ZNODE
* flag cleared before %COW_ZNODE. Specifically, it matters in
* the 'dirty_cow_znode()' function. This is the reason for the
* first barrier. Also, we want the bit changes to be seen to
* other threads ASAP, to avoid unnecessary copying, which is
* the reason for the second barrier.
*/
clear_bit(DIRTY_ZNODE, &znode->flags);
smp_mb__before_clear_bit();
clear_bit(COW_ZNODE, &znode->flags);
smp_mb__after_clear_bit();
/* Do not access znode from this point on */
/* Update buffer positions */
wlen = used + len;
used += ALIGN(len, 8);
avail -= ALIGN(len, 8);
/*
* Calculate the next index node length to see if there is
* enough room for it
*/
if (cnext == c->cnext)
next_len = 0;
else
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
if (c->min_io_size == 1) {
/*
* Write the prepared index node immediately if there is
* no minimum IO size
*/
err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs,
wlen, UBI_SHORTTERM);
if (err)
return err;
buf_offs += ALIGN(wlen, 8);
if (next_len) {
used = 0;
avail = buf_len;
if (buf_offs + next_len > c->leb_size) {
err = ubifs_update_one_lp(c, lnum,
LPROPS_NC, 0, 0, LPROPS_TAKEN);
if (err)
return err;
lnum = -1;
}
continue;
}
} else {
int blen, nxt_offs = buf_offs + used + next_len;
if (next_len && nxt_offs <= c->leb_size) {
if (avail > 0)
continue;
else
blen = buf_len;
} else {
wlen = ALIGN(wlen, 8);
blen = ALIGN(wlen, c->min_io_size);
ubifs_pad(c, c->cbuf + wlen, blen - wlen);
}
/*
* The buffer is full or there are no more znodes
* to do
*/
err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs,
blen, UBI_SHORTTERM);
if (err)
return err;
buf_offs += blen;
if (next_len) {
if (nxt_offs > c->leb_size) {
err = ubifs_update_one_lp(c, lnum,
LPROPS_NC, 0, 0, LPROPS_TAKEN);
if (err)
return err;
lnum = -1;
}
used -= blen;
if (used < 0)
used = 0;
avail = buf_len - used;
memmove(c->cbuf, c->cbuf + blen, used);
continue;
}
}
break;
}
#ifdef CONFIG_UBIFS_FS_DEBUG
if (lnum != c->dbg->new_ihead_lnum ||
buf_offs != c->dbg->new_ihead_offs) {
ubifs_err("inconsistent ihead");
return -EINVAL;
}
#endif
c->ihead_lnum = lnum;
c->ihead_offs = buf_offs;
return 0;
}
/**
* free_obsolete_znodes - free obsolete znodes.
* @c: UBIFS file-system description object
*
* At the end of commit end, obsolete znodes are freed.
*/
static void free_obsolete_znodes(struct ubifs_info *c)
{
struct ubifs_znode *znode, *cnext;
cnext = c->cnext;
do {
znode = cnext;
cnext = znode->cnext;
if (test_bit(OBSOLETE_ZNODE, &znode->flags))
kfree(znode);
else {
znode->cnext = NULL;
atomic_long_inc(&c->clean_zn_cnt);
atomic_long_inc(&ubifs_clean_zn_cnt);
}
} while (cnext != c->cnext);
}
/**
* return_gap_lebs - return LEBs used by the in-gap commit method.
* @c: UBIFS file-system description object
*
* This function clears the "taken" flag for the LEBs which were used by the
* "commit in-the-gaps" method.
*/
static int return_gap_lebs(struct ubifs_info *c)
{
int *p, err;
if (!c->gap_lebs)
return 0;
dbg_cmt("");
for (p = c->gap_lebs; *p != -1; p++) {
err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
LPROPS_TAKEN, 0);
if (err)
return err;
}
kfree(c->gap_lebs);
c->gap_lebs = NULL;
return 0;
}
/**
* ubifs_tnc_end_commit - update the TNC for commit end.
* @c: UBIFS file-system description object
*
* Write the dirty znodes.
*/
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
int err;
if (!c->cnext)
return 0;
err = return_gap_lebs(c);
if (err)
return err;
err = write_index(c);
if (err)
return err;
mutex_lock(&c->tnc_mutex);
dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
free_obsolete_znodes(c);
c->cnext = NULL;
kfree(c->ilebs);
c->ilebs = NULL;
mutex_unlock(&c->tnc_mutex);
return 0;
}
| gpl-2.0 |
aborche/cx-919-radxa-rbox-linux-rockchip | fs/ocfs2/dlmfs/dlmfs.c | 2894 | 17478 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* dlmfs.c
*
* Code which implements the kernel side of a minimal userspace
* interface to our DLM. This file handles the virtual file system
* used for communication with userspace. Credit should go to ramfs,
* which was a template for the fs side of this module.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
/* Simple VFS hooks based on: */
/*
* Resizable simple ram filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include "stackglue.h"
#include "userdlm.h"
#include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
static const struct inode_operations dlmfs_dir_inode_operations;
static const struct inode_operations dlmfs_root_inode_operations;
static const struct inode_operations dlmfs_file_inode_operations;
static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
/*
* These are the ABI capabilities of dlmfs.
*
* Over time, dlmfs has added some features that were not part of the
* initial ABI. Unfortunately, some of these features are not detectable
* via standard usage. For example, Linux's default poll always returns
* POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
* added poll support. Instead, we provide this list of new capabilities.
*
* Capabilities is a read-only attribute. We do it as a module parameter
* so we can discover it whether dlmfs is built in, loaded, or even not
* loaded.
*
* The ABI features are local to this machine's dlmfs mount. This is
* distinct from the locking protocol, which is concerned with inter-node
* interaction.
*
* Capabilities:
* - bast : POLLIN against the file descriptor of a held lock
* signifies a bast fired on the lock.
*/
#define DLMFS_CAPABILITIES "bast stackglue"
static int param_set_dlmfs_capabilities(const char *val,
struct kernel_param *kp)
{
printk(KERN_ERR "%s: readonly parameter\n", kp->name);
return -EINVAL;
}
static int param_get_dlmfs_capabilities(char *buffer,
struct kernel_param *kp)
{
return strlcpy(buffer, DLMFS_CAPABILITIES,
strlen(DLMFS_CAPABILITIES) + 1);
}
module_param_call(capabilities, param_set_dlmfs_capabilities,
param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
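/*
 * Illustrative note (not from the original source): because the
 * capabilities list is exported as a read-only module parameter,
 * userspace can query it without mounting anything, e.g.:
 *
 *   cat /sys/module/ocfs2_dlmfs/parameters/capabilities
 *
 * The exact module name in the sysfs path is an assumption based on
 * how this file is usually built.
 */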
/*
* decodes a set of open flags into a valid lock level and a set of flags.
* returns < 0 if we have invalid flags
* flags which mean something to us:
* O_RDONLY -> PRMODE level
* O_WRONLY -> EXMODE level
*
* O_NONBLOCK -> NOQUEUE
*/
static int dlmfs_decode_open_flags(int open_flags,
int *level,
int *flags)
{
if (open_flags & (O_WRONLY|O_RDWR))
*level = DLM_LOCK_EX;
else
*level = DLM_LOCK_PR;
*flags = 0;
if (open_flags & O_NONBLOCK)
*flags |= DLM_LKF_NOQUEUE;
return 0;
}
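/*
 * Illustrative sketch (not part of the original source): given the
 * mapping above, userspace takes a shared (PR) lock by opening the
 * lock file read-only, e.g.:
 *
 *   int fd = open("/dlm/mydomain/mylock", O_RDONLY | O_NONBLOCK);
 *
 * A failure with errno == ETXTBSY means the lock could not be
 * granted immediately (see dlmfs_file_open() below). The mount point
 * and file names here are assumptions; any directory created under
 * the dlmfs mount names a DLM domain.
 */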
static int dlmfs_file_open(struct inode *inode,
struct file *file)
{
int status, level, flags;
struct dlmfs_filp_private *fp = NULL;
struct dlmfs_inode_private *ip;
BUG_ON(S_ISDIR(inode->i_mode));
mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
file->f_flags);
status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
if (status < 0)
goto bail;
/* We don't want to honor O_APPEND at read/write time as it
* doesn't make sense for LVB writes. */
file->f_flags &= ~O_APPEND;
fp = kmalloc(sizeof(*fp), GFP_NOFS);
if (!fp) {
status = -ENOMEM;
goto bail;
}
fp->fp_lock_level = level;
ip = DLMFS_I(inode);
status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
if (status < 0) {
/* this is a strange error to return here but I want
* userspace to be able to distinguish a valid lock
* request from one that simply couldn't be granted. */
if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
status = -ETXTBSY;
kfree(fp);
goto bail;
}
file->private_data = fp;
bail:
return status;
}
static int dlmfs_file_release(struct inode *inode,
struct file *file)
{
int level, status;
struct dlmfs_inode_private *ip = DLMFS_I(inode);
struct dlmfs_filp_private *fp = file->private_data;
BUG_ON(S_ISDIR(inode->i_mode));
mlog(0, "close called on inode %lu\n", inode->i_ino);
status = 0;
if (fp) {
level = fp->fp_lock_level;
if (level != DLM_LOCK_IV)
user_dlm_cluster_unlock(&ip->ip_lockres, level);
kfree(fp);
file->private_data = NULL;
}
return 0;
}
/*
* We do ->setattr() just to override size changes. Our size is the size
* of the LVB and nothing else.
*/
static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
{
int error;
struct inode *inode = dentry->d_inode;
attr->ia_valid &= ~ATTR_SIZE;
error = inode_change_ok(inode, attr);
if (error)
return error;
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
}
static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
{
int event = 0;
struct inode *inode = file->f_path.dentry->d_inode;
struct dlmfs_inode_private *ip = DLMFS_I(inode);
poll_wait(file, &ip->ip_lockres.l_event, wait);
spin_lock(&ip->ip_lockres.l_lock);
if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
event = POLLIN | POLLRDNORM;
spin_unlock(&ip->ip_lockres.l_lock);
return event;
}
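/*
 * Illustrative sketch (not part of the original source): combined
 * with the "bast" capability above, a lock holder can wait for a
 * blocking AST with poll(2), e.g.:
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   poll(&pfd, 1, -1);  (POLLIN here: another node wants this lock)
 */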
static ssize_t dlmfs_file_read(struct file *filp,
char __user *buf,
size_t count,
loff_t *ppos)
{
int bytes_left;
ssize_t readlen, got;
char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode;
mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
inode->i_ino, count, *ppos);
if (*ppos >= i_size_read(inode))
return 0;
if (!count)
return 0;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
/* don't read past the lvb */
if ((count + *ppos) > i_size_read(inode))
readlen = i_size_read(inode) - *ppos;
else
readlen = count;
lvb_buf = kmalloc(readlen, GFP_NOFS);
if (!lvb_buf)
return -ENOMEM;
got = user_dlm_read_lvb(inode, lvb_buf, readlen);
if (got) {
BUG_ON(got != readlen);
bytes_left = __copy_to_user(buf, lvb_buf, readlen);
readlen -= bytes_left;
} else
readlen = 0;
kfree(lvb_buf);
*ppos = *ppos + readlen;
mlog(0, "read %zd bytes\n", readlen);
return readlen;
}
static ssize_t dlmfs_file_write(struct file *filp,
const char __user *buf,
size_t count,
loff_t *ppos)
{
int bytes_left;
ssize_t writelen;
char *lvb_buf;
struct inode *inode = filp->f_path.dentry->d_inode;
mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
inode->i_ino, count, *ppos);
if (*ppos >= i_size_read(inode))
return -ENOSPC;
if (!count)
return 0;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
/* don't write past the lvb */
if ((count + *ppos) > i_size_read(inode))
writelen = i_size_read(inode) - *ppos;
else
writelen = count;
lvb_buf = kmalloc(writelen, GFP_NOFS);
if (!lvb_buf)
return -ENOMEM;
bytes_left = copy_from_user(lvb_buf, buf, writelen);
writelen -= bytes_left;
if (writelen)
user_dlm_write_lvb(inode, lvb_buf, writelen);
kfree(lvb_buf);
*ppos = *ppos + writelen;
mlog(0, "wrote %zd bytes\n", writelen);
return writelen;
}
static void dlmfs_init_once(void *foo)
{
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
ip->ip_conn = NULL;
ip->ip_parent = NULL;
inode_init_once(&ip->ip_vfs_inode);
}
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
struct dlmfs_inode_private *ip;
ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
if (!ip)
return NULL;
return &ip->ip_vfs_inode;
}
static void dlmfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
static void dlmfs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, dlmfs_i_callback);
}
static void dlmfs_evict_inode(struct inode *inode)
{
int status;
struct dlmfs_inode_private *ip;
end_writeback(inode);
mlog(0, "inode %lu\n", inode->i_ino);
ip = DLMFS_I(inode);
if (S_ISREG(inode->i_mode)) {
status = user_dlm_destroy_lock(&ip->ip_lockres);
if (status < 0)
mlog_errno(status);
iput(ip->ip_parent);
goto clear_fields;
}
mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
/* we must be a directory. If required, let's unregister the
* dlm context now. */
if (ip->ip_conn)
user_dlm_unregister(ip->ip_conn);
clear_fields:
ip->ip_parent = NULL;
ip->ip_conn = NULL;
}
static struct backing_dev_info dlmfs_backing_dev_info = {
.name = "ocfs2-dlmfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
int mode = S_IFDIR | 0755;
struct dlmfs_inode_private *ip;
if (inode) {
ip = DLMFS_I(inode);
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inc_nlink(inode);
inode->i_fop = &simple_dir_operations;
inode->i_op = &dlmfs_root_inode_operations;
}
return inode;
}
static struct inode *dlmfs_get_inode(struct inode *parent,
struct dentry *dentry,
int mode)
{
struct super_block *sb = parent->i_sb;
struct inode * inode = new_inode(sb);
struct dlmfs_inode_private *ip;
if (!inode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ip = DLMFS_I(inode);
ip->ip_conn = DLMFS_I(parent)->ip_conn;
switch (mode & S_IFMT) {
default:
/* for now we don't support anything other than
* directories and regular files. */
BUG();
break;
case S_IFREG:
inode->i_op = &dlmfs_file_inode_operations;
inode->i_fop = &dlmfs_file_operations;
i_size_write(inode, DLM_LVB_LEN);
user_dlm_lock_res_init(&ip->ip_lockres, dentry);
/* released at clear_inode time, this ensures that we
* get to drop the dlm reference on each lock *before*
* we call the unregister code for releasing parent
* directories. */
ip->ip_parent = igrab(parent);
BUG_ON(!ip->ip_parent);
break;
case S_IFDIR:
inode->i_op = &dlmfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink ==
* 2 (for "." entry) */
inc_nlink(inode);
break;
}
if (parent->i_mode & S_ISGID) {
inode->i_gid = parent->i_gid;
if (S_ISDIR(mode))
inode->i_mode |= S_ISGID;
}
return inode;
}
/*
* File creation. Allocate an inode, and we're done.
*/
/* SMP-safe */
static int dlmfs_mkdir(struct inode * dir,
struct dentry * dentry,
int mode)
{
int status;
struct inode *inode = NULL;
struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
struct ocfs2_cluster_connection *conn;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
/* verify that we have a proper domain */
if (domain->len >= GROUP_NAME_MAX) {
status = -EINVAL;
mlog(ML_ERROR, "invalid domain name for directory.\n");
goto bail;
}
inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
if (!inode) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
ip = DLMFS_I(inode);
conn = user_dlm_register(domain);
if (IS_ERR(conn)) {
status = PTR_ERR(conn);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
status, domain->len, domain->name);
goto bail;
}
ip->ip_conn = conn;
inc_nlink(dir);
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
status = 0;
bail:
if (status < 0)
iput(inode);
return status;
}
static int dlmfs_create(struct inode *dir,
struct dentry *dentry,
int mode,
struct nameidata *nd)
{
int status = 0;
struct inode *inode;
struct qstr *name = &dentry->d_name;
mlog(0, "create %.*s\n", name->len, name->name);
/* verify name is valid and doesn't contain any dlm reserved
* characters */
if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
name->name[0] == '$') {
status = -EINVAL;
mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
name->name);
goto bail;
}
inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
if (!inode) {
status = -ENOMEM;
mlog_errno(status);
goto bail;
}
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
bail:
return status;
}
static int dlmfs_unlink(struct inode *dir,
struct dentry *dentry)
{
int status;
struct inode *inode = dentry->d_inode;
mlog(0, "unlink inode %lu\n", inode->i_ino);
/* if there are no current holders, or none that are waiting
* to acquire a lock, this basically destroys our lockres. */
status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
if (status < 0) {
mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
dentry->d_name.len, dentry->d_name.name, status);
goto bail;
}
status = simple_unlink(dir, dentry);
bail:
return status;
}
static int dlmfs_fill_super(struct super_block * sb,
void * data,
int silent)
{
struct inode * inode;
struct dentry * root;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = DLMFS_MAGIC;
sb->s_op = &dlmfs_ops;
inode = dlmfs_get_root_inode(sb);
if (!inode)
return -ENOMEM;
root = d_alloc_root(inode);
if (!root) {
iput(inode);
return -ENOMEM;
}
sb->s_root = root;
return 0;
}
static const struct file_operations dlmfs_file_operations = {
.open = dlmfs_file_open,
.release = dlmfs_file_release,
.poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
.llseek = default_llseek,
};
static const struct inode_operations dlmfs_dir_inode_operations = {
.create = dlmfs_create,
.lookup = simple_lookup,
.unlink = dlmfs_unlink,
};
/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
.lookup = simple_lookup,
.mkdir = dlmfs_mkdir,
.rmdir = simple_rmdir,
};
static const struct super_operations dlmfs_ops = {
.statfs = simple_statfs,
.alloc_inode = dlmfs_alloc_inode,
.destroy_inode = dlmfs_destroy_inode,
.evict_inode = dlmfs_evict_inode,
.drop_inode = generic_delete_inode,
};
static const struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
.setattr = dlmfs_file_setattr,
};
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}
static struct file_system_type dlmfs_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2_dlmfs",
.mount = dlmfs_mount,
.kill_sb = kill_litter_super,
};
static int __init init_dlmfs_fs(void)
{
int status;
int cleanup_inode = 0, cleanup_worker = 0;
dlmfs_print_version();
status = bdi_init(&dlmfs_backing_dev_info);
if (status)
return status;
dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
sizeof(struct dlmfs_inode_private),
0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
dlmfs_init_once);
if (!dlmfs_inode_cache) {
status = -ENOMEM;
goto bail;
}
cleanup_inode = 1;
user_dlm_worker = create_singlethread_workqueue("user_dlm");
if (!user_dlm_worker) {
status = -ENOMEM;
goto bail;
}
cleanup_worker = 1;
user_dlm_set_locking_protocol();
status = register_filesystem(&dlmfs_fs_type);
bail:
if (status) {
if (cleanup_inode)
kmem_cache_destroy(dlmfs_inode_cache);
if (cleanup_worker)
destroy_workqueue(user_dlm_worker);
bdi_destroy(&dlmfs_backing_dev_info);
} else
printk("OCFS2 User DLM kernel interface loaded\n");
return status;
}
static void __exit exit_dlmfs_fs(void)
{
unregister_filesystem(&dlmfs_fs_type);
flush_workqueue(user_dlm_worker);
destroy_workqueue(user_dlm_worker);
kmem_cache_destroy(dlmfs_inode_cache);
bdi_destroy(&dlmfs_backing_dev_info);
}
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)
| gpl-2.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c | 3406 | 3754 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/ibus.h>
struct nvc0_ibus_priv {
struct nouveau_ibus base;
};
static void
nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
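/*
 * Top-level IBUS interrupt dispatch: intr0 carries HUB status in bits
 * 8-15 and ROP status in bits 16-31, while intr1 carries one bit per
 * GPC; the unit counts are read from 0x121c70..0x121c78.
 */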
static void
nvc0_ibus_intr(struct nouveau_subdev *subdev)
{
struct nvc0_ibus_priv *priv = (void *)subdev;
u32 intr0 = nv_rd32(priv, 0x121c58);
u32 intr1 = nv_rd32(priv, 0x121c5c);
u32 hubnr = nv_rd32(priv, 0x121c70);
u32 ropnr = nv_rd32(priv, 0x121c74);
u32 gpcnr = nv_rd32(priv, 0x121c78);
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
nvc0_ibus_intr_hub(priv, i);
intr0 &= ~stat;
}
}
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
nvc0_ibus_intr_rop(priv, i);
intr0 &= ~stat;
}
}
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
nvc0_ibus_intr_gpc(priv, i);
intr1 &= ~stat;
}
}
}
static int
nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_ibus_priv *priv;
int ret;
ret = nouveau_ibus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->intr = nvc0_ibus_intr;
return 0;
}
struct nouveau_oclass
nvc0_ibus_oclass = {
.handle = NV_SUBDEV(IBUS, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ibus_ctor,
.dtor = _nouveau_ibus_dtor,
.init = _nouveau_ibus_init,
.fini = _nouveau_ibus_fini,
},
};
| gpl-2.0 |
taudac/linux | drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c | 3406 | 3754 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/ibus.h>
struct nvc0_ibus_priv {
struct nouveau_ibus base;
};
static void
nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
static void
nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
{
u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
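/*
 * Top-level IBUS interrupt dispatch: intr0 carries HUB status in bits
 * 8-15 and ROP status in bits 16-31, while intr1 carries one bit per
 * GPC; the unit counts are read from 0x121c70..0x121c78.
 */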
static void
nvc0_ibus_intr(struct nouveau_subdev *subdev)
{
struct nvc0_ibus_priv *priv = (void *)subdev;
u32 intr0 = nv_rd32(priv, 0x121c58);
u32 intr1 = nv_rd32(priv, 0x121c5c);
u32 hubnr = nv_rd32(priv, 0x121c70);
u32 ropnr = nv_rd32(priv, 0x121c74);
u32 gpcnr = nv_rd32(priv, 0x121c78);
u32 i;
for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
u32 stat = 0x00000100 << i;
if (intr0 & stat) {
nvc0_ibus_intr_hub(priv, i);
intr0 &= ~stat;
}
}
for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
u32 stat = 0x00010000 << i;
if (intr0 & stat) {
nvc0_ibus_intr_rop(priv, i);
intr0 &= ~stat;
}
}
for (i = 0; intr1 && i < gpcnr; i++) {
u32 stat = 0x00000001 << i;
if (intr1 & stat) {
nvc0_ibus_intr_gpc(priv, i);
intr1 &= ~stat;
}
}
}
static int
nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
struct nvc0_ibus_priv *priv;
int ret;
ret = nouveau_ibus_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
nv_subdev(priv)->intr = nvc0_ibus_intr;
return 0;
}
struct nouveau_oclass
nvc0_ibus_oclass = {
.handle = NV_SUBDEV(IBUS, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ibus_ctor,
.dtor = _nouveau_ibus_dtor,
.init = _nouveau_ibus_init,
.fini = _nouveau_ibus_fini,
},
};
| gpl-2.0 |
cosmoecho/linux_xenvnuma | drivers/ssb/pcmcia.c | 3918 | 19719 | /*
* Sonics Silicon Backplane
* PCMCIA-Hostbus related functions
*
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2007-2008 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include "ssb_private.h"
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0
/* PCMCIA configuration registers */
#define SSB_PCMCIA_ADDRESS0 0x2E
#define SSB_PCMCIA_ADDRESS1 0x30
#define SSB_PCMCIA_ADDRESS2 0x32
#define SSB_PCMCIA_MEMSEG 0x34
#define SSB_PCMCIA_SPROMCTL 0x36
#define SSB_PCMCIA_SPROMCTL_IDLE 0
#define SSB_PCMCIA_SPROMCTL_WRITE 1
#define SSB_PCMCIA_SPROMCTL_READ 2
#define SSB_PCMCIA_SPROMCTL_WRITEEN 4
#define SSB_PCMCIA_SPROMCTL_WRITEDIS 7
#define SSB_PCMCIA_SPROMCTL_DONE 8
#define SSB_PCMCIA_SPROM_DATALO 0x38
#define SSB_PCMCIA_SPROM_DATAHI 0x3A
#define SSB_PCMCIA_SPROM_ADDRLO 0x3C
#define SSB_PCMCIA_SPROM_ADDRHI 0x3E
/* Hardware invariants CIS tuples */
#define SSB_PCMCIA_CIS 0x80
#define SSB_PCMCIA_CIS_ID 0x01
#define SSB_PCMCIA_CIS_BOARDREV 0x02
#define SSB_PCMCIA_CIS_PA 0x03
#define SSB_PCMCIA_CIS_PA_PA0B0_LO 0
#define SSB_PCMCIA_CIS_PA_PA0B0_HI 1
#define SSB_PCMCIA_CIS_PA_PA0B1_LO 2
#define SSB_PCMCIA_CIS_PA_PA0B1_HI 3
#define SSB_PCMCIA_CIS_PA_PA0B2_LO 4
#define SSB_PCMCIA_CIS_PA_PA0B2_HI 5
#define SSB_PCMCIA_CIS_PA_ITSSI 6
#define SSB_PCMCIA_CIS_PA_MAXPOW 7
#define SSB_PCMCIA_CIS_OEMNAME 0x04
#define SSB_PCMCIA_CIS_CCODE 0x05
#define SSB_PCMCIA_CIS_ANTENNA 0x06
#define SSB_PCMCIA_CIS_ANTGAIN 0x07
#define SSB_PCMCIA_CIS_BFLAGS 0x08
#define SSB_PCMCIA_CIS_LEDS 0x09
/* PCMCIA SPROM size. */
#define SSB_PCMCIA_SPROM_SIZE 256
#define SSB_PCMCIA_SPROM_SIZE_BYTES (SSB_PCMCIA_SPROM_SIZE * sizeof(u16))
/* Write to a PCMCIA configuration register. */
static int ssb_pcmcia_cfg_write(struct ssb_bus *bus, u8 offset, u8 value)
{
int res;
res = pcmcia_write_config_byte(bus->host_pcmcia, offset, value);
if (unlikely(res != 0))
return -EBUSY;
return 0;
}
/* Read from a PCMCIA configuration register. */
static int ssb_pcmcia_cfg_read(struct ssb_bus *bus, u8 offset, u8 *value)
{
int res;
res = pcmcia_read_config_byte(bus->host_pcmcia, offset, value);
if (unlikely(res != 0))
return -EBUSY;
return 0;
}
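/*
 * Core switching programs the backplane window address piecewise
 * through the ADDRESS0..2 configuration registers and reads it back
 * until the card has latched the requested core's enumeration-space
 * address, giving up after SSB_BAR0_MAX_RETRIES attempts.
 */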
int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
u8 coreidx)
{
int err;
int attempts = 0;
u32 cur_core;
u32 addr;
u32 read_addr;
u8 val;
addr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE;
while (1) {
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS0,
(addr & 0x0000F000) >> 12);
if (err)
goto error;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS1,
(addr & 0x00FF0000) >> 16);
if (err)
goto error;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_ADDRESS2,
(addr & 0xFF000000) >> 24);
if (err)
goto error;
read_addr = 0;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS0, &val);
if (err)
goto error;
read_addr |= ((u32)(val & 0x0F)) << 12;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS1, &val);
if (err)
goto error;
read_addr |= ((u32)val) << 16;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_ADDRESS2, &val);
if (err)
goto error;
read_addr |= ((u32)val) << 24;
cur_core = (read_addr - SSB_ENUM_BASE) / SSB_CORE_SIZE;
if (cur_core == coreidx)
break;
err = -ETIMEDOUT;
if (attempts++ > SSB_BAR0_MAX_RETRIES)
goto error;
udelay(10);
}
return 0;
error:
ssb_err("Failed to switch to core %u\n", coreidx);
return err;
}
int ssb_pcmcia_switch_core(struct ssb_bus *bus,
struct ssb_device *dev)
{
int err;
#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
ssb_info("Switching to %s core, index %d\n",
ssb_core_name(dev->id.coreid),
dev->core_index);
#endif
err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
if (!err)
bus->mapped_device = dev;
return err;
}
int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
{
int attempts = 0;
int err;
u8 val;
SSB_WARN_ON((seg != 0) && (seg != 1));
while (1) {
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_MEMSEG, seg);
if (err)
goto error;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_MEMSEG, &val);
if (err)
goto error;
if (val == seg)
break;
err = -ETIMEDOUT;
if (unlikely(attempts++ > SSB_BAR0_MAX_RETRIES))
goto error;
udelay(10);
}
bus->mapped_pcmcia_seg = seg;
return 0;
error:
ssb_err("Failed to switch pcmcia segment\n");
return err;
}
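/*
 * The PCMCIA I/O window covers only 2KiB of a core's 4KiB register
 * space, so accesses at offsets >= 0x800 are redirected to memory
 * segment 1 with the offset rebased into the window.
 */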
static int select_core_and_segment(struct ssb_device *dev,
u16 *offset)
{
struct ssb_bus *bus = dev->bus;
int err;
u8 need_segment;
if (*offset >= 0x800) {
*offset -= 0x800;
need_segment = 1;
} else
need_segment = 0;
if (unlikely(dev != bus->mapped_device)) {
err = ssb_pcmcia_switch_core(bus, dev);
if (unlikely(err))
return err;
}
if (unlikely(need_segment != bus->mapped_pcmcia_seg)) {
err = ssb_pcmcia_switch_segment(bus, need_segment);
if (unlikely(err))
return err;
}
return 0;
}
static u8 ssb_pcmcia_read8(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
u8 value = 0xFF;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err))
value = readb(bus->mmio + offset);
spin_unlock_irqrestore(&bus->bar_lock, flags);
return value;
}
static u16 ssb_pcmcia_read16(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
u16 value = 0xFFFF;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err))
value = readw(bus->mmio + offset);
spin_unlock_irqrestore(&bus->bar_lock, flags);
return value;
}
static u32 ssb_pcmcia_read32(struct ssb_device *dev, u16 offset)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
u32 lo = 0xFFFFFFFF, hi = 0xFFFFFFFF;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err)) {
lo = readw(bus->mmio + offset);
hi = readw(bus->mmio + offset + 2);
}
spin_unlock_irqrestore(&bus->bar_lock, flags);
return (lo | (hi << 16));
}
#ifdef CONFIG_SSB_BLOCKIO
static void ssb_pcmcia_block_read(struct ssb_device *dev, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
void __iomem *addr = bus->mmio + offset;
int err;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (unlikely(err)) {
memset(buffer, 0xFF, count);
goto unlock;
}
switch (reg_width) {
case sizeof(u8): {
u8 *buf = buffer;
while (count) {
*buf = __raw_readb(addr);
buf++;
count--;
}
break;
}
case sizeof(u16): {
__le16 *buf = buffer;
SSB_WARN_ON(count & 1);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
count -= 2;
}
break;
}
case sizeof(u32): {
__le16 *buf = buffer;
SSB_WARN_ON(count & 3);
while (count) {
*buf = (__force __le16)__raw_readw(addr);
buf++;
*buf = (__force __le16)__raw_readw(addr + 2);
buf++;
count -= 4;
}
break;
}
default:
SSB_WARN_ON(1);
}
unlock:
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#endif /* CONFIG_SSB_BLOCKIO */
static void ssb_pcmcia_write8(struct ssb_device *dev, u16 offset, u8 value)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writeb(value, bus->mmio + offset);
mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err))
writew(value, bus->mmio + offset);
mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
int err;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (likely(!err)) {
writew((value & 0x0000FFFF), bus->mmio + offset);
writew(((value & 0xFFFF0000) >> 16), bus->mmio + offset + 2);
}
mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#ifdef CONFIG_SSB_BLOCKIO
static void ssb_pcmcia_block_write(struct ssb_device *dev, const void *buffer,
size_t count, u16 offset, u8 reg_width)
{
struct ssb_bus *bus = dev->bus;
unsigned long flags;
void __iomem *addr = bus->mmio + offset;
int err;
spin_lock_irqsave(&bus->bar_lock, flags);
err = select_core_and_segment(dev, &offset);
if (unlikely(err))
goto unlock;
switch (reg_width) {
case sizeof(u8): {
const u8 *buf = buffer;
while (count) {
__raw_writeb(*buf, addr);
buf++;
count--;
}
break;
}
case sizeof(u16): {
const __le16 *buf = buffer;
SSB_WARN_ON(count & 1);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
count -= 2;
}
break;
}
case sizeof(u32): {
const __le16 *buf = buffer;
SSB_WARN_ON(count & 3);
while (count) {
__raw_writew((__force u16)(*buf), addr);
buf++;
__raw_writew((__force u16)(*buf), addr + 2);
buf++;
count -= 4;
}
break;
}
default:
SSB_WARN_ON(1);
}
unlock:
mmiowb();
spin_unlock_irqrestore(&bus->bar_lock, flags);
}
#endif /* CONFIG_SSB_BLOCKIO */
/* Not "static", as it's used in main.c */
const struct ssb_bus_ops ssb_pcmcia_ops = {
.read8 = ssb_pcmcia_read8,
.read16 = ssb_pcmcia_read16,
.read32 = ssb_pcmcia_read32,
.write8 = ssb_pcmcia_write8,
.write16 = ssb_pcmcia_write16,
.write32 = ssb_pcmcia_write32,
#ifdef CONFIG_SSB_BLOCKIO
.block_read = ssb_pcmcia_block_read,
.block_write = ssb_pcmcia_block_write,
#endif
};
static int ssb_pcmcia_sprom_command(struct ssb_bus *bus, u8 command)
{
unsigned int i;
int err;
u8 value;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROMCTL, command);
if (err)
return err;
for (i = 0; i < 1000; i++) {
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROMCTL, &value);
if (err)
return err;
if (value & SSB_PCMCIA_SPROMCTL_DONE)
return 0;
udelay(10);
}
return -ETIMEDOUT;
}
/* offset is the 16bit word offset */
static int ssb_pcmcia_sprom_read(struct ssb_bus *bus, u16 offset, u16 *value)
{
int err;
u8 lo, hi;
offset *= 2; /* Make byte offset */
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRLO,
(offset & 0x00FF));
if (err)
return err;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRHI,
(offset & 0xFF00) >> 8);
if (err)
return err;
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_READ);
if (err)
return err;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROM_DATALO, &lo);
if (err)
return err;
err = ssb_pcmcia_cfg_read(bus, SSB_PCMCIA_SPROM_DATAHI, &hi);
if (err)
return err;
*value = (lo | (((u16)hi) << 8));
return 0;
}
/* offset is the 16bit word offset */
static int ssb_pcmcia_sprom_write(struct ssb_bus *bus, u16 offset, u16 value)
{
int err;
offset *= 2; /* Make byte offset */
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRLO,
(offset & 0x00FF));
if (err)
return err;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_ADDRHI,
(offset & 0xFF00) >> 8);
if (err)
return err;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_DATALO,
(value & 0x00FF));
if (err)
return err;
err = ssb_pcmcia_cfg_write(bus, SSB_PCMCIA_SPROM_DATAHI,
(value & 0xFF00) >> 8);
if (err)
return err;
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITE);
if (err)
return err;
msleep(20);
return 0;
}
/* Read the complete SPROM image (SSB_PCMCIA_SPROM_SIZE 16bit words). */
static int ssb_pcmcia_sprom_read_all(struct ssb_bus *bus, u16 *sprom)
{
int err, i;
for (i = 0; i < SSB_PCMCIA_SPROM_SIZE; i++) {
err = ssb_pcmcia_sprom_read(bus, i, &sprom[i]);
if (err)
return err;
}
return 0;
}
/* Write the complete SPROM image (SSB_PCMCIA_SPROM_SIZE 16bit words). */
static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
{
int i, err;
bool failed = false;
size_t size = SSB_PCMCIA_SPROM_SIZE;
ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
if (err) {
ssb_notice("Could not enable SPROM write access\n");
return -EBUSY;
}
ssb_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
ssb_cont("25%%");
else if (i == size / 2)
ssb_cont("50%%");
else if (i == (size * 3) / 4)
ssb_cont("75%%");
else if (i % 2)
ssb_cont(".");
err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
if (err) {
ssb_notice("Failed to write to SPROM\n");
failed = true;
break;
}
}
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
if (err) {
ssb_notice("Could not disable SPROM write access\n");
failed = true;
}
msleep(500);
if (!failed) {
ssb_cont("100%% ]\n");
ssb_notice("SPROM written\n");
}
return failed ? -EBUSY : 0;
}
static int ssb_pcmcia_sprom_check_crc(const u16 *sprom, size_t size)
{
/* TODO: CRC checking is not implemented yet, so any image is accepted. */
return 0;
}
#define GOTO_ERROR_ON(condition, description) do { \
if (unlikely(condition)) { \
error_description = description; \
goto error; \
} \
} while (0)
static int ssb_pcmcia_get_mac(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv)
{
struct ssb_sprom *sprom = priv;
if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
return -EINVAL;
if (tuple->TupleDataLen != ETH_ALEN + 2)
return -EINVAL;
if (tuple->TupleData[1] != ETH_ALEN)
return -EINVAL;
memcpy(sprom->il0mac, &tuple->TupleData[2], ETH_ALEN);
return 0;
};
static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
tuple_t *tuple,
void *priv)
{
struct ssb_init_invariants *iv = priv;
struct ssb_sprom *sprom = &iv->sprom;
struct ssb_boardinfo *bi = &iv->boardinfo;
const char *error_description;
GOTO_ERROR_ON(tuple->TupleDataLen < 1, "VEN tpl < 1");
switch (tuple->TupleData[0]) {
case SSB_PCMCIA_CIS_ID:
GOTO_ERROR_ON((tuple->TupleDataLen != 5) &&
(tuple->TupleDataLen != 7),
"id tpl size");
bi->vendor = tuple->TupleData[1] |
((u16)tuple->TupleData[2] << 8);
break;
case SSB_PCMCIA_CIS_BOARDREV:
GOTO_ERROR_ON(tuple->TupleDataLen != 2,
"boardrev tpl size");
sprom->board_rev = tuple->TupleData[1];
break;
case SSB_PCMCIA_CIS_PA:
GOTO_ERROR_ON((tuple->TupleDataLen != 9) &&
(tuple->TupleDataLen != 10),
"pa tpl size");
sprom->pa0b0 = tuple->TupleData[1] |
((u16)tuple->TupleData[2] << 8);
sprom->pa0b1 = tuple->TupleData[3] |
((u16)tuple->TupleData[4] << 8);
sprom->pa0b2 = tuple->TupleData[5] |
((u16)tuple->TupleData[6] << 8);
sprom->itssi_a = tuple->TupleData[7];
sprom->itssi_bg = tuple->TupleData[7];
sprom->maxpwr_a = tuple->TupleData[8];
sprom->maxpwr_bg = tuple->TupleData[8];
break;
case SSB_PCMCIA_CIS_OEMNAME:
/* We ignore this. */
break;
case SSB_PCMCIA_CIS_CCODE:
GOTO_ERROR_ON(tuple->TupleDataLen != 2,
"ccode tpl size");
sprom->country_code = tuple->TupleData[1];
break;
case SSB_PCMCIA_CIS_ANTENNA:
GOTO_ERROR_ON(tuple->TupleDataLen != 2,
"ant tpl size");
sprom->ant_available_a = tuple->TupleData[1];
sprom->ant_available_bg = tuple->TupleData[1];
break;
case SSB_PCMCIA_CIS_ANTGAIN:
GOTO_ERROR_ON(tuple->TupleDataLen != 2,
"antg tpl size");
sprom->antenna_gain.a0 = tuple->TupleData[1];
sprom->antenna_gain.a1 = tuple->TupleData[1];
sprom->antenna_gain.a2 = tuple->TupleData[1];
sprom->antenna_gain.a3 = tuple->TupleData[1];
break;
case SSB_PCMCIA_CIS_BFLAGS:
GOTO_ERROR_ON((tuple->TupleDataLen != 3) &&
(tuple->TupleDataLen != 5),
"bfl tpl size");
sprom->boardflags_lo = tuple->TupleData[1] |
((u16)tuple->TupleData[2] << 8);
break;
case SSB_PCMCIA_CIS_LEDS:
GOTO_ERROR_ON(tuple->TupleDataLen != 5,
"leds tpl size");
sprom->gpio0 = tuple->TupleData[1];
sprom->gpio1 = tuple->TupleData[2];
sprom->gpio2 = tuple->TupleData[3];
sprom->gpio3 = tuple->TupleData[4];
break;
}
return -ENOSPC; /* continue with next entry */
error:
ssb_err("PCMCIA: Failed to fetch device invariants: %s\n",
error_description);
return -ENODEV;
}
int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
struct ssb_init_invariants *iv)
{
struct ssb_sprom *sprom = &iv->sprom;
int res;
memset(sprom, 0xFF, sizeof(*sprom));
sprom->revision = 1;
sprom->boardflags_lo = 0;
sprom->boardflags_hi = 0;
/* First fetch the MAC address. */
res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
ssb_pcmcia_get_mac, sprom);
if (res != 0) {
ssb_err("PCMCIA: Failed to fetch MAC address\n");
return -ENODEV;
}
/* Fetch the vendor specific tuples. */
res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
ssb_pcmcia_do_get_invariants, iv);
if ((res == 0) || (res == -ENOSPC))
return 0;
ssb_err("PCMCIA: Failed to fetch device invariants\n");
return -ENODEV;
}
static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev,
struct device_attribute *attr,
char *buf)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
struct ssb_bus *bus;
bus = ssb_pcmcia_dev_to_bus(pdev);
if (!bus)
return -ENODEV;
return ssb_attr_sprom_show(bus, buf,
ssb_pcmcia_sprom_read_all);
}
static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
struct ssb_bus *bus;
bus = ssb_pcmcia_dev_to_bus(pdev);
if (!bus)
return -ENODEV;
return ssb_attr_sprom_store(bus, buf, count,
ssb_pcmcia_sprom_check_crc,
ssb_pcmcia_sprom_write_all);
}
static DEVICE_ATTR(ssb_sprom, 0600,
ssb_pcmcia_attr_sprom_show,
ssb_pcmcia_attr_sprom_store);
static int ssb_pcmcia_cor_setup(struct ssb_bus *bus, u8 cor)
{
u8 val;
int err;
err = ssb_pcmcia_cfg_read(bus, cor, &val);
if (err)
return err;
val &= ~COR_SOFT_RESET;
val |= COR_FUNC_ENA | COR_IREQ_ENA | COR_LEVEL_REQ;
err = ssb_pcmcia_cfg_write(bus, cor, val);
if (err)
return err;
msleep(40);
return 0;
}
/* Initialize the PCMCIA hardware. This is called on Init and Resume. */
int ssb_pcmcia_hardware_setup(struct ssb_bus *bus)
{
int err;
if (bus->bustype != SSB_BUSTYPE_PCMCIA)
return 0;
/* Switch segment to a known state and sync
* bus->mapped_pcmcia_seg with hardware state. */
ssb_pcmcia_switch_segment(bus, 0);
/* Init the COR register. */
err = ssb_pcmcia_cor_setup(bus, CISREG_COR);
if (err)
return err;
/* Some cards also need this register to get poked. */
err = ssb_pcmcia_cor_setup(bus, CISREG_COR + 0x80);
if (err)
return err;
return 0;
}
void ssb_pcmcia_exit(struct ssb_bus *bus)
{
if (bus->bustype != SSB_BUSTYPE_PCMCIA)
return;
device_remove_file(&bus->host_pcmcia->dev, &dev_attr_ssb_sprom);
}
int ssb_pcmcia_init(struct ssb_bus *bus)
{
int err;
if (bus->bustype != SSB_BUSTYPE_PCMCIA)
return 0;
err = ssb_pcmcia_hardware_setup(bus);
if (err)
goto error;
bus->sprom_size = SSB_PCMCIA_SPROM_SIZE;
mutex_init(&bus->sprom_mutex);
err = device_create_file(&bus->host_pcmcia->dev, &dev_attr_ssb_sprom);
if (err)
goto error;
return 0;
error:
ssb_err("Failed to initialize PCMCIA host device\n");
return err;
}
| gpl-2.0 |
daivietpda/M7WLJ-5.0.2 | arch/m68k/hp300/time.c | 4686 | 1955 | /*
* linux/arch/m68k/hp300/time.c
*
* Copyright (C) 1998 Philip Blundell <philb@gnu.org>
*
* This file contains the HP300-specific time handling code.
*/
#include <asm/ptrace.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/traps.h>
#include <asm/blinken.h>
/* Clock hardware definitions */
#define CLOCKBASE 0xf05f8000
#define CLKCR1 0x1
#define CLKCR2 0x3
#define CLKCR3 CLKCR1
#define CLKSR CLKCR2
#define CLKMSB1 0x5
#define CLKMSB2 0x9
#define CLKMSB3 0xD
/* This is for machines which generate the exact clock. */
#define USECS_PER_JIFFY (1000000/HZ)
#define INTVAL ((10000 / 4) - 1)
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
unsigned long tmp;
irq_handler_t vector = dev_id;
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
return vector(irq, NULL);
}
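/*
 * The 16-bit timer is read as two 8-bit accesses, so the MSB is
 * sampled before and after the LSB; if the two samples differ, a
 * carry rippled through mid-read and the LSB must be read again.
 */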
unsigned long hp300_gettimeoffset(void)
{
/* Read current timer 1 value */
unsigned char lsb, msb1, msb2;
unsigned short ticks;
msb1 = in_8(CLOCKBASE + 5);
lsb = in_8(CLOCKBASE + 7);
msb2 = in_8(CLOCKBASE + 5);
if (msb1 != msb2)
/* A carry happened while we were reading. Read it again */
lsb = in_8(CLOCKBASE + 7);
ticks = INTVAL - ((msb2 << 8) | lsb);
return (USECS_PER_JIFFY * ticks) / INTVAL;
}
void __init hp300_sched_init(irq_handler_t vector)
{
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x1); /* reset */
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
}
| gpl-2.0 |
Oi-Android/android_kernel_xiaomi_ferrari-mr | drivers/isdn/hardware/avm/b1pci.c | 4686 | 10820 | /* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
*
* Module for AVM B1 PCI-card.
*
* Copyright 1999 by Carsten Paeth <calle@calle.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/capi.h>
#include <asm/io.h>
#include <linux/init.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
/* ------------------------------------------------------------- */
static char *revision = "$Revision: 1.1.2.2 $";
/* ------------------------------------------------------------- */
static struct pci_device_id b1pci_pci_tbl[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
/* ------------------------------------------------------------- */
static char *b1pci_procinfo(struct capi_ctr *ctrl)
{
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
if (!cinfo)
return "";
sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
cinfo->cardname[0] ? cinfo->cardname : "-",
cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
cinfo->card ? cinfo->card->port : 0x0,
cinfo->card ? cinfo->card->irq : 0,
cinfo->card ? cinfo->card->revision : 0
);
return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
{
avmcard *card;
avmctrl_info *cinfo;
int retval;
card = b1_alloc_card(1);
if (!card) {
printk(KERN_WARNING "b1pci: no memory.\n");
retval = -ENOMEM;
goto err;
}
cinfo = card->ctrlinfo;
sprintf(card->name, "b1pci-%x", p->port);
card->port = p->port;
card->irq = p->irq;
card->cardtype = avm_b1pci;
if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
card->port, card->port + AVMB1_PORTLEN);
retval = -EBUSY;
goto err_free;
}
b1_reset(card->port);
retval = b1_detect(card->port, card->cardtype);
if (retval) {
printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
card->port, retval);
retval = -ENODEV;
goto err_release_region;
}
b1_reset(card->port);
b1_getrevision(card);
retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
if (retval) {
printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
retval = -EBUSY;
goto err_release_region;
}
cinfo->capi_ctrl.driver_name = "b1pci";
cinfo->capi_ctrl.driverdata = cinfo;
cinfo->capi_ctrl.register_appl = b1_register_appl;
cinfo->capi_ctrl.release_appl = b1_release_appl;
cinfo->capi_ctrl.send_message = b1_send_message;
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pci_procinfo;
cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
cinfo->capi_ctrl.owner = THIS_MODULE;
retval = attach_capi_ctr(&cinfo->capi_ctrl);
if (retval) {
printk(KERN_ERR "b1pci: attach controller failed.\n");
goto err_free_irq;
}
if (card->revision >= 4) {
printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
card->port, card->irq, card->revision);
} else {
printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
card->port, card->irq, card->revision);
}
pci_set_drvdata(pdev, card);
return 0;
err_free_irq:
free_irq(card->irq, card);
err_release_region:
release_region(card->port, AVMB1_PORTLEN);
err_free:
b1_free_card(card);
err:
return retval;
}
static void b1pci_remove(struct pci_dev *pdev)
{
avmcard *card = pci_get_drvdata(pdev);
avmctrl_info *cinfo = card->ctrlinfo;
unsigned int port = card->port;
b1_reset(port);
b1_reset(port);
detach_capi_ctr(&cinfo->capi_ctrl);
free_irq(card->irq, card);
release_region(card->port, AVMB1_PORTLEN);
b1_free_card(card);
}
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
/* ------------------------------------------------------------- */
static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
{
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
if (!cinfo)
return "";
sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
cinfo->cardname[0] ? cinfo->cardname : "-",
cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
cinfo->card ? cinfo->card->port : 0x0,
cinfo->card ? cinfo->card->irq : 0,
cinfo->card ? cinfo->card->membase : 0,
cinfo->card ? cinfo->card->revision : 0
);
return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
{
avmcard *card;
avmctrl_info *cinfo;
int retval;
card = b1_alloc_card(1);
if (!card) {
printk(KERN_WARNING "b1pci: no memory.\n");
retval = -ENOMEM;
goto err;
}
card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128);
if (!card->dma) {
printk(KERN_WARNING "b1pci: dma alloc.\n");
retval = -ENOMEM;
goto err_free;
}
cinfo = card->ctrlinfo;
sprintf(card->name, "b1pciv4-%x", p->port);
card->port = p->port;
card->irq = p->irq;
card->membase = p->membase;
card->cardtype = avm_b1pci;
if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
card->port, card->port + AVMB1_PORTLEN);
retval = -EBUSY;
goto err_free_dma;
}
card->mbase = ioremap(card->membase, 64);
if (!card->mbase) {
printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
card->membase);
retval = -ENOMEM;
goto err_release_region;
}
b1dma_reset(card);
retval = b1pciv4_detect(card);
if (retval) {
printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
card->port, retval);
retval = -ENODEV;
goto err_unmap;
}
b1dma_reset(card);
b1_getrevision(card);
retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
if (retval) {
printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
card->irq);
retval = -EBUSY;
goto err_unmap;
}
cinfo->capi_ctrl.owner = THIS_MODULE;
cinfo->capi_ctrl.driver_name = "b1pciv4";
cinfo->capi_ctrl.driverdata = cinfo;
cinfo->capi_ctrl.register_appl = b1dma_register_appl;
cinfo->capi_ctrl.release_appl = b1dma_release_appl;
cinfo->capi_ctrl.send_message = b1dma_send_message;
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
if (retval) {
printk(KERN_ERR "b1pci: attach controller failed.\n");
goto err_free_irq;
}
card->cardnr = cinfo->capi_ctrl.cnr;
printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
card->port, card->irq, card->membase, card->revision);
pci_set_drvdata(pdev, card);
return 0;
err_free_irq:
free_irq(card->irq, card);
err_unmap:
iounmap(card->mbase);
err_release_region:
release_region(card->port, AVMB1_PORTLEN);
err_free_dma:
avmcard_dma_free(card->dma);
err_free:
b1_free_card(card);
err:
return retval;
}
static void b1pciv4_remove(struct pci_dev *pdev)
{
avmcard *card = pci_get_drvdata(pdev);
avmctrl_info *cinfo = card->ctrlinfo;
b1dma_reset(card);
detach_capi_ctr(&cinfo->capi_ctrl);
free_irq(card->irq, card);
iounmap(card->mbase);
release_region(card->port, AVMB1_PORTLEN);
avmcard_dma_free(card->dma);
b1_free_card(card);
}
#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
static int b1pci_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct capicardparams param;
int retval;
if (pci_enable_device(pdev) < 0) {
printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
return -ENODEV;
}
param.irq = pdev->irq;
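/*
 * B1 PCI V4 cards expose an extra I/O region in BAR 2 and a shared
 * memory window in BAR 0; older B1 PCI cards only provide the I/O
 * port in BAR 1, which is how the two variants are told apart here.
 */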
if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
pci_set_master(pdev);
#endif
param.membase = pci_resource_start(pdev, 0);
param.port = pci_resource_start(pdev, 2);
printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
param.port, param.irq, param.membase);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
retval = b1pciv4_probe(¶m, pdev);
#else
retval = b1pci_probe(¶m, pdev);
#endif
if (retval != 0) {
printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
param.port, param.irq, param.membase);
}
} else {
param.membase = 0;
param.port = pci_resource_start(pdev, 1);
printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
param.port, param.irq);
retval = b1pci_probe(¶m, pdev);
if (retval != 0) {
printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
param.port, param.irq);
}
}
return retval;
}
static void b1pci_pci_remove(struct pci_dev *pdev)
{
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
avmcard *card = pci_get_drvdata(pdev);
if (card->dma)
b1pciv4_remove(pdev);
else
b1pci_remove(pdev);
#else
b1pci_remove(pdev);
#endif
}
static struct pci_driver b1pci_pci_driver = {
.name = "b1pci",
.id_table = b1pci_pci_tbl,
.probe = b1pci_pci_probe,
.remove = b1pci_pci_remove,
};
static struct capi_driver capi_driver_b1pci = {
.name = "b1pci",
.revision = "1.0",
};
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
static struct capi_driver capi_driver_b1pciv4 = {
.name = "b1pciv4",
.revision = "1.0",
};
#endif
static int __init b1pci_init(void)
{
char *p;
char rev[32];
int err;
if ((p = strchr(revision, ':')) != NULL && p[1]) {
strlcpy(rev, p + 2, 32);
if ((p = strchr(rev, '$')) != NULL && p > rev)
*(p - 1) = 0;
} else
strcpy(rev, "1.0");
err = pci_register_driver(&b1pci_pci_driver);
if (!err) {
strlcpy(capi_driver_b1pci.revision, rev, 32);
register_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
strlcpy(capi_driver_b1pciv4.revision, rev, 32);
register_capi_driver(&capi_driver_b1pciv4);
#endif
printk(KERN_INFO "b1pci: revision %s\n", rev);
}
return err;
}
static void __exit b1pci_exit(void)
{
unregister_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
unregister_capi_driver(&capi_driver_b1pciv4);
#endif
pci_unregister_driver(&b1pci_pci_driver);
}
module_init(b1pci_init);
module_exit(b1pci_exit);
| gpl-2.0 |
JasperZ/android_bravo_kernel | arch/microblaze/kernel/traps.c | 4686 | 2027 | /*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/debug_locks.h>
#include <asm/exceptions.h>
#include <asm/unwind.h>
void trap_init(void)
{
__enable_hw_exceptions();
}
static unsigned long kstack_depth_to_print; /* 0 == entire stack */
static int __init kstack_setup(char *s)
{
return !strict_strtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);
void show_stack(struct task_struct *task, unsigned long *sp)
{
unsigned long words_to_show;
u32 fp = (u32) sp;
if (fp == 0) {
if (task) {
fp = ((struct thread_info *)
(task->stack))->cpu_context.r1;
} else {
/* Pick up caller of dump_stack() */
fp = (u32)&sp - 8;
}
}
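/*
 * Dump everything from the frame pointer up to the top of the
 * THREAD_SIZE-aligned stack, counted in 32-bit words and optionally
 * capped by the "kstack=" boot parameter.
 */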
words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
words_to_show = kstack_depth_to_print;
pr_info("Kernel Stack:\n");
/*
* Make the first line an 'odd' size if necessary to get
* remaining lines to start at an address multiple of 0x10
*/
if (fp & 0xF) {
unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2;
if (line1_words < words_to_show) {
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32,
4, (void *)fp, line1_words << 2, 0);
fp += line1_words << 2;
words_to_show -= line1_words;
}
}
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
words_to_show << 2, 0);
printk(KERN_INFO "\n\n");
pr_info("Call Trace:\n");
microblaze_unwind(task, NULL);
pr_info("\n");
if (!task)
task = current;
debug_show_held_locks(task);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
| gpl-2.0 |
MoKee/android_kernel_sony_fuji-common | arch/m68k/sun3x/time.c | 4686 | 2078 | /*
* linux/arch/m68k/sun3x/time.c
*
* Sun3x-specific time handling
*/
#include <linux/types.h>
#include <linux/kd.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/traps.h>
#include <asm/sun3x.h>
#include <asm/sun3ints.h>
#include <asm/rtc.h>
#include "time.h"
#define M_CONTROL 0xf8
#define M_SEC 0xf9
#define M_MIN 0xfa
#define M_HOUR 0xfb
#define M_DAY 0xfc
#define M_DATE 0xfd
#define M_MONTH 0xfe
#define M_YEAR 0xff
#define C_WRITE 0x80
#define C_READ 0x40
#define C_SIGN 0x20
#define C_CALIB 0x1f
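/*
 * The Mostek clock chip stops updating its time registers while
 * C_WRITE or C_READ is set in the control register, so each transfer
 * below is bracketed by setting and clearing the matching bit.
 */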
int sun3x_hwclk(int set, struct rtc_time *t)
{
volatile struct mostek_dt *h =
(struct mostek_dt *)(SUN3X_EEPROM+M_CONTROL);
unsigned long flags;
local_irq_save(flags);
if (set) {
h->csr |= C_WRITE;
h->sec = bin2bcd(t->tm_sec);
h->min = bin2bcd(t->tm_min);
h->hour = bin2bcd(t->tm_hour);
h->wday = bin2bcd(t->tm_wday);
h->mday = bin2bcd(t->tm_mday);
h->month = bin2bcd(t->tm_mon);
h->year = bin2bcd(t->tm_year);
h->csr &= ~C_WRITE;
} else {
h->csr |= C_READ;
t->tm_sec = bcd2bin(h->sec);
t->tm_min = bcd2bin(h->min);
t->tm_hour = bcd2bin(h->hour);
t->tm_wday = bcd2bin(h->wday);
t->tm_mday = bcd2bin(h->mday);
t->tm_mon = bcd2bin(h->month);
t->tm_year = bcd2bin(h->year);
h->csr &= ~C_READ;
}
local_irq_restore(flags);
return 0;
}
/* Not much we can do here */
unsigned long sun3x_gettimeoffset (void)
{
return 0L;
}
#if 0
static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
{
void (*vector)(int, void *, struct pt_regs *) = dev_id;
/* Clear the pending interrupt - pulse the enable line low */
disable_irq(5);
enable_irq(5);
vector(irq, NULL, regs);
}
#endif
void __init sun3x_sched_init(irq_handler_t vector)
{
sun3_disable_interrupts();
/* Pulse enable low to get the clock started */
sun3_disable_irq(5);
sun3_enable_irq(5);
sun3_enable_interrupts();
}
| gpl-2.0 |
Split-Screen/android_kernel_xiaomi_cancro | arch/m68k/amiga/config.c | 4686 | 20371 | /*
* linux/arch/m68k/amiga/config.c
*
* Copyright (C) 1993 Hamish Macdonald
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Miscellaneous Amiga stuff
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/vt_kern.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/zorro.h>
#include <linux/module.h>
#include <linux/keyboard.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/io.h>
static unsigned long amiga_model;
unsigned long amiga_eclock;
EXPORT_SYMBOL(amiga_eclock);
unsigned long amiga_colorclock;
EXPORT_SYMBOL(amiga_colorclock);
unsigned long amiga_chipset;
EXPORT_SYMBOL(amiga_chipset);
unsigned char amiga_vblank;
EXPORT_SYMBOL(amiga_vblank);
static unsigned char amiga_psfreq;
struct amiga_hw_present amiga_hw_present;
EXPORT_SYMBOL(amiga_hw_present);
static char s_a500[] __initdata = "A500";
static char s_a500p[] __initdata = "A500+";
static char s_a600[] __initdata = "A600";
static char s_a1000[] __initdata = "A1000";
static char s_a1200[] __initdata = "A1200";
static char s_a2000[] __initdata = "A2000";
static char s_a2500[] __initdata = "A2500";
static char s_a3000[] __initdata = "A3000";
static char s_a3000t[] __initdata = "A3000T";
static char s_a3000p[] __initdata = "A3000+";
static char s_a4000[] __initdata = "A4000";
static char s_a4000t[] __initdata = "A4000T";
static char s_cdtv[] __initdata = "CDTV";
static char s_cd32[] __initdata = "CD32";
static char s_draco[] __initdata = "Draco";
static char *amiga_models[] __initdata = {
[AMI_500-AMI_500] = s_a500,
[AMI_500PLUS-AMI_500] = s_a500p,
[AMI_600-AMI_500] = s_a600,
[AMI_1000-AMI_500] = s_a1000,
[AMI_1200-AMI_500] = s_a1200,
[AMI_2000-AMI_500] = s_a2000,
[AMI_2500-AMI_500] = s_a2500,
[AMI_3000-AMI_500] = s_a3000,
[AMI_3000T-AMI_500] = s_a3000t,
[AMI_3000PLUS-AMI_500] = s_a3000p,
[AMI_4000-AMI_500] = s_a4000,
[AMI_4000T-AMI_500] = s_a4000t,
[AMI_CDTV-AMI_500] = s_cdtv,
[AMI_CD32-AMI_500] = s_cd32,
[AMI_DRACO-AMI_500] = s_draco,
};
static char amiga_model_name[13] = "Amiga ";
static void amiga_sched_init(irq_handler_t handler);
static void amiga_get_model(char *model);
static void amiga_get_hardware_list(struct seq_file *m);
/* amiga specific timer functions */
static unsigned long amiga_gettimeoffset(void);
extern void amiga_mksound(unsigned int count, unsigned int ticks);
static void amiga_reset(void);
extern void amiga_init_sound(void);
static void amiga_mem_console_write(struct console *co, const char *b,
unsigned int count);
#ifdef CONFIG_HEARTBEAT
static void amiga_heartbeat(int on);
#endif
static struct console amiga_console_driver = {
.name = "debug",
.flags = CON_PRINTBUFFER,
.index = -1,
};
/*
* Motherboard Resources present in all Amiga models
*/
static struct {
struct resource _ciab, _ciaa, _custom, _kickstart;
} mb_resources = {
._ciab = {
.name = "CIA B", .start = 0x00bfd000, .end = 0x00bfdfff
},
._ciaa = {
.name = "CIA A", .start = 0x00bfe000, .end = 0x00bfefff
},
._custom = {
.name = "Custom I/O", .start = 0x00dff000, .end = 0x00dfffff
},
._kickstart = {
.name = "Kickstart ROM", .start = 0x00f80000, .end = 0x00ffffff
}
};
static struct resource ram_resource[NUM_MEMINFO];
/*
* Parse an Amiga-specific record in the bootinfo
*/
int amiga_parse_bootinfo(const struct bi_record *record)
{
int unknown = 0;
const unsigned long *data = record->data;
switch (record->tag) {
case BI_AMIGA_MODEL:
amiga_model = *data;
break;
case BI_AMIGA_ECLOCK:
amiga_eclock = *data;
break;
case BI_AMIGA_CHIPSET:
amiga_chipset = *data;
break;
case BI_AMIGA_CHIP_SIZE:
amiga_chip_size = *(const int *)data;
break;
case BI_AMIGA_VBLANK:
amiga_vblank = *(const unsigned char *)data;
break;
case BI_AMIGA_PSFREQ:
amiga_psfreq = *(const unsigned char *)data;
break;
case BI_AMIGA_AUTOCON:
#ifdef CONFIG_ZORRO
if (zorro_num_autocon < ZORRO_NUM_AUTO) {
const struct ConfigDev *cd = (struct ConfigDev *)data;
struct zorro_dev *dev = &zorro_autocon[zorro_num_autocon++];
dev->rom = cd->cd_Rom;
dev->slotaddr = cd->cd_SlotAddr;
dev->slotsize = cd->cd_SlotSize;
dev->resource.start = (unsigned long)cd->cd_BoardAddr;
dev->resource.end = dev->resource.start + cd->cd_BoardSize - 1;
} else
printk("amiga_parse_bootinfo: too many AutoConfig devices\n");
#endif /* CONFIG_ZORRO */
break;
case BI_AMIGA_SERPER:
/* serial port period: ignored here */
break;
default:
unknown = 1;
}
return unknown;
}
/*
* Identify builtin hardware
*/
static void __init amiga_identify(void)
{
/* Fill in some default values, if necessary */
if (amiga_eclock == 0)
amiga_eclock = 709379;
memset(&amiga_hw_present, 0, sizeof(amiga_hw_present));
printk("Amiga hardware found: ");
if (amiga_model >= AMI_500 && amiga_model <= AMI_DRACO) {
printk("[%s] ", amiga_models[amiga_model-AMI_500]);
strcat(amiga_model_name, amiga_models[amiga_model-AMI_500]);
}
switch (amiga_model) {
case AMI_UNKNOWN:
goto Generic;
case AMI_600:
case AMI_1200:
AMIGAHW_SET(A1200_IDE);
AMIGAHW_SET(PCMCIA);
case AMI_500:
case AMI_500PLUS:
case AMI_1000:
case AMI_2000:
case AMI_2500:
AMIGAHW_SET(A2000_CLK); /* Is this correct for all models? */
goto Generic;
case AMI_3000:
case AMI_3000T:
AMIGAHW_SET(AMBER_FF);
AMIGAHW_SET(MAGIC_REKICK);
/* fall through */
case AMI_3000PLUS:
AMIGAHW_SET(A3000_SCSI);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
goto Generic;
case AMI_4000T:
AMIGAHW_SET(A4000_SCSI);
/* fall through */
case AMI_4000:
AMIGAHW_SET(A4000_IDE);
AMIGAHW_SET(A3000_CLK);
AMIGAHW_SET(ZORRO3);
goto Generic;
case AMI_CDTV:
case AMI_CD32:
AMIGAHW_SET(CD_ROM);
AMIGAHW_SET(A2000_CLK); /* Is this correct? */
goto Generic;
Generic:
AMIGAHW_SET(AMI_VIDEO);
AMIGAHW_SET(AMI_BLITTER);
AMIGAHW_SET(AMI_AUDIO);
AMIGAHW_SET(AMI_FLOPPY);
AMIGAHW_SET(AMI_KEYBOARD);
AMIGAHW_SET(AMI_MOUSE);
AMIGAHW_SET(AMI_SERIAL);
AMIGAHW_SET(AMI_PARALLEL);
AMIGAHW_SET(CHIP_RAM);
AMIGAHW_SET(PAULA);
switch (amiga_chipset) {
case CS_OCS:
case CS_ECS:
case CS_AGA:
switch (amiga_custom.deniseid & 0xf) {
case 0x0c:
AMIGAHW_SET(DENISE_HR);
break;
case 0x08:
AMIGAHW_SET(LISA);
break;
}
break;
default:
AMIGAHW_SET(DENISE);
break;
}
switch ((amiga_custom.vposr>>8) & 0x7f) {
case 0x00:
AMIGAHW_SET(AGNUS_PAL);
break;
case 0x10:
AMIGAHW_SET(AGNUS_NTSC);
break;
case 0x20:
case 0x21:
AMIGAHW_SET(AGNUS_HR_PAL);
break;
case 0x30:
case 0x31:
AMIGAHW_SET(AGNUS_HR_NTSC);
break;
case 0x22:
case 0x23:
AMIGAHW_SET(ALICE_PAL);
break;
case 0x32:
case 0x33:
AMIGAHW_SET(ALICE_NTSC);
break;
}
AMIGAHW_SET(ZORRO);
break;
case AMI_DRACO:
panic("No support for Draco yet");
default:
panic("Unknown Amiga Model");
}
#define AMIGAHW_ANNOUNCE(name, str) \
if (AMIGAHW_PRESENT(name)) \
printk(str)
AMIGAHW_ANNOUNCE(AMI_VIDEO, "VIDEO ");
AMIGAHW_ANNOUNCE(AMI_BLITTER, "BLITTER ");
AMIGAHW_ANNOUNCE(AMBER_FF, "AMBER_FF ");
AMIGAHW_ANNOUNCE(AMI_AUDIO, "AUDIO ");
AMIGAHW_ANNOUNCE(AMI_FLOPPY, "FLOPPY ");
AMIGAHW_ANNOUNCE(A3000_SCSI, "A3000_SCSI ");
AMIGAHW_ANNOUNCE(A4000_SCSI, "A4000_SCSI ");
AMIGAHW_ANNOUNCE(A1200_IDE, "A1200_IDE ");
AMIGAHW_ANNOUNCE(A4000_IDE, "A4000_IDE ");
AMIGAHW_ANNOUNCE(CD_ROM, "CD_ROM ");
AMIGAHW_ANNOUNCE(AMI_KEYBOARD, "KEYBOARD ");
AMIGAHW_ANNOUNCE(AMI_MOUSE, "MOUSE ");
AMIGAHW_ANNOUNCE(AMI_SERIAL, "SERIAL ");
AMIGAHW_ANNOUNCE(AMI_PARALLEL, "PARALLEL ");
AMIGAHW_ANNOUNCE(A2000_CLK, "A2000_CLK ");
AMIGAHW_ANNOUNCE(A3000_CLK, "A3000_CLK ");
AMIGAHW_ANNOUNCE(CHIP_RAM, "CHIP_RAM ");
AMIGAHW_ANNOUNCE(PAULA, "PAULA ");
AMIGAHW_ANNOUNCE(DENISE, "DENISE ");
AMIGAHW_ANNOUNCE(DENISE_HR, "DENISE_HR ");
AMIGAHW_ANNOUNCE(LISA, "LISA ");
AMIGAHW_ANNOUNCE(AGNUS_PAL, "AGNUS_PAL ");
AMIGAHW_ANNOUNCE(AGNUS_NTSC, "AGNUS_NTSC ");
AMIGAHW_ANNOUNCE(AGNUS_HR_PAL, "AGNUS_HR_PAL ");
AMIGAHW_ANNOUNCE(AGNUS_HR_NTSC, "AGNUS_HR_NTSC ");
AMIGAHW_ANNOUNCE(ALICE_PAL, "ALICE_PAL ");
AMIGAHW_ANNOUNCE(ALICE_NTSC, "ALICE_NTSC ");
AMIGAHW_ANNOUNCE(MAGIC_REKICK, "MAGIC_REKICK ");
AMIGAHW_ANNOUNCE(PCMCIA, "PCMCIA ");
if (AMIGAHW_PRESENT(ZORRO))
printk("ZORRO%s ", AMIGAHW_PRESENT(ZORRO3) ? "3" : "");
printk("\n");
#undef AMIGAHW_ANNOUNCE
}
/*
* Setup the Amiga configuration info
*/
void __init config_amiga(void)
{
int i;
amiga_identify();
/* Yuk, we don't have PCI memory */
iomem_resource.name = "Memory";
for (i = 0; i < 4; i++)
request_resource(&iomem_resource, &((struct resource *)&mb_resources)[i]);
mach_sched_init = amiga_sched_init;
mach_init_IRQ = amiga_init_IRQ;
mach_get_model = amiga_get_model;
mach_get_hardware_list = amiga_get_hardware_list;
mach_gettimeoffset = amiga_gettimeoffset;
/*
* default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
* code will not be able to allocate any mem for transfers, unless we are
* dealing with a Z2 mem only system. /Jes
*/
mach_max_dma_address = 0xffffffff;
mach_reset = amiga_reset;
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
mach_beep = amiga_mksound;
#endif
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = amiga_heartbeat;
#endif
/* Fill in the clock value (based on the 700 kHz E-Clock) */
amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */
/* clear all DMA bits */
amiga_custom.dmacon = DMAF_ALL;
/* ensure that the DMA master bit is set */
amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
/* don't use Z2 RAM as system memory on Z3 capable machines */
if (AMIGAHW_PRESENT(ZORRO3)) {
int i, j;
u32 disabled_z2mem = 0;
for (i = 0; i < m68k_num_memory; i++) {
if (m68k_memory[i].addr < 16*1024*1024) {
if (i == 0) {
/* don't cut off the branch we're sitting on */
printk("Warning: kernel runs in Zorro II memory\n");
continue;
}
disabled_z2mem += m68k_memory[i].size;
m68k_num_memory--;
for (j = i; j < m68k_num_memory; j++)
m68k_memory[j] = m68k_memory[j+1];
i--;
}
}
if (disabled_z2mem)
printk("%dK of Zorro II memory will not be used as system memory\n",
disabled_z2mem>>10);
}
/* request all RAM */
for (i = 0; i < m68k_num_memory; i++) {
ram_resource[i].name =
(m68k_memory[i].addr >= 0x01000000) ? "32-bit Fast RAM" :
(m68k_memory[i].addr < 0x00c00000) ? "16-bit Fast RAM" :
"16-bit Slow RAM";
ram_resource[i].start = m68k_memory[i].addr;
ram_resource[i].end = m68k_memory[i].addr+m68k_memory[i].size-1;
request_resource(&iomem_resource, &ram_resource[i]);
}
/* initialize chipram allocator */
amiga_chip_init();
/* our beloved beeper */
if (AMIGAHW_PRESENT(AMI_AUDIO))
amiga_init_sound();
/*
* if it is an A3000, set the magic bit that forces
* a hard rekick
*/
if (AMIGAHW_PRESENT(MAGIC_REKICK))
*(unsigned char *)ZTWO_VADDR(0xde0002) |= 0x80;
}
static unsigned short jiffy_ticks;
static void __init amiga_sched_init(irq_handler_t timer_routine)
{
static struct resource sched_res = {
.name = "timer", .start = 0x00bfd400, .end = 0x00bfd5ff,
};
jiffy_ticks = DIV_ROUND_CLOSEST(amiga_eclock, HZ);
if (request_resource(&mb_resources._ciab, &sched_res))
printk("Cannot allocate ciab.ta{lo,hi}\n");
ciab.cra &= 0xC0; /* turn off timer A, continuous mode, from Eclk */
ciab.talo = jiffy_ticks % 256;
ciab.tahi = jiffy_ticks / 256;
/* install interrupt service routine for CIAB Timer A
*
* Please don't change this to use ciaa, as it interferes with the
* SCSI code. We'll have to take a look at this later
*/
if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
}
#define TICK_SIZE 10000
/* This is always executed with interrupts disabled. */
static unsigned long amiga_gettimeoffset(void)
{
unsigned short hi, lo, hi2;
unsigned long ticks, offset = 0;
/* read CIA B timer A current value */
hi = ciab.tahi;
lo = ciab.talo;
hi2 = ciab.tahi;
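/* The 16-bit count is read one byte at a time; if the high byte
 * changed in between, a ripple occurred and talo must be re-read. */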
if (hi != hi2) {
lo = ciab.talo;
hi = hi2;
}
ticks = hi << 8 | lo;
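/*
 * The timer counts down from jiffy_ticks, so jiffy_ticks - ticks is
 * the part of the current 10 ms tick that has already elapsed.  If
 * less than half a tick has run down but an interrupt is already
 * pending, a full tick boundary was crossed and 10000 us are added
 * so time does not appear to step backwards.
 */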
if (ticks > jiffy_ticks / 2)
/* check for pending interrupt */
if (cia_set_irq(&ciab_base, 0) & CIA_ICR_TA)
offset = 10000;
ticks = jiffy_ticks - ticks;
ticks = (10000 * ticks) / jiffy_ticks;
return ticks + offset;
}
static void amiga_reset(void) __noreturn;
static void amiga_reset(void)
{
unsigned long jmp_addr040 = virt_to_phys(&&jmp_addr_label040);
unsigned long jmp_addr = virt_to_phys(&&jmp_addr_label);
local_irq_disable();
if (CPU_IS_040_OR_060)
/* Setup transparent translation registers for mapping
* of 16 MB kernel segment before disabling translation
*/
asm volatile ("\n"
" move.l %0,%%d0\n"
" and.l #0xff000000,%%d0\n"
" or.w #0xe020,%%d0\n" /* map 16 MB, enable, cacheable */
" .chip 68040\n"
" movec %%d0,%%itt0\n"
" movec %%d0,%%dtt0\n"
" .chip 68k\n"
" jmp %0@\n"
: /* no outputs */
: "a" (jmp_addr040)
: "d0");
else
/* for 680[23]0, just disable translation and jump to the physical
* address of the label
*/
asm volatile ("\n"
" pmove %%tc,%@\n"
" bclr #7,%@\n"
" pmove %@,%%tc\n"
" jmp %0@\n"
: /* no outputs */
: "a" (jmp_addr));
jmp_addr_label040:
/* disable translation on '040 now */
asm volatile ("\n"
" moveq #0,%%d0\n"
" .chip 68040\n"
" movec %%d0,%%tc\n" /* disable MMU */
" .chip 68k\n"
: /* no outputs */
: /* no inputs */
: "d0");
jmp_addr_label:
/* pickup reset address from AmigaOS ROM, reset devices and jump
* to reset address
*/
asm volatile ("\n"
" move.w #0x2700,%sr\n"
" lea 0x01000000,%a0\n"
" sub.l %a0@(-0x14),%a0\n"
" move.l %a0@(4),%a0\n"
" subq.l #2,%a0\n"
" jra 1f\n"
/* align on a longword boundary */
" " __ALIGN_STR "\n"
"1:\n"
" reset\n"
" jmp %a0@");
for (;;)
;
}
/*
* Debugging
*/
#define SAVEKMSG_MAXMEM 128*1024
#define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */
#define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */
struct savekmsg {
unsigned long magic1; /* SAVEKMSG_MAGIC1 */
unsigned long magic2; /* SAVEKMSG_MAGIC2 */
unsigned long magicptr; /* address of magic1 */
unsigned long size;
char data[0];
};
static struct savekmsg *savekmsg;
static void amiga_mem_console_write(struct console *co, const char *s,
unsigned int count)
{
if (savekmsg->size + count <= SAVEKMSG_MAXMEM-sizeof(struct savekmsg)) {
memcpy(savekmsg->data + savekmsg->size, s, count);
savekmsg->size += count;
}
}
static int __init amiga_savekmsg_setup(char *arg)
{
if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
return 0;
if (amiga_chip_size < SAVEKMSG_MAXMEM) {
pr_err("Not enough chipram for debugging\n");
return -ENOMEM;
}
/* Just steal the block, the chipram allocator isn't functional yet */
amiga_chip_size -= SAVEKMSG_MAXMEM;
savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
savekmsg->magic1 = SAVEKMSG_MAGIC1;
savekmsg->magic2 = SAVEKMSG_MAGIC2;
savekmsg->magicptr = ZTWO_PADDR(savekmsg);
savekmsg->size = 0;
amiga_console_driver.write = amiga_mem_console_write;
register_console(&amiga_console_driver);
return 0;
}
early_param("debug", amiga_savekmsg_setup);
static void amiga_serial_putc(char c)
{
amiga_custom.serdat = (unsigned char)c | 0x100;
while (!(amiga_custom.serdatr & 0x2000))
;
}
static void amiga_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
while (count--) {
if (*s == '\n')
amiga_serial_putc('\r');
amiga_serial_putc(*s++);
}
}
#if 0
void amiga_serial_puts(const char *s)
{
amiga_serial_console_write(NULL, s, strlen(s));
}
int amiga_serial_console_wait_key(struct console *co)
{
int ch;
while (!(amiga_custom.intreqr & IF_RBF))
barrier();
ch = amiga_custom.serdatr & 0xff;
/* clear the interrupt, so that another character can be read */
amiga_custom.intreq = IF_RBF;
return ch;
}
void amiga_serial_gets(struct console *co, char *s, int len)
{
int ch, cnt = 0;
while (1) {
ch = amiga_serial_console_wait_key(co);
/* Check for backspace. */
if (ch == 8 || ch == 127) {
if (cnt == 0) {
amiga_serial_putc('\007');
continue;
}
cnt--;
amiga_serial_puts("\010 \010");
continue;
}
/* Check for enter. */
if (ch == 10 || ch == 13)
break;
/* See if line is too long. */
if (cnt >= len + 1) {
amiga_serial_putc(7);
cnt--;
continue;
}
/* Store and echo character. */
s[cnt++] = ch;
amiga_serial_putc(ch);
}
/* Print enter. */
amiga_serial_puts("\r\n");
s[cnt] = 0;
}
#endif
static int __init amiga_debug_setup(char *arg)
{
if (MACH_IS_AMIGA && !strcmp(arg, "ser")) {
/* no initialization required (?) */
amiga_console_driver.write = amiga_serial_console_write;
register_console(&amiga_console_driver);
}
return 0;
}
early_param("debug", amiga_debug_setup);
#ifdef CONFIG_HEARTBEAT
static void amiga_heartbeat(int on)
{
if (on)
ciaa.pra &= ~2;
else
ciaa.pra |= 2;
}
#endif
/*
* Amiga specific parts of /proc
*/
static void amiga_get_model(char *model)
{
strcpy(model, amiga_model_name);
}
static void amiga_get_hardware_list(struct seq_file *m)
{
if (AMIGAHW_PRESENT(CHIP_RAM))
seq_printf(m, "Chip RAM:\t%ldK\n", amiga_chip_size>>10);
seq_printf(m, "PS Freq:\t%dHz\nEClock Freq:\t%ldHz\n",
amiga_psfreq, amiga_eclock);
if (AMIGAHW_PRESENT(AMI_VIDEO)) {
char *type;
switch (amiga_chipset) {
case CS_OCS:
type = "OCS";
break;
case CS_ECS:
type = "ECS";
break;
case CS_AGA:
type = "AGA";
break;
default:
type = "Old or Unknown";
break;
}
seq_printf(m, "Graphics:\t%s\n", type);
}
#define AMIGAHW_ANNOUNCE(name, str) \
if (AMIGAHW_PRESENT(name)) \
seq_printf (m, "\t%s\n", str)
seq_printf (m, "Detected hardware:\n");
AMIGAHW_ANNOUNCE(AMI_VIDEO, "Amiga Video");
AMIGAHW_ANNOUNCE(AMI_BLITTER, "Blitter");
AMIGAHW_ANNOUNCE(AMBER_FF, "Amber Flicker Fixer");
AMIGAHW_ANNOUNCE(AMI_AUDIO, "Amiga Audio");
AMIGAHW_ANNOUNCE(AMI_FLOPPY, "Floppy Controller");
AMIGAHW_ANNOUNCE(A3000_SCSI, "SCSI Controller WD33C93 (A3000 style)");
AMIGAHW_ANNOUNCE(A4000_SCSI, "SCSI Controller NCR53C710 (A4000T style)");
AMIGAHW_ANNOUNCE(A1200_IDE, "IDE Interface (A1200 style)");
AMIGAHW_ANNOUNCE(A4000_IDE, "IDE Interface (A4000 style)");
AMIGAHW_ANNOUNCE(CD_ROM, "Internal CD ROM drive");
AMIGAHW_ANNOUNCE(AMI_KEYBOARD, "Keyboard");
AMIGAHW_ANNOUNCE(AMI_MOUSE, "Mouse Port");
AMIGAHW_ANNOUNCE(AMI_SERIAL, "Serial Port");
AMIGAHW_ANNOUNCE(AMI_PARALLEL, "Parallel Port");
AMIGAHW_ANNOUNCE(A2000_CLK, "Hardware Clock (A2000 style)");
AMIGAHW_ANNOUNCE(A3000_CLK, "Hardware Clock (A3000 style)");
AMIGAHW_ANNOUNCE(CHIP_RAM, "Chip RAM");
AMIGAHW_ANNOUNCE(PAULA, "Paula 8364");
AMIGAHW_ANNOUNCE(DENISE, "Denise 8362");
AMIGAHW_ANNOUNCE(DENISE_HR, "Denise 8373");
AMIGAHW_ANNOUNCE(LISA, "Lisa 8375");
AMIGAHW_ANNOUNCE(AGNUS_PAL, "Normal/Fat PAL Agnus 8367/8371");
AMIGAHW_ANNOUNCE(AGNUS_NTSC, "Normal/Fat NTSC Agnus 8361/8370");
AMIGAHW_ANNOUNCE(AGNUS_HR_PAL, "Fat Hires PAL Agnus 8372");
AMIGAHW_ANNOUNCE(AGNUS_HR_NTSC, "Fat Hires NTSC Agnus 8372");
AMIGAHW_ANNOUNCE(ALICE_PAL, "PAL Alice 8374");
AMIGAHW_ANNOUNCE(ALICE_NTSC, "NTSC Alice 8374");
AMIGAHW_ANNOUNCE(MAGIC_REKICK, "Magic Hard Rekick");
AMIGAHW_ANNOUNCE(PCMCIA, "PCMCIA Slot");
#ifdef CONFIG_ZORRO
if (AMIGAHW_PRESENT(ZORRO))
seq_printf(m, "\tZorro II%s AutoConfig: %d Expansion "
"Device%s\n",
AMIGAHW_PRESENT(ZORRO3) ? "I" : "",
zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
#endif /* CONFIG_ZORRO */
#undef AMIGAHW_ANNOUNCE
}
/*
* The Amiga keyboard driver needs key_maps, but we cannot export it in
* drivers/char/defkeymap.c, as it is autogenerated
*/
#ifdef CONFIG_HW_CONSOLE
EXPORT_SYMBOL_GPL(key_maps);
#endif
| gpl-2.0 |
Jackeagle/kernel_stock_e53g | drivers/hid/hid-ezkey.c | 4686 | 2250 | /*
* HID driver for some ezkey "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2006-2007 Jiri Kosina
* Copyright (c) 2008 Jiri Slaby
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#define ez_map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
#define ez_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c))
static int ez_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
return 0;
switch (usage->hid & HID_USAGE) {
case 0x230: ez_map_key(BTN_MOUSE); break;
case 0x231: ez_map_rel(REL_WHEEL); break;
/*
* this keyboard has a scrollwheel implemented in a
* totally broken way. We map this usage temporarily
* to HWHEEL and handle it in the event quirk handler
*/
case 0x232: ez_map_rel(REL_HWHEEL); break;
default:
return 0;
}
return 1;
}
static int ez_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
!usage->type)
return 0;
/* handle the temporary quirky mapping to HWHEEL */
if (usage->type == EV_REL && usage->code == REL_HWHEEL) {
struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, REL_WHEEL, -value);
return 1;
}
return 0;
}
static const struct hid_device_id ez_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ }
};
MODULE_DEVICE_TABLE(hid, ez_devices);
static struct hid_driver ez_driver = {
.name = "ezkey",
.id_table = ez_devices,
.input_mapping = ez_input_mapping,
.event = ez_event,
};
module_hid_driver(ez_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
mautz-kernel/SFOS-hammerhead-custom-kernel | drivers/hwmon/i5k_amb.c | 4942 | 16331 | /*
* A hwmon driver for the Intel 5000 series chipset FB-DIMM AMB
* temperature sensors
* Copyright (C) 2007 IBM
*
* Author: Darrick J. Wong <djwong@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define DRVNAME "i5k_amb"
#define I5K_REG_AMB_BASE_ADDR 0x48
#define I5K_REG_AMB_LEN_ADDR 0x50
#define I5K_REG_CHAN0_PRESENCE_ADDR 0x64
#define I5K_REG_CHAN1_PRESENCE_ADDR 0x66
#define AMB_REG_TEMP_MIN_ADDR 0x80
#define AMB_REG_TEMP_MID_ADDR 0x81
#define AMB_REG_TEMP_MAX_ADDR 0x82
#define AMB_REG_TEMP_STATUS_ADDR 0x84
#define AMB_REG_TEMP_ADDR 0x85
#define AMB_CONFIG_SIZE 2048
#define AMB_FUNC_3_OFFSET 768
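/*
 * Each AMB owns a 2 KB slice of the memory-mapped config space; the
 * helpers below locate its function-3 thermal registers inside that
 * slice.
 */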
static unsigned long amb_reg_temp_status(unsigned int amb)
{
return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_STATUS_ADDR +
AMB_CONFIG_SIZE * amb;
}
static unsigned long amb_reg_temp_min(unsigned int amb)
{
return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MIN_ADDR +
AMB_CONFIG_SIZE * amb;
}
static unsigned long amb_reg_temp_mid(unsigned int amb)
{
return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MID_ADDR +
AMB_CONFIG_SIZE * amb;
}
static unsigned long amb_reg_temp_max(unsigned int amb)
{
return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_MAX_ADDR +
AMB_CONFIG_SIZE * amb;
}
static unsigned long amb_reg_temp(unsigned int amb)
{
return AMB_FUNC_3_OFFSET + AMB_REG_TEMP_ADDR +
AMB_CONFIG_SIZE * amb;
}
#define MAX_MEM_CHANNELS 4
#define MAX_AMBS_PER_CHANNEL 16
#define MAX_AMBS (MAX_MEM_CHANNELS * \
MAX_AMBS_PER_CHANNEL)
#define CHANNEL_SHIFT 4
#define DIMM_MASK 0xF
/*
* Ugly hack: For some reason the highest bit is set if there
* are _any_ DIMMs in the channel. Attempting to read from
* this "high-order" AMB results in a memory bus error, so
* for now we'll just ignore that top bit, even though that
* might prevent us from seeing the 16th DIMM in the channel.
*/
#define REAL_MAX_AMBS_PER_CHANNEL 15
#define KNOBS_PER_AMB 6
static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit)
{
return byte_num * MAX_AMBS_PER_CHANNEL + bit;
}
#define AMB_SYSFS_NAME_LEN 16
struct i5k_device_attribute {
struct sensor_device_attribute s_attr;
char name[AMB_SYSFS_NAME_LEN];
};
struct i5k_amb_data {
struct device *hwmon_dev;
unsigned long amb_base;
unsigned long amb_len;
u16 amb_present[MAX_MEM_CHANNELS];
void __iomem *amb_mmio;
struct i5k_device_attribute *attrs;
unsigned int num_attrs;
};
static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
char *buf)
{
return sprintf(buf, "%s\n", DRVNAME);
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct platform_device *amb_pdev;
static u8 amb_read_byte(struct i5k_amb_data *data, unsigned long offset)
{
return ioread8(data->amb_mmio + offset);
}
static void amb_write_byte(struct i5k_amb_data *data, unsigned long offset,
u8 val)
{
iowrite8(val, data->amb_mmio + offset);
}
static ssize_t show_amb_alarm(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
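/* Alarm when the temperature status register has bit 0x08 set and
 * bit 0x20 clear; the bit semantics are inferred from this driver's
 * usage, not from chipset documentation. */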
if (!(amb_read_byte(data, amb_reg_temp_status(attr->index)) & 0x20) &&
(amb_read_byte(data, amb_reg_temp_status(attr->index)) & 0x8))
return sprintf(buf, "1\n");
else
return sprintf(buf, "0\n");
}
static ssize_t store_amb_min(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
unsigned long temp;
int ret = kstrtoul(buf, 10, &temp);
if (ret < 0)
return ret;
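/* sysfs temperatures are in millidegrees C; the AMB registers hold
 * 0.5 degC steps, so scale down and clamp to the 8-bit range (the
 * mid and max stores below do the same conversion). */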
temp = temp / 500;
if (temp > 255)
temp = 255;
amb_write_byte(data, amb_reg_temp_min(attr->index), temp);
return count;
}
static ssize_t store_amb_mid(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
unsigned long temp;
int ret = kstrtoul(buf, 10, &temp);
if (ret < 0)
return ret;
temp = temp / 500;
if (temp > 255)
temp = 255;
amb_write_byte(data, amb_reg_temp_mid(attr->index), temp);
return count;
}
static ssize_t store_amb_max(struct device *dev,
struct device_attribute *devattr,
const char *buf,
size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
unsigned long temp;
int ret = kstrtoul(buf, 10, &temp);
if (ret < 0)
return ret;
temp = temp / 500;
if (temp > 255)
temp = 255;
amb_write_byte(data, amb_reg_temp_max(attr->index), temp);
return count;
}
static ssize_t show_amb_min(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
500 * amb_read_byte(data, amb_reg_temp_min(attr->index)));
}
static ssize_t show_amb_mid(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
500 * amb_read_byte(data, amb_reg_temp_mid(attr->index)));
}
static ssize_t show_amb_max(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
500 * amb_read_byte(data, amb_reg_temp_max(attr->index)));
}
static ssize_t show_amb_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i5k_amb_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
500 * amb_read_byte(data, amb_reg_temp(attr->index)));
}
static ssize_t show_label(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT,
attr->index & DIMM_MASK);
}
static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
{
int i, j, k, d = 0;
u16 c;
int res = 0;
int num_ambs = 0;
struct i5k_amb_data *data = platform_get_drvdata(pdev);
/* Count the number of AMBs found */
/* ignore the high-order bit, see "Ugly hack" comment above */
for (i = 0; i < MAX_MEM_CHANNELS; i++)
num_ambs += hweight16(data->amb_present[i] & 0x7fff);
/* Set up sysfs stuff */
data->attrs = kzalloc(sizeof(*data->attrs) * num_ambs * KNOBS_PER_AMB,
GFP_KERNEL);
if (!data->attrs)
return -ENOMEM;
data->num_attrs = 0;
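/*
 * Walk the per-channel DIMM presence bitmaps and create the six sysfs
 * knobs (label, input, min, mid, max, alarm) for every populated AMB;
 * the attribute index packs channel and DIMM as (channel << 4) | dimm.
 */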
for (i = 0; i < MAX_MEM_CHANNELS; i++) {
c = data->amb_present[i];
for (j = 0; j < REAL_MAX_AMBS_PER_CHANNEL; j++, c >>= 1) {
struct i5k_device_attribute *iattr;
k = amb_num_from_reg(i, j);
if (!(c & 0x1))
continue;
d++;
/* sysfs label */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_label", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_label;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
/* Temperature sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_input", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_temp;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
/* Temperature min sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_min", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_min;
iattr->s_attr.dev_attr.store = store_amb_min;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
/* Temperature mid sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_mid", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_mid;
iattr->s_attr.dev_attr.store = store_amb_mid;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
/* Temperature max sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_max", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IWUSR | S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_max;
iattr->s_attr.dev_attr.store = store_amb_max;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
/* Temperature alarm sysfs knob */
iattr = data->attrs + data->num_attrs;
snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
"temp%d_alarm", d);
iattr->s_attr.dev_attr.attr.name = iattr->name;
iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
iattr->s_attr.dev_attr.show = show_amb_alarm;
iattr->s_attr.index = k;
sysfs_attr_init(&iattr->s_attr.dev_attr.attr);
res = device_create_file(&pdev->dev,
&iattr->s_attr.dev_attr);
if (res)
goto exit_remove;
data->num_attrs++;
}
}
res = device_create_file(&pdev->dev, &dev_attr_name);
if (res)
goto exit_remove;
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
res = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return res;
exit_remove:
device_remove_file(&pdev->dev, &dev_attr_name);
for (i = 0; i < data->num_attrs; i++)
device_remove_file(&pdev->dev, &data->attrs[i].s_attr.dev_attr);
kfree(data->attrs);
return res;
}
static int __devinit i5k_amb_add(void)
{
int res = -ENODEV;
/* only ever going to be one of these */
amb_pdev = platform_device_alloc(DRVNAME, 0);
if (!amb_pdev)
return -ENOMEM;
res = platform_device_add(amb_pdev);
if (res)
goto err;
return 0;
err:
platform_device_put(amb_pdev);
return res;
}
static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data,
unsigned long devid)
{
struct pci_dev *pcidev;
u32 val32;
int res = -ENODEV;
/* Find AMB register memory space */
pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
devid,
NULL);
if (!pcidev)
return -ENODEV;
if (pci_read_config_dword(pcidev, I5K_REG_AMB_BASE_ADDR, &val32))
goto out;
data->amb_base = val32;
if (pci_read_config_dword(pcidev, I5K_REG_AMB_LEN_ADDR, &val32))
goto out;
data->amb_len = val32;
/* Is it big enough? */
if (data->amb_len < AMB_CONFIG_SIZE * MAX_AMBS) {
dev_err(&pcidev->dev, "AMB region too small!\n");
goto out;
}
res = 0;
out:
pci_dev_put(pcidev);
return res;
}
static int __devinit i5k_channel_probe(u16 *amb_present, unsigned long dev_id)
{
struct pci_dev *pcidev;
u16 val16;
int res = -ENODEV;
/* Copy the DIMM presence map for these two channels */
pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
if (!pcidev)
return -ENODEV;
if (pci_read_config_word(pcidev, I5K_REG_CHAN0_PRESENCE_ADDR, &val16))
goto out;
amb_present[0] = val16;
if (pci_read_config_word(pcidev, I5K_REG_CHAN1_PRESENCE_ADDR, &val16))
goto out;
amb_present[1] = val16;
res = 0;
out:
pci_dev_put(pcidev);
return res;
}
static struct {
unsigned long err;
unsigned long fbd0;
} chipset_ids[] __devinitdata = {
{ PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 },
{ PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 },
{ 0, 0 }
};
#ifdef MODULE
static struct pci_device_id i5k_amb_ids[] __devinitdata = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, i5k_amb_ids);
#endif
static int __devinit i5k_amb_probe(struct platform_device *pdev)
{
struct i5k_amb_data *data;
struct resource *reso;
int i, res;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/* Figure out where the AMB registers live */
i = 0;
do {
res = i5k_find_amb_registers(data, chipset_ids[i].err);
if (res == 0)
break;
i++;
} while (chipset_ids[i].err);
if (res)
goto err;
/* Copy the DIMM presence map for the first two channels */
res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0);
if (res)
goto err;
/* Copy the DIMM presence map for the optional second two channels */
i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1);
/* Set up resource regions */
reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME);
if (!reso) {
res = -EBUSY;
goto err;
}
data->amb_mmio = ioremap_nocache(data->amb_base, data->amb_len);
if (!data->amb_mmio) {
res = -EBUSY;
goto err_map_failed;
}
platform_set_drvdata(pdev, data);
res = i5k_amb_hwmon_init(pdev);
if (res)
goto err_init_failed;
return res;
err_init_failed:
iounmap(data->amb_mmio);
platform_set_drvdata(pdev, NULL);
err_map_failed:
release_mem_region(data->amb_base, data->amb_len);
err:
kfree(data);
return res;
}
static int __devexit i5k_amb_remove(struct platform_device *pdev)
{
int i;
struct i5k_amb_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->hwmon_dev);
device_remove_file(&pdev->dev, &dev_attr_name);
for (i = 0; i < data->num_attrs; i++)
device_remove_file(&pdev->dev, &data->attrs[i].s_attr.dev_attr);
kfree(data->attrs);
iounmap(data->amb_mmio);
release_mem_region(data->amb_base, data->amb_len);
platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
}
static struct platform_driver i5k_amb_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
},
.probe = i5k_amb_probe,
.remove = __devexit_p(i5k_amb_remove),
};
static int __init i5k_amb_init(void)
{
int res;
res = platform_driver_register(&i5k_amb_driver);
if (res)
return res;
res = i5k_amb_add();
if (res)
platform_driver_unregister(&i5k_amb_driver);
return res;
}
static void __exit i5k_amb_exit(void)
{
platform_device_unregister(amb_pdev);
platform_driver_unregister(&i5k_amb_driver);
}
MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("Intel 5000 chipset FB-DIMM AMB temperature sensor");
MODULE_LICENSE("GPL");
module_init(i5k_amb_init);
module_exit(i5k_amb_exit);
| gpl-2.0 |
agat63/AGAT_L720_kernel | drivers/gpu/drm/radeon/radeon_encoders.c | 4942 | 11912 | /*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *clone_encoder;
uint32_t index_mask = 0;
int count;
/* DIG routing gets problematic */
if (rdev->family >= CHIP_R600)
return index_mask;
/* LVDS/TV are too wacky */
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return index_mask;
/* DVO requires 2x ppll clocks depending on tmds chip */
if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
return index_mask;
count = -1;
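/*
 * Build a bitmask of the other encoders this one may share a CRTC
 * with ("clones"): on pre-R600 parts, any encoder except LVDS/TV and
 * DVO, identified by its position in the mode_config encoder list.
 */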
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
count++;
if (clone_encoder == encoder)
continue;
if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
continue;
if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
continue;
else
index_mask |= (1 << count);
}
return index_mask;
}
void radeon_setup_encoder_clones(struct drm_device *dev)
{
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->possible_clones = radeon_encoder_clones(encoder);
}
}
uint32_t
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
struct radeon_device *rdev = dev->dev_private;
uint32_t ret = 0;
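/*
 * Map a legacy BIOS "supported device" plus DAC selector onto the
 * matching encoder object enum; integrated chipsets and AVIVO parts
 * route the same devices through different internal encoders, hence
 * the per-family special cases.
 */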
switch (supported_device) {
case ATOM_DEVICE_CRT1_SUPPORT:
case ATOM_DEVICE_TV1_SUPPORT:
case ATOM_DEVICE_TV2_SUPPORT:
case ATOM_DEVICE_CRT2_SUPPORT:
case ATOM_DEVICE_CV_SUPPORT:
switch (dac) {
case 1: /* dac a */
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
else
ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
break;
case 2: /* dac b */
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
else {
/*if (rdev->family == CHIP_R200)
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else*/
ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
}
break;
case 3: /* external dac */
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
}
break;
case ATOM_DEVICE_LCD1_SUPPORT:
if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
else
ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
break;
case ATOM_DEVICE_DFP1_SUPPORT:
if ((rdev->family == CHIP_RS300) ||
(rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480))
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
else
ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
break;
case ATOM_DEVICE_LCD2_SUPPORT:
case ATOM_DEVICE_DFP2_SUPPORT:
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
else if (ASIC_IS_AVIVO(rdev))
ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
else
ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
break;
case ATOM_DEVICE_DFP3_SUPPORT:
ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
break;
}
return ret;
}
void
radeon_link_encoder_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
/* walk the list and link encoders to connectors */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices)
drm_mode_connector_attach_encoder(connector, encoder);
}
}
}
void radeon_encoder_set_active_device(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
radeon_encoder->active_device, radeon_encoder->devices,
radeon_connector->devices, encoder->encoder_type);
}
}
}
struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
}
struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
if (radeon_encoder->devices & radeon_connector->devices)
return connector;
}
return NULL;
}
struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
if (radeon_encoder->is_ext_encoder)
return NULL;
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
if (other_encoder == encoder)
continue;
other_radeon_encoder = to_radeon_encoder(other_encoder);
if (other_radeon_encoder->is_ext_encoder &&
(radeon_encoder->devices & other_radeon_encoder->devices))
return other_encoder;
}
return NULL;
}
u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
{
struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return radeon_encoder->encoder_id;
default:
return ENCODER_OBJECT_ID_NONE;
}
}
return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
unsigned hblank = native_mode->htotal - native_mode->hdisplay;
unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
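/*
 * Rebuild the adjusted mode around the panel's native timing: the
 * native pixel clock, flags, blanking and sync geometry are reused,
 * and on AVIVO parts the native active size is forced as well, since
 * the scaler takes care of smaller requested modes.
 */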
adjusted_mode->clock = native_mode->clock;
adjusted_mode->flags = native_mode->flags;
if (ASIC_IS_AVIVO(rdev)) {
adjusted_mode->hdisplay = native_mode->hdisplay;
adjusted_mode->vdisplay = native_mode->vdisplay;
}
adjusted_mode->htotal = native_mode->hdisplay + hblank;
adjusted_mode->hsync_start = native_mode->hdisplay + hover;
adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
adjusted_mode->vtotal = native_mode->vdisplay + vblank;
adjusted_mode->vsync_start = native_mode->vdisplay + vover;
adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
if (ASIC_IS_AVIVO(rdev)) {
adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
}
adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
}
bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
*/
if (!connector)
connector = radeon_get_connector_for_encoder_init(encoder);
radeon_connector = to_radeon_connector(connector);
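/* Single-link TMDS tops out at a 165 MHz pixel clock; DCE6 parts
 * driving an HDMI 1.3 sink manage 340 MHz on one link, and DP sinks
 * never need dual link. */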
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 MHz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
return false;
} else {
if (pixel_clock > 165000)
return true;
else
return false;
}
} else
return false;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return false;
else {
/* HDMI 1.3 supports up to 340 MHz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
return false;
} else {
if (pixel_clock > 165000)
return true;
else
return false;
}
}
default:
return false;
}
}
| gpl-2.0 |
holyangel/HTC_M8_GPE-4.4.3 | arch/arm/mach-pxa/littleton.c | 4942 | 10660 | /*
* linux/arch/arm/mach-pxa/littleton.c
*
* Support for the Marvell Littleton Development Platform.
*
* Author: Jason Chagas (largely modified code)
* Created: Nov 20, 2006
* Copyright: (C) Copyright 2006 Marvell International Ltd.
*
* 2007-11-22 modified to align with latest kernel
* eric miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/smc91x.h>
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/mfd/da903x.h>
#include <linux/i2c/max732x.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/pxa300.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <plat/pxa27x_keypad.h>
#include <mach/littleton.h>
#include <plat/pxa3xx_nand.h>
#include "generic.h"
#define GPIO_MMC1_CARD_DETECT mfp_to_gpio(MFP_PIN_GPIO15)
/* Littleton MFP configurations */
static mfp_cfg_t littleton_mfp_cfg[] __initdata = {
/* LCD */
GPIO54_LCD_LDD_0,
GPIO55_LCD_LDD_1,
GPIO56_LCD_LDD_2,
GPIO57_LCD_LDD_3,
GPIO58_LCD_LDD_4,
GPIO59_LCD_LDD_5,
GPIO60_LCD_LDD_6,
GPIO61_LCD_LDD_7,
GPIO62_LCD_LDD_8,
GPIO63_LCD_LDD_9,
GPIO64_LCD_LDD_10,
GPIO65_LCD_LDD_11,
GPIO66_LCD_LDD_12,
GPIO67_LCD_LDD_13,
GPIO68_LCD_LDD_14,
GPIO69_LCD_LDD_15,
GPIO70_LCD_LDD_16,
GPIO71_LCD_LDD_17,
GPIO72_LCD_FCLK,
GPIO73_LCD_LCLK,
GPIO74_LCD_PCLK,
GPIO75_LCD_BIAS,
/* SSP2 */
GPIO25_SSP2_SCLK,
GPIO27_SSP2_TXD,
GPIO17_GPIO, /* SFRM as chip-select */
/* Debug Ethernet */
GPIO90_GPIO,
/* Keypad */
GPIO107_KP_DKIN_0,
GPIO108_KP_DKIN_1,
GPIO115_KP_MKIN_0,
GPIO116_KP_MKIN_1,
GPIO117_KP_MKIN_2,
GPIO118_KP_MKIN_3,
GPIO119_KP_MKIN_4,
GPIO120_KP_MKIN_5,
GPIO121_KP_MKOUT_0,
GPIO122_KP_MKOUT_1,
GPIO123_KP_MKOUT_2,
GPIO124_KP_MKOUT_3,
GPIO125_KP_MKOUT_4,
/* MMC1 */
GPIO3_MMC1_DAT0,
GPIO4_MMC1_DAT1,
GPIO5_MMC1_DAT2,
GPIO6_MMC1_DAT3,
GPIO7_MMC1_CLK,
GPIO8_MMC1_CMD,
GPIO15_GPIO, /* card detect */
/* UART3 */
GPIO107_UART3_CTS,
GPIO108_UART3_RTS,
GPIO109_UART3_TXD,
GPIO110_UART3_RXD,
};
static struct resource smc91x_resources[] = {
[0] = {
.start = (LITTLETON_ETH_PHYS + 0x300),
.end = (LITTLETON_ETH_PHYS + 0xfffff),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO90)),
.end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO90)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
}
};
static struct smc91x_platdata littleton_smc91x_info = {
.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT |
SMC91X_NOWAIT | SMC91X_USE_DMA,
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &littleton_smc91x_info,
},
};
#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
static struct pxafb_mode_info tpo_tdo24mtea1_modes[] = {
[0] = {
/* VGA */
.pixclock = 38250,
.xres = 480,
.yres = 640,
.bpp = 16,
.hsync_len = 8,
.left_margin = 8,
.right_margin = 24,
.vsync_len = 2,
.upper_margin = 2,
.lower_margin = 4,
.sync = 0,
},
[1] = {
/* QVGA */
.pixclock = 153000,
.xres = 240,
.yres = 320,
.bpp = 16,
.hsync_len = 8,
.left_margin = 8,
.right_margin = 88,
.vsync_len = 2,
.upper_margin = 2,
.lower_margin = 2,
.sync = 0,
},
};
static struct pxafb_mach_info littleton_lcd_info = {
.modes = tpo_tdo24mtea1_modes,
.num_modes = 2,
.lcd_conn = LCD_COLOR_TFT_16BPP,
};
static void littleton_init_lcd(void)
{
pxa_set_fb_info(NULL, &littleton_lcd_info);
}
#else
static inline void littleton_init_lcd(void) {};
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_master littleton_spi_info = {
.num_chipselect = 1,
};
static struct pxa2xx_spi_chip littleton_tdo24m_chip = {
.rx_threshold = 1,
.tx_threshold = 1,
.gpio_cs = LITTLETON_GPIO_LCD_CS,
};
static struct spi_board_info littleton_spi_devices[] __initdata = {
{
.modalias = "tdo24m",
.max_speed_hz = 1000000,
.bus_num = 2,
.chip_select = 0,
.controller_data= &littleton_tdo24m_chip,
},
};
static void __init littleton_init_spi(void)
{
pxa2xx_set_spi_info(2, &littleton_spi_info);
spi_register_board_info(ARRAY_AND_SIZE(littleton_spi_devices));
}
#else
static inline void littleton_init_spi(void) {}
#endif
#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
static unsigned int littleton_matrix_key_map[] = {
/* KEY(row, col, key_code) */
KEY(1, 3, KEY_0), KEY(0, 0, KEY_1), KEY(1, 0, KEY_2), KEY(2, 0, KEY_3),
KEY(0, 1, KEY_4), KEY(1, 1, KEY_5), KEY(2, 1, KEY_6), KEY(0, 2, KEY_7),
KEY(1, 2, KEY_8), KEY(2, 2, KEY_9),
KEY(0, 3, KEY_KPASTERISK), /* * */
KEY(2, 3, KEY_KPDOT), /* # */
KEY(5, 4, KEY_ENTER),
KEY(5, 0, KEY_UP),
KEY(5, 1, KEY_DOWN),
KEY(5, 2, KEY_LEFT),
KEY(5, 3, KEY_RIGHT),
KEY(3, 2, KEY_HOME),
KEY(4, 1, KEY_END),
KEY(3, 3, KEY_BACK),
KEY(4, 0, KEY_SEND),
KEY(4, 2, KEY_VOLUMEUP),
KEY(4, 3, KEY_VOLUMEDOWN),
KEY(3, 0, KEY_F22), /* soft1 */
KEY(3, 1, KEY_F23), /* soft2 */
};
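/*
* KEY(row, col, code) packs a matrix position and key code into a
* single unsigned int (typically row << 24 | col << 16 | code, per
* matrix_keypad.h); the pxa27x keypad driver walks this table to turn
* scan positions into input events.
*/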
static struct pxa27x_keypad_platform_data littleton_keypad_info = {
.matrix_key_rows = 6,
.matrix_key_cols = 5,
.matrix_key_map = littleton_matrix_key_map,
.matrix_key_map_size = ARRAY_SIZE(littleton_matrix_key_map),
.enable_rotary0 = 1,
.rotary0_up_key = KEY_UP,
.rotary0_down_key = KEY_DOWN,
.debounce_interval = 30,
};
static void __init littleton_init_keypad(void)
{
pxa_set_keypad_info(&littleton_keypad_info);
}
#else
static inline void littleton_init_keypad(void) {}
#endif
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data littleton_mci_platform_data = {
.detect_delay_ms = 200,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_card_detect = GPIO_MMC1_CARD_DETECT,
.gpio_card_ro = -1,
.gpio_power = -1,
};
static void __init littleton_init_mmc(void)
{
pxa_set_mci_info(&littleton_mci_platform_data);
}
#else
static inline void littleton_init_mmc(void) {}
#endif
#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
static struct mtd_partition littleton_nand_partitions[] = {
[0] = {
.name = "Bootloader",
.offset = 0,
.size = 0x060000,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
[1] = {
.name = "Kernel",
.offset = 0x060000,
.size = 0x200000,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
[2] = {
.name = "Filesystem",
.offset = 0x0260000,
.size = 0x3000000, /* 48M - rootfs */
},
[3] = {
.name = "MassStorage",
.offset = 0x3260000,
.size = 0x3d40000,
},
[4] = {
.name = "BBT",
.offset = 0x6FA0000,
.size = 0x80000,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
/* NOTE: we reserve some blocks at the end of the NAND flash for
* bad block management, and the max number of relocation blocks
* differs on different platforms. Please take care with it when
* defining the partition table.
*/
};
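/*
* Layout sanity check: the partitions tile the flash contiguously --
* 0x60000 + 0x200000 = 0x260000, 0x260000 + 0x3000000 = 0x3260000,
* 0x3260000 + 0x3d40000 = 0x6fa0000 -- and the BBT ends at 0x7020000
* (~112.1 MiB). Assuming a 128 MiB part, that leaves the tail of the
* chip free for the bad-block relocation mentioned in the note above.
*/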
static struct pxa3xx_nand_platform_data littleton_nand_info = {
.enable_arbiter = 1,
.num_cs = 1,
.parts[0] = littleton_nand_partitions,
.nr_parts[0] = ARRAY_SIZE(littleton_nand_partitions),
};
static void __init littleton_init_nand(void)
{
pxa3xx_set_nand_info(&littleton_nand_info);
}
#else
static inline void littleton_init_nand(void) {}
#endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */
#if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE)
static struct led_info littleton_da9034_leds[] = {
[0] = {
.name = "littleton:keypad1",
.flags = DA9034_LED_RAMP,
},
[1] = {
.name = "littleton:keypad2",
.flags = DA9034_LED_RAMP,
},
[2] = {
.name = "littleton:vibra",
.flags = 0,
},
};
static struct da9034_touch_pdata littleton_da9034_touch = {
.x_inverted = 1,
.interval_ms = 20,
};
static struct da903x_subdev_info littleton_da9034_subdevs[] = {
{
.name = "da903x-led",
.id = DA9034_ID_LED_1,
.platform_data = &littleton_da9034_leds[0],
}, {
.name = "da903x-led",
.id = DA9034_ID_LED_2,
.platform_data = &littleton_da9034_leds[1],
}, {
.name = "da903x-led",
.id = DA9034_ID_VIBRA,
.platform_data = &littleton_da9034_leds[2],
}, {
.name = "da903x-backlight",
.id = DA9034_ID_WLED,
}, {
.name = "da9034-touch",
.id = DA9034_ID_TOUCH,
.platform_data = &littleton_da9034_touch,
},
};
static struct da903x_platform_data littleton_da9034_info = {
.num_subdevs = ARRAY_SIZE(littleton_da9034_subdevs),
.subdevs = littleton_da9034_subdevs,
};
static struct max732x_platform_data littleton_max7320_info = {
.gpio_base = EXT0_GPIO_BASE,
};
static struct i2c_board_info littleton_i2c_info[] = {
[0] = {
.type = "da9034",
.addr = 0x34,
.platform_data = &littleton_da9034_info,
.irq = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO18)),
},
[1] = {
.type = "max7320",
.addr = 0x50,
.platform_data = &littleton_max7320_info,
},
};
static void __init littleton_init_i2c(void)
{
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(littleton_i2c_info));
}
#else
static inline void littleton_init_i2c(void) {}
#endif /* CONFIG_I2C_PXA || CONFIG_I2C_PXA_MODULE */
static void __init littleton_init(void)
{
/* initialize MFP configurations */
pxa3xx_mfp_config(ARRAY_AND_SIZE(littleton_mfp_cfg));
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
/*
* Note: we depend on the bootloader to set the correct
* MSC register value for the SMC91x.
*/
platform_device_register(&smc91x_device);
littleton_init_spi();
littleton_init_i2c();
littleton_init_mmc();
littleton_init_lcd();
littleton_init_keypad();
littleton_init_nand();
}
MACHINE_START(LITTLETON, "Marvell Form Factor Development Platform (aka Littleton)")
.atag_offset = 0x100,
.map_io = pxa3xx_map_io,
.nr_irqs = LITTLETON_NR_IRQS,
.init_irq = pxa3xx_init_irq,
.handle_irq = pxa3xx_handle_irq,
.timer = &pxa_timer,
.init_machine = littleton_init,
.restart = pxa_restart,
MACHINE_END
| gpl-2.0 |
demo330/ZTE_MAX_N9520 | arch/avr32/oprofile/op_model_avr32.c | 6734 | 5199 | /*
* AVR32 Performance Counter Driver
*
* Copyright (C) 2005-2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Author: Ronny Pedersen
*/
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <asm/sysreg.h>
#define AVR32_PERFCTR_IRQ_GROUP 0
#define AVR32_PERFCTR_IRQ_LINE 1
void avr32_backtrace(struct pt_regs * const regs, unsigned int depth);
enum { PCCNT, PCNT0, PCNT1, NR_counter };
struct avr32_perf_counter {
unsigned long enabled;
unsigned long event;
unsigned long count;
unsigned long unit_mask;
unsigned long kernel;
unsigned long user;
u32 ie_mask;
u32 flag_mask;
};
static struct avr32_perf_counter counter[NR_counter] = {
{
.ie_mask = SYSREG_BIT(IEC),
.flag_mask = SYSREG_BIT(FC),
}, {
.ie_mask = SYSREG_BIT(IE0),
.flag_mask = SYSREG_BIT(F0),
}, {
.ie_mask = SYSREG_BIT(IE1),
.flag_mask = SYSREG_BIT(F1),
},
};
static void avr32_perf_counter_reset(void)
{
/* Reset all counters and disable/clear all interrupts */
sysreg_write(PCCR, (SYSREG_BIT(PCCR_R)
| SYSREG_BIT(PCCR_C)
| SYSREG_BIT(FC)
| SYSREG_BIT(F0)
| SYSREG_BIT(F1)));
}
static irqreturn_t avr32_perf_counter_interrupt(int irq, void *dev_id)
{
struct avr32_perf_counter *ctr = dev_id;
struct pt_regs *regs;
u32 pccr;
if (likely(!(intc_get_pending(AVR32_PERFCTR_IRQ_GROUP)
& (1 << AVR32_PERFCTR_IRQ_LINE))))
return IRQ_NONE;
regs = get_irq_regs();
pccr = sysreg_read(PCCR);
/* Clear the interrupt flags we're about to handle */
sysreg_write(PCCR, pccr);
/* PCCNT */
if (ctr->enabled && (pccr & ctr->flag_mask)) {
sysreg_write(PCCNT, -ctr->count);
oprofile_add_sample(regs, PCCNT);
}
ctr++;
/* PCNT0 */
if (ctr->enabled && (pccr & ctr->flag_mask)) {
sysreg_write(PCNT0, -ctr->count);
oprofile_add_sample(regs, PCNT0);
}
ctr++;
/* PCNT1 */
if (ctr->enabled && (pccr & ctr->flag_mask)) {
sysreg_write(PCNT1, -ctr->count);
oprofile_add_sample(regs, PCNT1);
}
return IRQ_HANDLED;
}
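/*
* The sysreg_write(PCxxx, -ctr->count) reloads above (and in the setup
* path below) use the usual negative-preload idiom: the counters count
* up, so starting them at -count makes them wrap after exactly 'count'
* events, setting the flag bit and re-raising this interrupt.
*/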
static int avr32_perf_counter_create_files(struct super_block *sb,
struct dentry *root)
{
struct dentry *dir;
unsigned int i;
char filename[4];
for (i = 0; i < NR_counter; i++) {
snprintf(filename, sizeof(filename), "%u", i);
dir = oprofilefs_mkdir(sb, root, filename);
oprofilefs_create_ulong(sb, dir, "enabled",
&counter[i].enabled);
oprofilefs_create_ulong(sb, dir, "event",
&counter[i].event);
oprofilefs_create_ulong(sb, dir, "count",
&counter[i].count);
/* Dummy entries */
oprofilefs_create_ulong(sb, dir, "kernel",
&counter[i].kernel);
oprofilefs_create_ulong(sb, dir, "user",
&counter[i].user);
oprofilefs_create_ulong(sb, dir, "unit_mask",
&counter[i].unit_mask);
}
return 0;
}
static int avr32_perf_counter_setup(void)
{
struct avr32_perf_counter *ctr;
u32 pccr;
int ret;
int i;
pr_debug("avr32_perf_counter_setup\n");
if (sysreg_read(PCCR) & SYSREG_BIT(PCCR_E)) {
printk(KERN_ERR
"oprofile: setup: perf counter already enabled\n");
return -EBUSY;
}
ret = request_irq(AVR32_PERFCTR_IRQ_GROUP,
avr32_perf_counter_interrupt, IRQF_SHARED,
"oprofile", counter);
if (ret)
return ret;
avr32_perf_counter_reset();
pccr = 0;
for (i = PCCNT; i < NR_counter; i++) {
ctr = &counter[i];
if (!ctr->enabled)
continue;
pr_debug("enabling counter %d...\n", i);
pccr |= ctr->ie_mask;
switch (i) {
case PCCNT:
/* PCCNT always counts cycles, so no events */
sysreg_write(PCCNT, -ctr->count);
break;
case PCNT0:
pccr |= SYSREG_BF(CONF0, ctr->event);
sysreg_write(PCNT0, -ctr->count);
break;
case PCNT1:
pccr |= SYSREG_BF(CONF1, ctr->event);
sysreg_write(PCNT1, -ctr->count);
break;
}
}
pr_debug("oprofile: writing 0x%x to PCCR...\n", pccr);
sysreg_write(PCCR, pccr);
return 0;
}
static void avr32_perf_counter_shutdown(void)
{
pr_debug("avr32_perf_counter_shutdown\n");
avr32_perf_counter_reset();
free_irq(AVR32_PERFCTR_IRQ_GROUP, counter);
}
static int avr32_perf_counter_start(void)
{
pr_debug("avr32_perf_counter_start\n");
sysreg_write(PCCR, sysreg_read(PCCR) | SYSREG_BIT(PCCR_E));
return 0;
}
static void avr32_perf_counter_stop(void)
{
pr_debug("avr32_perf_counter_stop\n");
sysreg_write(PCCR, sysreg_read(PCCR) & ~SYSREG_BIT(PCCR_E));
}
static struct oprofile_operations avr32_perf_counter_ops __initdata = {
.create_files = avr32_perf_counter_create_files,
.setup = avr32_perf_counter_setup,
.shutdown = avr32_perf_counter_shutdown,
.start = avr32_perf_counter_start,
.stop = avr32_perf_counter_stop,
.cpu_type = "avr32",
};
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
if (!(current_cpu_data.features & AVR32_FEATURE_PCTR))
return -ENODEV;
memcpy(ops, &avr32_perf_counter_ops,
sizeof(struct oprofile_operations));
ops->backtrace = avr32_backtrace;
printk(KERN_INFO "oprofile: using AVR32 performance monitoring.\n");
return 0;
}
void oprofile_arch_exit(void)
{
}
| gpl-2.0 |
klabit87/jflte_vzw_of1 | net/bridge/netfilter/ebt_ip6.c | 7502 | 4346 | /*
* ebt_ip6
*
* Authors:
* Manohar Castelino <manohar.r.castelino@intel.com>
* Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
* Jan Engelhardt <jengelh@medozas.de>
*
* Summary:
* This is just a modification of the IPv4 code written by
* Bart De Schuymer <bdschuym@pandora.be>
* with the changes required to support IPv6
*
* Jan, 2008
*/
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/dsfield.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
union pkthdr {
struct {
__be16 src;
__be16 dst;
} tcpudphdr;
struct {
u8 type;
u8 code;
} icmphdr;
};
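/*
* TCP and UDP both keep the source/destination ports in the first four
* bytes of their headers, and ICMPv6 keeps type/code in its first two,
* so a single skb_header_pointer() fetch of this union below serves
* the port matches and the ICMPv6 match alike.
*/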
static bool
ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
const union pkthdr *pptr;
union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
return false;
if (info->bitmask & EBT_IP6_TCLASS &&
FWINV(info->tclass != ipv6_get_dsfield(ih6), EBT_IP6_TCLASS))
return false;
if (FWINV(ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
&info->saddr), EBT_IP6_SOURCE) ||
FWINV(ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
&info->daddr), EBT_IP6_DEST))
return false;
if (info->bitmask & EBT_IP6_PROTO) {
uint8_t nexthdr = ih6->nexthdr;
__be16 frag_off;
int offset_ph;
offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr, &frag_off);
if (offset_ph == -1)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
if (!(info->bitmask & (EBT_IP6_DPORT |
EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
/* min icmpv6 header size is 4, so sizeof(_pkthdr) is ok. */
pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
&_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
u16 dst = ntohs(pptr->tcpudphdr.dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1], EBT_IP6_DPORT))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
u16 src = ntohs(pptr->tcpudphdr.src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1], EBT_IP6_SPORT))
return false;
}
if ((info->bitmask & EBT_IP6_ICMP6) &&
FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
pptr->icmphdr.type > info->icmpv6_type[1] ||
pptr->icmphdr.code < info->icmpv6_code[0] ||
pptr->icmphdr.code > info->icmpv6_code[1],
EBT_IP6_ICMP6))
return false;
}
return true;
}
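/*
* FWINV(cond, flag) above is the ebtables inversion helper: it XORs
* 'cond' with whether 'flag' is set in info->invflags, so a rule
* written with '!' matches exactly when the plain condition would not.
*/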
static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
{
const struct ebt_entry *e = par->entryinfo;
struct ebt_ip6_info *info = par->matchinfo;
if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO)
return -EINVAL;
if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK)
return -EINVAL;
if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) {
if (info->invflags & EBT_IP6_PROTO)
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
}
if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
if (info->bitmask & EBT_IP6_ICMP6) {
if ((info->invflags & EBT_IP6_PROTO) ||
info->protocol != IPPROTO_ICMPV6)
return -EINVAL;
if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
info->icmpv6_code[0] > info->icmpv6_code[1])
return -EINVAL;
}
return 0;
}
static struct xt_match ebt_ip6_mt_reg __read_mostly = {
.name = "ip6",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_ip6_mt,
.checkentry = ebt_ip6_mt_check,
.matchsize = sizeof(struct ebt_ip6_info),
.me = THIS_MODULE,
};
static int __init ebt_ip6_init(void)
{
return xt_register_match(&ebt_ip6_mt_reg);
}
static void __exit ebt_ip6_fini(void)
{
xt_unregister_match(&ebt_ip6_mt_reg);
}
module_init(ebt_ip6_init);
module_exit(ebt_ip6_fini);
MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match");
MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ZolaIII/android_kernel_synopsis_nightly | drivers/scsi/fnic/fnic_fcs.c | 8270 | 19871 | /*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
void fnic_handle_link(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, link_work);
unsigned long flags;
int old_link_status;
u32 old_link_down_cnt;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
old_link_down_cnt = fnic->link_down_cnt;
old_link_status = fnic->link_status;
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
if (old_link_status == fnic->link_status) {
if (!fnic->link_status)
/* DOWN -> DOWN */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else
/* UP -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
} else if (fnic->link_status) {
/* DOWN -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
}
}
/*
* This function passes incoming fabric frames to libFC
*/
void fnic_handle_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, frame_work);
struct fc_lport *lp = fnic->lport;
unsigned long flags;
struct sk_buff *skb;
struct fc_frame *fp;
while ((skb = skb_dequeue(&fnic->frame_queue))) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(skb);
return;
}
fp = (struct fc_frame *)skb;
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_head(&fnic->frame_queue, skb);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fc_exch_recv(lp, fp);
}
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
* @skb: Ethernet Frame.
*/
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
struct fc_frame *fp;
struct ethhdr *eh;
struct fcoe_hdr *fcoe_hdr;
struct fcoe_crc_eof *ft;
/*
* Undo VLAN encapsulation if present.
*/
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
if (eh->h_proto == htons(ETH_P_FIP)) {
skb_pull(skb, sizeof(*eh));
fcoe_ctlr_recv(&fnic->ctlr, skb);
return 1; /* let caller know packet was used */
}
if (eh->h_proto != htons(ETH_P_FCOE))
goto drop;
skb_set_network_header(skb, sizeof(*eh));
skb_pull(skb, sizeof(*eh));
fcoe_hdr = (struct fcoe_hdr *)skb->data;
if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
goto drop;
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_sof(fp) = fcoe_hdr->fcoe_sof;
skb_pull(skb, sizeof(struct fcoe_hdr));
skb_reset_transport_header(skb);
ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
fr_eof(fp) = ft->fcoe_eof;
skb_trim(skb, skb->len - sizeof(*ft));
return 0;
drop:
dev_kfree_skb_irq(skb);
return -1;
}
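/*
* Return convention: 1 means the skb was consumed here (handed to the
* FIP controller), 0 means it is now a decapsulated FC frame for the
* caller to queue, and -1 means it was dropped and freed.
*/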
/**
* fnic_update_mac_locked() - set data MAC address and filters.
* @fnic: fnic instance.
* @new: newly-assigned FCoE MAC address.
*
* Called with the fnic lock held.
*/
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
u8 *ctl = fnic->ctlr.ctl_src_addr;
u8 *data = fnic->data_src_addr;
if (is_zero_ether_addr(new))
new = ctl;
if (!compare_ether_addr(data, new))
return;
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN);
if (compare_ether_addr(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
/**
* fnic_update_mac() - set data MAC address and filters.
* @lport: local port.
* @new: newly-assigned FCoE MAC address.
*/
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
struct fnic *fnic = lport_priv(lport);
spin_lock_irq(&fnic->fnic_lock);
fnic_update_mac_locked(fnic, new);
spin_unlock_irq(&fnic->fnic_lock);
}
/**
* fnic_set_port_id() - set the port_ID after successful FLOGI.
* @lport: local port.
* @port_id: assigned FC_ID.
* @fp: received frame containing the FLOGI accept or NULL.
*
* This is called from libfc when a new FC_ID has been assigned.
* This causes us to reset the firmware to FC_MODE and setup the new MAC
* address and FC_ID.
*
* It is also called with FC_ID 0 when we're logged off.
*
* If the FC_ID is due to point-to-point, fp may be NULL.
*/
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
struct fnic *fnic = lport_priv(lport);
u8 *mac;
int ret;
FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
port_id, fp);
/*
* If we're clearing the FC_ID, change to use the ctl_src_addr.
* Set ethernet mode to send FLOGI.
*/
if (!port_id) {
fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
fnic_set_eth_mode(fnic);
return;
}
if (fp) {
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
/* non-FIP - FLOGI already accepted - ignore return */
fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
}
fnic_update_mac(lport, mac);
}
/* Change state to reflect transition to FC mode */
spin_lock_irq(&fnic->fnic_lock);
if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
else {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unexpected fnic state %s while"
" processing flogi resp\n",
fnic_state_to_str(fnic->state));
spin_unlock_irq(&fnic->fnic_lock);
return;
}
spin_unlock_irq(&fnic->fnic_lock);
/*
* Send FLOGI registration to firmware to set up FC mode.
* The new address will be set up when registration completes.
*/
ret = fnic_flogi_reg_handler(fnic, port_id);
if (ret < 0) {
spin_lock_irq(&fnic->fnic_lock);
if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
fnic->state = FNIC_IN_ETH_MODE;
spin_unlock_irq(&fnic->fnic_lock);
}
}
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
struct fc_frame *fp;
unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
u8 fcs_ok = 1, packet_error = 0;
u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
u32 rss_hash;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
unsigned long flags;
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE);
skb = buf->os_buf;
fp = (struct fc_frame *)skb;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
&tmpl, &fcp_bytes_written, &sof, &eof,
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
eth_hdrs_stripped = 1;
skb_trim(skb, fcp_bytes_written);
fr_sof(fp) = sof;
fr_eof(fp) = eof;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&ingress_port, &fcoe, &eop, &sop,
&rss_type, &csum_not_calc, &rss_hash,
&bytes_written, &packet_error,
&vlan_stripped, &vlan, &checksum,
&fcoe_sof, &fcoe_fc_crc_ok,
&fcoe_enc_error, &fcoe_eof,
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fcs error. dropping packet.\n");
goto drop;
}
if (fnic_import_rq_eth_pkt(fnic, skb))
return;
} else {
/* wrong CQ type */
shost_printk(KERN_ERR, fnic->lport->host,
"fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
" x%x\n",
fcoe, fcs_ok, packet_error,
fcoe_fc_crc_ok, fcoe_enc_error);
goto drop;
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto drop;
}
fr_dev(fp) = fnic->lport;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
skb_queue_tail(&fnic->frame_queue, skb);
queue_work(fnic_event_queue, &fnic->frame_work);
return;
drop:
dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
NULL);
return 0;
}
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
unsigned int tot_rq_work_done = 0, cur_work_done;
unsigned int i;
int err;
for (i = 0; i < fnic->rq_count; i++) {
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
if (cur_work_done) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame can't alloc"
" frame\n");
}
tot_rq_work_done += cur_work_done;
}
return tot_rq_work_done;
}
/*
* This function is called once at init time to allocate and fill RQ
* buffers. Subsequently, it is called in the interrupt context after RQ
* buffer processing to replenish the buffers in the RQ
*/
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
u16 len;
dma_addr_t pa;
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
if (!skb) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unable to allocate RQ sk_buff\n");
return -ENOMEM;
}
skb_reset_mac_header(skb);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
fnic_queue_rq_desc(rq, skb, pa, len);
return 0;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
/**
* fnic_eth_send() - Send Ethernet frame.
* @fip: fcoe_ctlr instance.
* @skb: Ethernet Frame, FIP, without VLAN encapsulation.
*/
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fnic *fnic = fnic_from_ctlr(fip);
struct vnic_wq *wq = &fnic->wq[0];
dma_addr_t pa;
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
}
pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
kfree_skb(skb);
return;
}
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
fnic->vlan_hw_insert, fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
/*
* Send FC frame.
*/
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
struct vnic_wq *wq = &fnic->wq[0];
struct sk_buff *skb;
dma_addr_t pa;
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
struct fcoe_hdr *fcoe_hdr;
struct fc_frame_header *fh;
u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
return 0;
if (!fnic->vlan_hw_insert) {
eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
eth_hdr = (struct ethhdr *)vlan_hdr;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
} else {
eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
eth_hdr->h_proto = htons(ETH_P_FCOE);
fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
}
if (fnic->ctlr.map_dest)
fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
else
memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
tot_len = skb->len;
BUG_ON(tot_len % 4);
memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
fcoe_hdr->fcoe_sof = fr_sof(fp);
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
pci_unmap_single(fnic->pdev, pa,
tot_len, PCI_DMA_TODEVICE);
ret = -1;
goto fnic_send_frame_end;
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
if (ret)
dev_kfree_skb_any(fp_skb(fp));
return ret;
}
/*
* fnic_send
* Routine to send a raw frame
*/
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
struct fnic *fnic = lport_priv(lp);
unsigned long flags;
if (fnic->in_remove) {
dev_kfree_skb(fp_skb(fp));
return -1;
}
/*
* Queue frame if in a transitional state.
* This occurs while registering the Port_ID / MAC address after FLOGI.
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return fnic_send_frame(fnic, fp);
}
/**
* fnic_flush_tx() - send queued frames.
* @fnic: fnic device
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
* be queued for the stable mode that we're in, either FC or Ethernet.
*
* Called without fnic_lock held.
*/
void fnic_flush_tx(struct fnic *fnic)
{
struct sk_buff *skb;
struct fc_frame *fp;
while ((skb = skb_dequeue(&fnic->tx_queue))) {
fp = (struct fc_frame *)skb;
fnic_send_frame(fnic, fp);
}
}
/**
* fnic_set_eth_mode() - put fnic into ethernet mode.
* @fnic: fnic device
*
* Called without fnic lock held.
*/
static void fnic_set_eth_mode(struct fnic *fnic)
{
unsigned long flags;
enum fnic_state old_state;
int ret;
spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
old_state = fnic->state;
switch (old_state) {
case FNIC_IN_FC_MODE:
case FNIC_IN_ETH_TRANS_FC_MODE:
default:
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
ret = fnic_fw_reset_handler(fnic);
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
goto again;
if (ret)
fnic->state = old_state;
break;
case FNIC_IN_FC_TRANS_ETH_MODE:
case FNIC_IN_ETH_MODE:
break;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
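/*
* The 'again' loop above covers a race: fnic_fw_reset_handler() runs
* with the lock dropped, so another path may have moved the state on
* by the time the lock is retaken; if so, re-dispatch on the new state
* instead of blindly restoring old_state.
*/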
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
struct sk_buff *skb = buf->os_buf;
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr,
buf->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
unsigned long flags;
spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
fnic_wq_complete_frame_send, NULL);
spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
return 0;
}
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
unsigned int wq_work_done = 0;
unsigned int i;
for (i = 0; i < fnic->raw_wq_count; i++) {
wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
work_to_do,
fnic_wq_cmpl_handler_cont,
NULL);
}
return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
pci_unmap_single(fnic->pdev, buf->dma_addr,
buf->len, PCI_DMA_TODEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
| gpl-2.0 |
qtekfun/htcDesire820Kernel | kernel/drivers/uwb/driver.c | 11086 | 3902 | /*
* Ultra Wide Band
* Driver initialization, etc
*
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* FIXME: docs
*
* Life cycle: FIXME: explain
*
* UWB radio controller:
*
* 1. alloc a uwb_rc, zero it
* 2. call uwb_rc_init() on it to set it up + ops (won't do any
* kind of allocation)
* 3. register (now it is owned by the UWB stack--deregister before
* freeing/destroying).
* 4. It lives on its own now (the UWB stack handles it)--when it
* disconnects, call unregister()
* 5. free it.
*
* Make sure you have a reference to the uwb_rc before calling
* any of the UWB API functions.
*
* TODO:
*
* 1. Locking and life cycle management is crappy still. All entry
* points to the UWB HCD API assume you have a reference on the
* uwb_rc structure and that it won't go away. They mutex lock it
* before doing anything.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/random.h>
#include "uwb-internal.h"
/* UWB stack attributes (or 'global' constants) */
/**
* If a beacon disappears for longer than this, then we consider the
* device that was represented by that beacon to be gone.
*
* ECMA-368[17.2.3, last para] establishes that a device must not
* consider a device to be its neighbour if it doesn't receive a beacon
* for more than mMaxLostBeacons. mMaxLostBeacons is defined in
* ECMA-368[17.16] as 3; because we can get only one beacon per
* superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
* for jitter and stuff and make it 500 ms.
*/
unsigned long beacon_timeout_ms = 500;
static
ssize_t beacon_timeout_ms_show(struct class *class,
struct class_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
}
static
ssize_t beacon_timeout_ms_store(struct class *class,
struct class_attribute *attr,
const char *buf, size_t size)
{
unsigned long bt;
ssize_t result;
result = sscanf(buf, "%lu", &bt);
if (result != 1)
return -EINVAL;
beacon_timeout_ms = bt;
return size;
}
static struct class_attribute uwb_class_attrs[] = {
__ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
beacon_timeout_ms_show, beacon_timeout_ms_store),
__ATTR_NULL,
};
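/*
* With the class below registered as "uwb_rc", this attribute shows up
* as /sys/class/uwb_rc/beacon_timeout_ms; illustratively,
* "echo 1000 > /sys/class/uwb_rc/beacon_timeout_ms" would stretch the
* timeout to a full second.
*/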
/** Device model classes */
struct class uwb_rc_class = {
.name = "uwb_rc",
.class_attrs = uwb_class_attrs,
};
static int __init uwb_subsys_init(void)
{
int result = 0;
result = uwb_est_create();
if (result < 0) {
printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
goto error_est_init;
}
result = class_register(&uwb_rc_class);
if (result < 0)
goto error_uwb_rc_class_register;
uwb_dbg_init();
return 0;
error_uwb_rc_class_register:
uwb_est_destroy();
error_est_init:
return result;
}
module_init(uwb_subsys_init);
static void __exit uwb_subsys_exit(void)
{
uwb_dbg_exit();
class_unregister(&uwb_rc_class);
uwb_est_destroy();
return;
}
module_exit(uwb_subsys_exit);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Ultra Wide Band core");
MODULE_LICENSE("GPL");
| gpl-2.0 |
nanikjava/androidgoldfish | drivers/infiniband/hw/mlx4/doorbell.c | 12878 | 2900 | /*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "mlx4_ib.h"
struct mlx4_ib_user_db_page {
struct list_head list;
struct ib_umem *umem;
unsigned long user_virt;
int refcnt;
};
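/*
* Doorbells are small, so many of them can land in the same user page.
* Rather than pinning the page once per doorbell, the context keeps a
* refcounted list of already-pinned pages: ib_umem_get() runs only on
* first use of a page, and the umem is released when the last doorbell
* in that page is unmapped.
*/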
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
struct ib_umem_chunk *chunk;
int err = 0;
mutex_lock(&context->db_page_mutex);
list_for_each_entry(page, &context->db_page_list, list)
if (page->user_virt == (virt & PAGE_MASK))
goto found;
page = kmalloc(sizeof *page, GFP_KERNEL);
if (!page) {
err = -ENOMEM;
goto out;
}
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
goto out;
}
list_add(&page->list, &context->db_page_list);
found:
chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
out:
mutex_unlock(&context->db_page_mutex);
return err;
}
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
{
mutex_lock(&context->db_page_mutex);
if (!--db->u.user_page->refcnt) {
list_del(&db->u.user_page->list);
ib_umem_release(db->u.user_page->umem);
kfree(db->u.user_page);
}
mutex_unlock(&context->db_page_mutex);
}
| gpl-2.0 |
DaemonGG/LARP_kernel3.16.0 | drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 79 | 36909 | /*
* QLogic qlcnic NIC Driver
* Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include <linux/slab.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
#ifdef CONFIG_QLCNIC_HWMON
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#endif
#define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
}
int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
return -EOPNOTSUPP;
}
static ssize_t qlcnic_store_bridged_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
unsigned long new;
int ret = -EINVAL;
if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
goto err_out;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
goto err_out;
if (kstrtoul(buf, 2, &new))
goto err_out;
if (!qlcnic_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
return ret;
}
static ssize_t qlcnic_show_bridged_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int bridged_mode = 0;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
return sprintf(buf, "%d\n", bridged_mode);
}
static ssize_t qlcnic_store_diag_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
unsigned long new;
if (kstrtoul(buf, 2, &new))
return -EINVAL;
if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
adapter->flags ^= QLCNIC_DIAG_ENABLED;
return len;
}
static ssize_t qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
}
static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
u8 *state, u8 *rate)
{
*rate = LSB(beacon);
*state = MSB(beacon);
QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
if (!*state) {
*rate = __QLCNIC_MAX_LED_RATE;
return 0;
} else if (*state > __QLCNIC_MAX_LED_STATE) {
return -EINVAL;
}
if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
return -EINVAL;
return 0;
}
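/*
* The beacon word is a packed u16: blink rate in the low byte, state
* in the high byte (the LSB()/MSB() split above). A zero state means
* "LED off", in which case the rate is overridden with the maximum and
* not validated further.
*/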
static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
unsigned long h_beacon;
int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
if (kstrtoul(buf, 2, &h_beacon))
return -EINVAL;
qlcnic_get_beacon_state(adapter);
if (ahw->beacon_state == h_beacon)
return len;
rtnl_lock();
if (!ahw->beacon_state) {
if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
rtnl_unlock();
return -EBUSY;
}
}
if (h_beacon)
err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
else
err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
if (!err)
ahw->beacon_state = h_beacon;
if (!ahw->beacon_state)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
rtnl_unlock();
return len;
}
static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
const char *buf, size_t len)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err, drv_sds_rings = adapter->drv_sds_rings;
u16 beacon;
u8 b_state, b_rate;
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
if (err)
return err;
qlcnic_get_beacon_state(adapter);
if (ahw->beacon_state == b_state)
return len;
rtnl_lock();
if (!ahw->beacon_state) {
if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
rtnl_unlock();
return -EBUSY;
}
}
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
err = -EIO;
goto out;
}
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
if (err)
goto out;
set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
}
err = qlcnic_config_led(adapter, b_state, b_rate);
if (!err) {
err = len;
ahw->beacon_state = b_state;
}
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
out:
if (!ahw->beacon_state)
clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
rtnl_unlock();
return err;
}
static ssize_t qlcnic_store_beacon(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int err = 0;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
dev_warn(dev,
"LED test not supported in non privileged mode\n");
return -EOPNOTSUPP;
}
if (qlcnic_82xx_check(adapter))
err = qlcnic_82xx_store_beacon(adapter, buf, len);
else if (qlcnic_83xx_check(adapter))
err = qlcnic_83xx_store_beacon(adapter, buf, len);
else
return -EIO;
return err;
}
static ssize_t qlcnic_show_beacon(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
}
static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
size_t crb_size = 4;
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
if (offset < QLCNIC_PCI_CRBSPACE) {
if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
QLCNIC_PCI_CAMQM_END))
crb_size = 8;
else
return -EINVAL;
}
if ((size != crb_size) || (offset & (crb_size-1)))
return -EINVAL;
return 0;
}
static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
qlcnic_read_crb(adapter, buf, offset, size);
return size;
}
static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
{
if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
return -EIO;
if ((size != 8) || (offset & 0x7))
return -EIO;
return 0;
}
static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
if (ret != 0)
return ret;
if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
return -EIO;
memcpy(buf, &data, size);
return size;
}
static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
if (ret != 0)
return ret;
memcpy(&data, buf, size);
if (qlcnic_pci_mem_write_2M(adapter, offset, data))
return -EIO;
return size;
}
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
return -EINVAL;
}
static int validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
{
u8 src_pci_func, s_esw_id, d_esw_id;
u8 dest_pci_func;
int i, src_index, dest_index;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
return QL_STATUS_INVALID_PARAM;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
}
return 0;
}
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
int count, rem, i, ret, index;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
ret = validate_pm_config(adapter, pm_cfg, count);
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
action, pci_func);
if (ret)
return ret;
}
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
return QL_STATUS_INVALID_PARAM;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
}
return size;
}
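/*
* Both directions of this binary attribute speak raw arrays of
* struct qlcnic_pm_func_cfg, so the buffer size must be an exact
* multiple of the struct size; validate_pm_config() then vets every
* entry before the loops above commit anything to the hardware or to
* the cached npar state.
*/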
static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u8 pci_func;
u32 count;
int i;
memset(buf, 0, size);
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
count = size / sizeof(struct qlcnic_pm_func_cfg);
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
if (pci_func >= count) {
dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
__func__, adapter->ahw->total_nic_func, count);
continue;
}
if (!adapter->npars[i].eswitch_status)
continue;
pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
}
return size;
}
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
int i, ret;
u32 op_mode;
u8 pci_func;
if (qlcnic_82xx_check(adapter))
op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
else
op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
if (qlcnic_82xx_check(adapter)) {
ret = QLC_DEV_GET_DRV(op_mode, pci_func);
} else {
ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
pci_func);
esw_cfg[i].offload_flags = 0;
}
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].promisc_mode != 1)
return QL_STATUS_INVALID_PARAM;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
return QL_STATUS_INVALID_PARAM;
if (!esw_cfg[i].op_type)
return QL_STATUS_INVALID_PARAM;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
return QL_STATUS_INVALID_PARAM;
break;
default:
return QL_STATUS_INVALID_PARAM;
}
}
return 0;
}
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
int index;
u8 op_mode = 0, pci_func;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
return QL_STATUS_INVALID_PARAM;
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
ret = validate_esw_config(adapter, esw_cfg, count);
if (ret)
return ret;
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
op_mode = esw_cfg[i].op_mode;
qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
esw_cfg[i].op_mode = op_mode;
esw_cfg[i].pci_func = adapter->ahw->pci_func;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
rtnl_lock();
qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
rtnl_unlock();
break;
case QLCNIC_ADD_VLAN:
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
break;
case QLCNIC_DEL_VLAN:
esw_cfg[i].vlan_id = 0;
qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
break;
}
}
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
goto out;
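/* Management function: also mirror the applied settings into the cached NPAR entries. */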
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
return QL_STATUS_INVALID_PARAM;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
npar->mac_override = esw_cfg[i].mac_override;
npar->offload_flags = esw_cfg[i].offload_flags;
npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
npar->discard_tagged = esw_cfg[i].discard_tagged;
break;
case QLCNIC_ADD_VLAN:
npar->pvid = esw_cfg[i].vlan_id;
break;
case QLCNIC_DEL_VLAN:
npar->pvid = 0;
break;
}
}
out:
return size;
}
static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
u8 pci_func;
u32 count;
int i;
memset(buf, 0, size);
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
count = size / sizeof(struct qlcnic_esw_func_cfg);
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
if (pci_func >= count) {
dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
__func__, adapter->ahw->total_nic_func, count);
continue;
}
if (!adapter->npars[i].eswitch_status)
continue;
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
return size;
}
static int validate_npar_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_func_cfg *np_cfg,
int count)
{
u8 pci_func, i;
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
return QL_STATUS_INVALID_PARAM;
}
return 0;
}
static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
int i, count, rem, ret, index;
u8 pci_func;
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
return QL_STATUS_INVALID_PARAM;
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
ret = validate_npar_config(adapter, np_cfg, count);
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
nic_info.pci_func = pci_func;
nic_info.min_tx_bw = np_cfg[i].min_bw;
nic_info.max_tx_bw = np_cfg[i].max_bw;
ret = qlcnic_set_nic_info(adapter, &nic_info);
if (ret)
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
return QL_STATUS_INVALID_PARAM;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
return size;
}
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
u8 pci_func;
int i, ret;
u32 count;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
memset(buf, 0, size);
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
count = size / sizeof(struct qlcnic_npar_func_cfg);
for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (adapter->npars[i].pci_func >= count) {
dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
__func__, adapter->ahw->total_nic_func, count);
continue;
}
if (!adapter->npars[i].eswitch_status)
continue;
pci_func = adapter->npars[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
np_cfg[pci_func].pci_func = pci_func;
np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
np_cfg[pci_func].port_num = nic_info.phys_port;
np_cfg[pci_func].fw_capab = nic_info.capabilities;
np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
}
return size;
}
static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics port_stats;
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
&port_stats.rx);
if (ret)
return ret;
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
&port_stats.tx);
if (ret)
return ret;
memcpy(buf, &port_stats, size);
return size;
}
static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics esw_stats;
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
&esw_stats.rx);
if (ret)
return ret;
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
&esw_stats.tx);
if (ret)
return ret;
memcpy(buf, &esw_stats, size);
return size;
}
static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
if (ret)
return ret;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_TX_COUNTER);
if (ret)
return ret;
return size;
}
static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
if (ret)
return ret;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_TX_COUNTER);
if (ret)
return ret;
return size;
}
static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
int i, ret;
u32 count;
pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
ret = qlcnic_get_pci_info(adapter, pci_info);
if (ret) {
kfree(pci_info);
return ret;
}
pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
count = size / sizeof(struct qlcnic_pci_func_cfg);
for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].func_state = 0;
pci_cfg[i].port_num = pci_info[i].default_port;
pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
kfree(pci_info);
return size;
}
static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
unsigned char *p_read_buf;
int ret, count;
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
return QL_STATUS_INVALID_PARAM;
if (!buf)
return QL_STATUS_INVALID_PARAM;
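/* Flash is read in 32-bit words; round count up so a trailing partial word is still covered. */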
count = size / sizeof(u32);
if (size % sizeof(u32))
count++;
p_read_buf = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
if (!p_read_buf)
return -ENOMEM;
if (qlcnic_83xx_lock_flash(adapter) != 0) {
kfree(p_read_buf);
return -EIO;
}
ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
count);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
kfree(p_read_buf);
return ret;
}
qlcnic_83xx_unlock_flash(adapter);
memcpy(buf, p_read_buf, size);
kfree(p_read_buf);
return size;
}
static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
char *buf, loff_t offset,
size_t size)
{
int i, ret, count;
unsigned char *p_cache, *p_src;
p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
if (!p_cache)
return -ENOMEM;
memcpy(p_cache, buf, size);
p_src = p_cache;
count = size / sizeof(u32);
if (qlcnic_83xx_lock_flash(adapter) != 0) {
kfree(p_cache);
return -EIO;
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
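/*
* Write in chunks of QLC_83XX_FLASH_WRITE_MAX dwords. Note that, as
* written, a trailing remainder (count % QLC_83XX_FLASH_WRITE_MAX)
* is not flushed by this bulk path.
*/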
for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
(u32 *)p_src,
QLC_83XX_FLASH_WRITE_MAX);
if (ret) {
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return 0;
}
static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
char *buf, loff_t offset, size_t size)
{
int i, ret, count;
unsigned char *p_cache, *p_src;
p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
if (!p_cache)
return -ENOMEM;
memcpy(p_cache, buf, size);
p_src = p_cache;
count = size / sizeof(u32);
if (qlcnic_83xx_lock_flash(adapter) != 0) {
kfree(p_cache);
return -EIO;
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
for (i = 0; i < count; i++) {
ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
if (ret) {
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
p_src = p_src + sizeof(u32);
offset = offset + sizeof(u32);
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return -EIO;
}
}
kfree(p_cache);
qlcnic_83xx_unlock_flash(adapter);
return 0;
}
static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset,
size_t size)
{
int ret;
static int flash_mode;
unsigned long data;
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!buf)
return QL_STATUS_INVALID_PARAM;
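/*
* Protocol, as implied by the code below: userspace first writes a hex
* command word (sector erase, bulk write or write) to latch a mode;
* subsequent writes carry raw flash data and are dispatched in the
* default case according to the latched flash_mode. Note flash_mode is
* function-static, so the latched mode is shared across adapters.
*/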
ret = kstrtoul(buf, 16, &data);
if (ret)
data = 0; /* raw data payload, not a command word: take the default case rather than switch on an uninitialized value */
switch (data) {
case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
flash_mode = QLC_83XX_ERASE_MODE;
ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
if (ret) {
dev_err(&adapter->pdev->dev,
"%s failed at %d\n", __func__, __LINE__);
return -EIO;
}
break;
case QLC_83XX_FLASH_BULK_WRITE_CMD:
flash_mode = QLC_83XX_BULK_WRITE_MODE;
break;
case QLC_83XX_FLASH_WRITE_CMD:
flash_mode = QLC_83XX_WRITE_MODE;
break;
default:
if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
offset, size);
if (ret) {
dev_err(&adapter->pdev->dev,
"%s failed at %d\n",
__func__, __LINE__);
return -EIO;
}
}
if (flash_mode == QLC_83XX_WRITE_MODE) {
ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
offset, size);
if (ret) {
dev_err(&adapter->pdev->dev,
"%s failed at %d\n", __func__,
__LINE__);
return -EIO;
}
}
}
return size;
}
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
.store = qlcnic_store_bridged_mode,
};
static struct device_attribute dev_attr_diag_mode = {
.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_diag_mode,
.store = qlcnic_store_diag_mode,
};
static struct device_attribute dev_attr_beacon = {
.attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_beacon,
.store = qlcnic_store_beacon,
};
static struct bin_attribute bin_attr_crb = {
.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_crb,
.write = qlcnic_sysfs_write_crb,
};
static struct bin_attribute bin_attr_mem = {
.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_mem,
.write = qlcnic_sysfs_write_mem,
};
static struct bin_attribute bin_attr_npar_config = {
.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_npar_config,
.write = qlcnic_sysfs_write_npar_config,
};
static struct bin_attribute bin_attr_pci_config = {
.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_pci_config,
.write = NULL,
};
static struct bin_attribute bin_attr_port_stats = {
.attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_get_port_stats,
.write = qlcnic_sysfs_clear_port_stats,
};
static struct bin_attribute bin_attr_esw_stats = {
.attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_get_esw_stats,
.write = qlcnic_sysfs_clear_esw_stats,
};
static struct bin_attribute bin_attr_esw_config = {
.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_esw_config,
.write = qlcnic_sysfs_write_esw_config,
};
static struct bin_attribute bin_attr_pm_config = {
.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_sysfs_read_pm_config,
.write = qlcnic_sysfs_write_pm_config,
};
static struct bin_attribute bin_attr_flash = {
.attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
.read = qlcnic_83xx_sysfs_flash_read_handler,
.write = qlcnic_83xx_sysfs_flash_write_handler,
};
#ifdef CONFIG_QLCNIC_HWMON
static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
unsigned int temperature = 0, value = 0;
if (qlcnic_83xx_check(adapter))
value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
else if (qlcnic_82xx_check(adapter))
value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
temperature = qlcnic_get_temp_val(value);
/* hwmon expects millidegrees Celsius */
temperature *= 1000;
return sprintf(buf, "%u\n", temperature);
}
/* hwmon-sysfs attributes */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
qlcnic_hwmon_show_temp, NULL, 1);
static struct attribute *qlcnic_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(qlcnic_hwmon);
void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
struct device *hwmon_dev;
/* Skip hwmon registration for a VF device */
if (qlcnic_sriov_vf_check(adapter)) {
adapter->ahw->hwmon_dev = NULL;
return;
}
hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
adapter,
qlcnic_hwmon_groups);
if (IS_ERR(hwmon_dev)) {
dev_err(dev, "Cannot register with hwmon, err=%ld\n",
PTR_ERR(hwmon_dev));
hwmon_dev = NULL;
}
adapter->ahw->hwmon_dev = hwmon_dev;
}
void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
{
struct device *hwmon_dev = adapter->ahw->hwmon_dev;
if (hwmon_dev) {
hwmon_device_unregister(hwmon_dev);
adapter->ahw->hwmon_dev = NULL;
}
}
#endif
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
if (device_create_file(dev, &dev_attr_bridged_mode))
dev_warn(dev,
"failed to create bridged_mode sysfs entry\n");
}
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
device_remove_file(dev, &dev_attr_bridged_mode);
}
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
if (device_create_file(dev, &dev_attr_diag_mode))
dev_info(dev, "failed to create diag_mode sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_crb))
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry\n");
if (device_create_file(dev, &dev_attr_beacon))
dev_info(dev, "failed to create beacon sysfs entry\n");
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
if (device_create_bin_file(dev, &bin_attr_esw_config))
dev_info(dev, "failed to create esw config sysfs entry\n");
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return;
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_pm_config))
dev_info(dev, "failed to create pm config sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_esw_stats))
dev_info(dev, "failed to create eswitch stats sysfs entry\n");
}
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
device_remove_bin_file(dev, &bin_attr_port_stats);
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
return;
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
device_remove_bin_file(dev, &bin_attr_esw_config);
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return;
device_remove_bin_file(dev, &bin_attr_npar_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
}
void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
{
qlcnic_create_diag_entries(adapter);
}
void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
{
qlcnic_remove_diag_entries(adapter);
}
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
qlcnic_create_diag_entries(adapter);
if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
dev_info(dev, "failed to create flash sysfs entry\n");
}
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
qlcnic_remove_diag_entries(adapter);
sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
}
| gpl-2.0 |
cubieboard/CC-A80-u-boot | common/cmd_ext2.c | 79 | 6178 | /*
* (C) Copyright 2004
* esd gmbh <www.esd-electronics.com>
* Reinhard Arlt <reinhard.arlt@esd-electronics.com>
*
* made from cmd_reiserfs by
*
* (C) Copyright 2003 - 2004
* Sysgo Real-Time Solutions, AG <www.elinos.com>
* Pavel Bartusek <pba@sysgo.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*
*/
/*
* Ext2fs support
*/
#include <common.h>
#include <part.h>
#include <config.h>
#include <command.h>
#include <image.h>
#include <linux/ctype.h>
#include <asm/byteorder.h>
#include <ext2fs.h>
#if defined(CONFIG_CMD_USB) && defined(CONFIG_USB_STORAGE)
#include <usb.h>
#endif
#if !defined(CONFIG_DOS_PARTITION) && !defined(CONFIG_EFI_PARTITION)
#error DOS or EFI partition support must be selected
#endif
/* #define EXT2_DEBUG */
#ifdef EXT2_DEBUG
#define PRINTF(fmt, args...) printf(fmt, ##args)
#else
#define PRINTF(fmt, args...)
#endif
int do_ext2ls (cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
char *filename = "/";
int dev=0;
int part=1;
char *ep;
block_dev_desc_t *dev_desc=NULL;
int part_length;
if (argc < 3)
return cmd_usage(cmdtp);
dev = (int)simple_strtoul (argv[2], &ep, 16);
dev_desc = get_dev(argv[1],dev);
if (dev_desc == NULL) {
printf ("\n** Block device %s %d not supported\n", argv[1], dev);
return 1;
}
if (*ep) {
if (*ep != ':') {
puts ("\n** Invalid boot device, use `dev[:part]' **\n");
return 1;
}
part = (int)simple_strtoul(++ep, NULL, 16);
}
if (argc == 4)
filename = argv[3];
PRINTF("Using device %s %d:%d, directory: %s\n", argv[1], dev, part, filename);
if ((part_length = ext2fs_set_blk_dev(dev_desc, part)) == 0) {
printf ("** Bad partition - %s %d:%d **\n", argv[1], dev, part);
ext2fs_close();
return 1;
}
if (!ext2fs_mount(part_length)) {
printf ("** Bad ext2 partition or disk - %s %d:%d **\n", argv[1], dev, part);
ext2fs_close();
return 1;
}
if (ext2fs_ls (filename)) {
printf ("** Error ext2fs_ls() **\n");
ext2fs_close();
return 1;
}
ext2fs_close();
return 0;
}
U_BOOT_CMD(
ext2ls, 4, 1, do_ext2ls,
"list files in a directory (default /)",
"<interface> <dev[:part]> [directory]\n"
" - list files from 'dev' on 'interface' in a 'directory'"
);
/******************************************************************************
* Ext2fs boot command interpreter. Derived from diskboot
*/
int do_ext2load (cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
char *filename = NULL;
char *ep;
int dev, part = 1;
ulong addr = 0, part_length;
int filelen;
disk_partition_t info;
block_dev_desc_t *dev_desc = NULL;
char buf[12];
unsigned long count;
char *addr_str;
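/*
* ext2load <interface> <dev[:part]> [addr] [filename] [bytes]
* addr defaults to $loadaddr (else CONFIG_SYS_LOAD_ADDR), filename to
* $bootfile, and bytes (0 = whole file) caps how much is read.
*/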
switch (argc) {
case 3:
addr_str = getenv("loadaddr");
if (addr_str != NULL)
addr = simple_strtoul (addr_str, NULL, 16);
else
addr = CONFIG_SYS_LOAD_ADDR;
filename = getenv ("bootfile");
count = 0;
break;
case 4:
addr = simple_strtoul (argv[3], NULL, 16);
filename = getenv ("bootfile");
count = 0;
break;
case 5:
addr = simple_strtoul (argv[3], NULL, 16);
filename = argv[4];
count = 0;
break;
case 6:
addr = simple_strtoul (argv[3], NULL, 16);
filename = argv[4];
count = simple_strtoul (argv[5], NULL, 16);
break;
default:
return cmd_usage(cmdtp);
}
if (!filename) {
puts ("** No boot file defined **\n");
return 1;
}
dev = (int)simple_strtoul (argv[2], &ep, 16);
dev_desc = get_dev(argv[1],dev);
if (dev_desc==NULL) {
printf ("** Block device %s %d not supported\n", argv[1], dev);
return 1;
}
if (*ep) {
if (*ep != ':') {
puts ("** Invalid boot device, use `dev[:part]' **\n");
return 1;
}
part = (int)simple_strtoul(++ep, NULL, 16);
}
PRINTF("Using device %s%d, partition %d\n", argv[1], dev, part);
if (part != 0) {
if (get_partition_info (dev_desc, part, &info)) {
printf ("** Bad partition %d **\n", part);
return 1;
}
if (strncmp((char *)info.type, BOOT_PART_TYPE, sizeof(info.type)) != 0) {
printf ("** Invalid partition type \"%.32s\""
" (expect \"" BOOT_PART_TYPE "\")\n",
info.type);
return 1;
}
printf ("Loading file \"%s\" "
"from %s device %d:%d (%.32s)\n",
filename,
argv[1], dev, part, info.name);
} else {
printf ("Loading file \"%s\" from %s device %d\n",
filename, argv[1], dev);
}
if ((part_length = ext2fs_set_blk_dev(dev_desc, part)) == 0) {
printf ("** Bad partition - %s %d:%d **\n", argv[1], dev, part);
ext2fs_close();
return 1;
}
if (!ext2fs_mount(part_length)) {
printf ("** Bad ext2 partition or disk - %s %d:%d **\n",
argv[1], dev, part);
ext2fs_close();
return 1;
}
filelen = ext2fs_open(filename);
if (filelen < 0) {
printf("** File not found %s\n", filename);
ext2fs_close();
return 1;
}
if ((count < filelen) && (count != 0)) {
filelen = count;
}
if (ext2fs_read((char *)addr, filelen) != filelen) {
printf("** Unable to read \"%s\" from %s %d:%d **\n",
filename, argv[1], dev, part);
ext2fs_close();
return 1;
}
ext2fs_close();
/* Loading ok, update default load address */
load_addr = addr;
printf ("%d bytes read\n", filelen);
sprintf(buf, "%X", filelen);
setenv("filesize", buf);
return 0;
}
U_BOOT_CMD(
ext2load, 6, 0, do_ext2load,
"load binary file from a Ext2 filesystem",
"<interface> <dev[:part]> [addr] [filename] [bytes]\n"
" - load binary file 'filename' from 'dev' on 'interface'\n"
" to address 'addr' from ext2 filesystem"
);
| gpl-2.0 |
aospan/media_tree | net/core/gen_estimator.c | 79 | 9048 | /*
* net/sched/gen_estimator.c Simple rate estimator.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Changes:
* Jamal Hadi Salim - moved it to net/core and reshuffled
* names to make it usable in general net subsystem.
*/
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>
/*
This code is NOT intended to be used for statistics collection,
its purpose is to provide a base for statistical multiplexing
for controlled load service.
If you need only statistics, run a user level daemon which
periodically reads byte counters.
Unfortunately, rate estimation is not a very easy task.
F.e. I did not find a simple way to estimate the current peak rate
and even failed to formulate the problem 8)8)
So I preferred not to build an estimator into the scheduler,
but run this task separately.
Ideally, it should be kernel thread(s), but for now it runs
from timers, which puts an apparent upper bound on the number of rated
flows; it has minimal overhead when that number is small, and is enough
to handle controlled load service and sets of aggregates.
We measure rate over A=(1<<interval) seconds and evaluate EWMA:
avrate = avrate*(1-W) + rate*W
where W is chosen as negative power of 2: W = 2^(-ewma_log)
The resulting time constant is:
T = A/(-ln(1-W))
NOTES.
* avbps and avpps are scaled by 2^5.
* both values are reported as 32 bit unsigned values. bps can
overflow for fast links: max speed being 34360Mbit/sec
* Minimal interval is HZ/4=250msec (it is the greatest common divisor
for HZ=100 and HZ=1024 8)), maximal interval
is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
are too expensive, longer ones can be implemented
at user level painlessly.
*/
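/*
* Worked scaling example (derived from the code below, not part of the
* original notes): with idx = interval + 2 the timer fires every
* (HZ/4) << idx jiffies, i.e. every 2^idx/4 seconds. A byte delta D
* over that period is D * 2^(2-idx) bytes/sec, so D << (7 - idx) is
* the rate pre-scaled by 2^5 -- which is why est_timer() reports
* (avbps + 0xF) >> 5.
*/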
#define EST_MAX_INTERVAL 5
struct gen_estimator
{
struct list_head list;
struct gnet_stats_basic_packed *bstats;
struct gnet_stats_rate_est64 *rate_est;
spinlock_t *stats_lock;
int ewma_log;
u32 last_packets;
unsigned long avpps;
u64 last_bytes;
u64 avbps;
struct rcu_head e_rcu;
struct rb_node node;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct rcu_head head;
};
struct gen_estimator_head
{
struct timer_list timer;
struct list_head list;
};
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);
/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
int idx = (int)arg;
struct gen_estimator *e;
rcu_read_lock();
list_for_each_entry_rcu(e, &elist[idx].list, list) {
struct gnet_stats_basic_packed b = {0};
unsigned long rate;
u64 brate;
spin_lock(e->stats_lock);
read_lock(&est_lock);
if (e->bstats == NULL)
goto skip;
__gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
brate = (b.bytes - e->last_bytes)<<(7 - idx);
e->last_bytes = b.bytes;
e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
e->rate_est->bps = (e->avbps+0xF)>>5;
rate = b.packets - e->last_packets;
rate <<= (7 - idx);
e->last_packets = b.packets;
e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
e->rate_est->pps = (e->avpps + 0xF) >> 5;
skip:
read_unlock(&est_lock);
spin_unlock(e->stats_lock);
}
if (!list_empty(&elist[idx].list))
mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
rcu_read_unlock();
}
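/*
* Estimators are also kept in an rbtree ordered by the bstats pointer
* value, so gen_kill_estimator() can find them via gen_find_node()
* without walking every interval list.
*/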
static void gen_add_node(struct gen_estimator *est)
{
struct rb_node **p = &est_root.rb_node, *parent = NULL;
while (*p) {
struct gen_estimator *e;
parent = *p;
e = rb_entry(parent, struct gen_estimator, node);
if (est->bstats > e->bstats)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&est->node, parent, p);
rb_insert_color(&est->node, &est_root);
}
static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est64 *rate_est)
{
struct rb_node *p = est_root.rb_node;
while (p) {
struct gen_estimator *e;
e = rb_entry(p, struct gen_estimator, node);
if (bstats > e->bstats)
p = p->rb_right;
else if (bstats < e->bstats || rate_est != e->rate_est)
p = p->rb_left;
else
return e;
}
return NULL;
}
/**
* gen_new_estimator - create a new rate estimator
* @bstats: basic statistics
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @stats_lock: statistics lock
* @opt: rate estimator configuration TLV
*
* Creates a new rate estimator with &bstats as source and &rate_est
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
* &rate_est with the statistics lock grabbed during this period.
*
* Returns 0 on success or a negative error code.
*
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock,
struct nlattr *opt)
{
struct gen_estimator *est;
struct gnet_estimator *parm = nla_data(opt);
struct gnet_stats_basic_packed b = {0};
int idx;
if (nla_len(opt) < sizeof(*parm))
return -EINVAL;
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOBUFS;
__gnet_stats_copy_basic(&b, cpu_bstats, bstats);
idx = parm->interval + 2;
est->bstats = bstats;
est->rate_est = rate_est;
est->stats_lock = stats_lock;
est->ewma_log = parm->ewma_log;
est->last_bytes = b.bytes;
est->avbps = rate_est->bps<<5;
est->last_packets = b.packets;
est->avpps = rate_est->pps<<5; /* seed with est_timer()'s 2^5 pps scaling; the original <<10 looks like a leftover from the old 2^10 scale */
est->cpu_bstats = cpu_bstats;
spin_lock_bh(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
}
if (list_empty(&elist[idx].list))
mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
spin_unlock_bh(&est_tree_lock);
return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
/**
* gen_kill_estimator - remove a rate estimator
* @bstats: basic statistics
* @rate_est: rate estimator statistics
*
* Removes the rate estimator specified by &bstats and &rate_est.
*
* Note : Caller should respect an RCU grace period before freeing stats_lock
*/
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est)
{
struct gen_estimator *e;
spin_lock_bh(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
write_lock(&est_lock);
e->bstats = NULL;
write_unlock(&est_lock);
list_del_rcu(&e->list);
kfree_rcu(e, e_rcu);
}
spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
/**
* gen_replace_estimator - replace rate estimator configuration
* @bstats: basic statistics
* @cpu_bstats: bstats per cpu
* @rate_est: rate estimator statistics
* @stats_lock: statistics lock
* @opt: rate estimator configuration TLV
*
* Replaces the configuration of a rate estimator by calling
* gen_kill_estimator() and gen_new_estimator().
*
* Returns 0 on success or a negative error code.
*/
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt)
{
gen_kill_estimator(bstats, rate_est);
return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
/**
* gen_estimator_active - test if estimator is currently in use
* @bstats: basic statistics
* @rate_est: rate estimator statistics
*
* Returns true if estimator is active, and false if not.
*/
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est64 *rate_est)
{
bool res;
ASSERT_RTNL();
spin_lock_bh(&est_tree_lock);
res = gen_find_node(bstats, rate_est) != NULL;
spin_unlock_bh(&est_tree_lock);
return res;
}
EXPORT_SYMBOL(gen_estimator_active);
| gpl-2.0 |
aceofall/linux-kernel | drivers/i2c/busses/i2c-mpc.c | 79 | 21411 | /*
* (C) Copyright 2003-2004
* Humboldt Solutions Ltd, adrian@humboldt.co.uk.
* This is a combined i2c adapter and algorithm driver for the
* MPC107/Tsi107 PowerPC northbridge and processors that include
* the same I2C unit (8240, 8245, 85xx).
*
* Release 0.8
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/fsl_devices.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/mpc52xx.h>
#include <sysdev/fsl_soc.h>
#define DRV_NAME "mpc-i2c"
#define MPC_I2C_CLOCK_LEGACY 0
#define MPC_I2C_CLOCK_PRESERVE (~0U)
#define MPC_I2C_FDR 0x04
#define MPC_I2C_CR 0x08
#define MPC_I2C_SR 0x0c
#define MPC_I2C_DR 0x10
#define MPC_I2C_DFSRR 0x14
#define CCR_MEN 0x80
#define CCR_MIEN 0x40
#define CCR_MSTA 0x20
#define CCR_MTX 0x10
#define CCR_TXAK 0x08
#define CCR_RSTA 0x04
#define CSR_MCF 0x80
#define CSR_MAAS 0x40
#define CSR_MBB 0x20
#define CSR_MAL 0x10
#define CSR_SRW 0x04
#define CSR_MIF 0x02
#define CSR_RXAK 0x01
struct mpc_i2c {
struct device *dev;
void __iomem *base;
u32 interrupt;
wait_queue_head_t queue;
struct i2c_adapter adap;
int irq;
u32 real_clk;
#ifdef CONFIG_PM_SLEEP
u8 fdr, dfsrr;
#endif
struct clk *clk_per;
};
struct mpc_i2c_divider {
u16 divider;
u16 fdr; /* including dfsrr */
};
struct mpc_i2c_data {
void (*setup)(struct device_node *node, struct mpc_i2c *i2c,
u32 clock, u32 prescaler);
u32 prescaler;
};
static inline void writeccr(struct mpc_i2c *i2c, u32 x)
{
writeb(x, i2c->base + MPC_I2C_CR);
}
static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
{
struct mpc_i2c *i2c = dev_id;
if (readb(i2c->base + MPC_I2C_SR) & CSR_MIF) {
/* Read again to allow register to stabilise */
i2c->interrupt = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
wake_up(&i2c->queue);
}
return IRQ_HANDLED;
}
/* Sometimes the 9th clock pulse isn't generated, and the slave doesn't
* release the bus because it wants to send an ACK.
* The following sequence of enabling/disabling and sending start/stop
* generates the 9 pulses, so it's all OK.
*/
static void mpc_i2c_fixup(struct mpc_i2c *i2c)
{
int k;
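/* delay_val is about one SCL bit time in usecs at the current bus clock, rounded up */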
u32 delay_val = 1000000 / i2c->real_clk + 1;
if (delay_val < 2)
delay_val = 2;
for (k = 9; k; k--) {
writeccr(i2c, 0);
writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
udelay(delay_val);
writeccr(i2c, CCR_MEN);
udelay(delay_val << 1);
}
}
static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing)
{
unsigned long orig_jiffies = jiffies;
u32 x;
int result = 0;
if (!i2c->irq) {
while (!(readb(i2c->base + MPC_I2C_SR) & CSR_MIF)) {
schedule();
if (time_after(jiffies, orig_jiffies + timeout)) {
dev_dbg(i2c->dev, "timeout\n");
writeccr(i2c, 0);
result = -EIO;
break;
}
}
x = readb(i2c->base + MPC_I2C_SR);
writeb(0, i2c->base + MPC_I2C_SR);
} else {
/* Interrupt mode */
result = wait_event_timeout(i2c->queue,
(i2c->interrupt & CSR_MIF), timeout);
if (unlikely(!(i2c->interrupt & CSR_MIF))) {
dev_dbg(i2c->dev, "wait timeout\n");
writeccr(i2c, 0);
result = -ETIMEDOUT;
}
x = i2c->interrupt;
i2c->interrupt = 0;
}
if (result < 0)
return result;
if (!(x & CSR_MCF)) {
dev_dbg(i2c->dev, "unfinished\n");
return -EIO;
}
if (x & CSR_MAL) {
dev_dbg(i2c->dev, "MAL\n");
return -EIO;
}
if (writing && (x & CSR_RXAK)) {
dev_dbg(i2c->dev, "No RXAK\n");
/* generate stop */
writeccr(i2c, CCR_MEN);
return -EIO;
}
return 0;
}
#if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x)
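/*
* FDR lookup table, ordered by ascending divider: the search below picks
* the first entry whose divider is >= the requested one, giving a bus
* speed equal to or lower than requested.
*/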
static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = {
{20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23},
{28, 0x24}, {30, 0x01}, {32, 0x25}, {34, 0x02},
{36, 0x26}, {40, 0x27}, {44, 0x04}, {48, 0x28},
{52, 0x63}, {56, 0x29}, {60, 0x41}, {64, 0x2a},
{68, 0x07}, {72, 0x2b}, {80, 0x2c}, {88, 0x09},
{96, 0x2d}, {104, 0x0a}, {112, 0x2e}, {120, 0x81},
{128, 0x2f}, {136, 0x47}, {144, 0x0c}, {160, 0x30},
{176, 0x49}, {192, 0x31}, {208, 0x4a}, {224, 0x32},
{240, 0x0f}, {256, 0x33}, {272, 0x87}, {288, 0x10},
{320, 0x34}, {352, 0x89}, {384, 0x35}, {416, 0x8a},
{448, 0x36}, {480, 0x13}, {512, 0x37}, {576, 0x14},
{640, 0x38}, {768, 0x39}, {896, 0x3a}, {960, 0x17},
{1024, 0x3b}, {1152, 0x18}, {1280, 0x3c}, {1536, 0x3d},
{1792, 0x3e}, {1920, 0x1b}, {2048, 0x3f}, {2304, 0x1c},
{2560, 0x1d}, {3072, 0x1e}, {3584, 0x7e}, {3840, 0x1f},
{4096, 0x7f}, {4608, 0x5c}, {5120, 0x5d}, {6144, 0x5e},
{7168, 0xbe}, {7680, 0x5f}, {8192, 0xbf}, {9216, 0x9c},
{10240, 0x9d}, {12288, 0x9e}, {15360, 0x9f}
};
static int mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock,
int prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
unsigned int pvr = mfspr(SPRN_PVR);
u32 divider;
int i;
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x3f -> div = 2048 */
*real_clk = mpc5xxx_get_bus_frequency(node) / 2048;
return -EINVAL;
}
/* Determine divider value */
divider = mpc5xxx_get_bus_frequency(node) / clock;
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
* is equal to or lower than the requested speed.
*/
for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_52xx); i++) {
div = &mpc_i2c_dividers_52xx[i];
/* Old MPC5200 rev A CPUs do not support the high bits */
if (div->fdr & 0xc0 && pvr == 0x80822011)
continue;
if (div->divider >= divider)
break;
}
*real_clk = mpc5xxx_get_bus_frequency(node) / div->divider;
return (int)div->fdr;
}
static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
int ret, fdr;
if (clock == MPC_I2C_CLOCK_PRESERVE) {
dev_dbg(i2c->dev, "using fdr %d\n",
readb(i2c->base + MPC_I2C_FDR));
return;
}
ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler, &i2c->real_clk);
fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
if (ret >= 0)
dev_info(i2c->dev, "clock %u Hz (fdr=%d)\n", i2c->real_clk,
fdr);
}
#else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */
static void mpc_i2c_setup_52xx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
}
#endif /* CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x */
#ifdef CONFIG_PPC_MPC512x
static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
struct device_node *node_ctrl;
void __iomem *ctrl;
const u32 *pval;
u32 idx;
/* Enable I2C interrupts for mpc5121 */
node_ctrl = of_find_compatible_node(NULL, NULL,
"fsl,mpc5121-i2c-ctrl");
if (node_ctrl) {
ctrl = of_iomap(node_ctrl, 0);
if (ctrl) {
/* Interrupt enable bits for i2c-0/1/2: bit 24/26/28 */
pval = of_get_property(node, "reg", NULL);
idx = (*pval & 0xff) / 0x20;
setbits32(ctrl, 1 << (24 + idx * 2));
iounmap(ctrl);
}
of_node_put(node_ctrl);
}
/* The clock setup for the 52xx works also fine for the 512x */
mpc_i2c_setup_52xx(node, i2c, clock, prescaler);
}
#else /* CONFIG_PPC_MPC512x */
static void mpc_i2c_setup_512x(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
}
#endif /* CONFIG_PPC_MPC512x */
#ifdef CONFIG_FSL_SOC
static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
{160, 0x0120}, {192, 0x0121}, {224, 0x0122}, {256, 0x0123},
{288, 0x0100}, {320, 0x0101}, {352, 0x0601}, {384, 0x0102},
{416, 0x0602}, {448, 0x0126}, {480, 0x0103}, {512, 0x0127},
{544, 0x0b03}, {576, 0x0104}, {608, 0x1603}, {640, 0x0105},
{672, 0x2003}, {704, 0x0b05}, {736, 0x2b03}, {768, 0x0106},
{800, 0x3603}, {832, 0x0b06}, {896, 0x012a}, {960, 0x0107},
{1024, 0x012b}, {1088, 0x1607}, {1152, 0x0108}, {1216, 0x2b07},
{1280, 0x0109}, {1408, 0x1609}, {1536, 0x010a}, {1664, 0x160a},
{1792, 0x012e}, {1920, 0x010b}, {2048, 0x012f}, {2176, 0x2b0b},
{2304, 0x010c}, {2560, 0x010d}, {2816, 0x2b0d}, {3072, 0x010e},
{3328, 0x2b0e}, {3584, 0x0132}, {3840, 0x010f}, {4096, 0x0133},
{4608, 0x0110}, {5120, 0x0111}, {6144, 0x0112}, {7168, 0x0136},
{7680, 0x0113}, {8192, 0x0137}, {9216, 0x0114}, {10240, 0x0115},
{12288, 0x0116}, {14336, 0x013a}, {15360, 0x0117}, {16384, 0x013b},
{18432, 0x0118}, {20480, 0x0119}, {24576, 0x011a}, {28672, 0x013e},
{30720, 0x011b}, {32768, 0x013f}, {36864, 0x011c}, {40960, 0x011d},
{49152, 0x011e}, {61440, 0x011f}
};
static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
struct device_node *node = NULL;
u32 __iomem *reg;
u32 val = 0;
node = of_find_node_by_name(NULL, "global-utilities");
if (node) {
const u32 *prop = of_get_property(node, "reg", NULL);
if (prop) {
/*
* Map and check POR Device Status Register 2
* (PORDEVSR2) at 0xE0014
*/
reg = ioremap(get_immrbase() + *prop + 0x14, 0x4);
if (!reg) {
printk(KERN_ERR
"Error: couldn't map PORDEVSR2\n");
} else {
val = in_be32(reg) & 0x00000080; /* sec-cfg */
iounmap(reg); /* only unmap a successful mapping */
}
}
}
if (node)
of_node_put(node);
return val;
}
static int mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock,
u32 prescaler, u32 *real_clk)
{
const struct mpc_i2c_divider *div = NULL;
u32 divider;
int i;
if (clock == MPC_I2C_CLOCK_LEGACY) {
/* see below - default fdr = 0x1031 -> div = 16 * 3072 */
*real_clk = fsl_get_sys_freq() / prescaler / (16 * 3072);
return -EINVAL;
}
/* Determine proper divider value */
if (of_device_is_compatible(node, "fsl,mpc8544-i2c"))
prescaler = mpc_i2c_get_sec_cfg_8xxx() ? 3 : 2;
if (!prescaler)
prescaler = 1;
divider = fsl_get_sys_freq() / clock / prescaler;
pr_debug("I2C: src_clock=%d clock=%d divider=%d\n",
fsl_get_sys_freq(), clock, divider);
/*
* We want to choose an FDR/DFSR that generates an I2C bus speed that
* is equal to or lower than the requested speed.
*/
for (i = 0; i < ARRAY_SIZE(mpc_i2c_dividers_8xxx); i++) {
div = &mpc_i2c_dividers_8xxx[i];
if (div->divider >= divider)
break;
}
*real_clk = fsl_get_sys_freq() / prescaler / div->divider;
return div ? (int)div->fdr : -EINVAL;
}
static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
int ret, fdr;
if (clock == MPC_I2C_CLOCK_PRESERVE) {
dev_dbg(i2c->dev, "using dfsrr %d, fdr %d\n",
readb(i2c->base + MPC_I2C_DFSRR),
readb(i2c->base + MPC_I2C_FDR));
return;
}
ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler, &i2c->real_clk);
fdr = (ret >= 0) ? ret : 0x1031; /* backward compatibility */
writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR);
writeb((fdr >> 8) & 0xff, i2c->base + MPC_I2C_DFSRR);
if (ret >= 0)
dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n",
i2c->real_clk, fdr >> 8, fdr & 0xff);
}
#else /* !CONFIG_FSL_SOC */
static void mpc_i2c_setup_8xxx(struct device_node *node,
struct mpc_i2c *i2c,
u32 clock, u32 prescaler)
{
}
#endif /* CONFIG_FSL_SOC */
static void mpc_i2c_start(struct mpc_i2c *i2c)
{
/* Clear arbitration */
writeb(0, i2c->base + MPC_I2C_SR);
/* Start with MEN */
writeccr(i2c, CCR_MEN);
}
static void mpc_i2c_stop(struct mpc_i2c *i2c)
{
writeccr(i2c, CCR_MEN);
}
static int mpc_write(struct mpc_i2c *i2c, int target,
const u8 *data, int length, int restart)
{
int i, result;
unsigned timeout = i2c->adap.timeout;
u32 flags = restart ? CCR_RSTA : 0;
/* Start as master */
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
/* Write target byte */
writeb((target << 1), i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
for (i = 0; i < length; i++) {
/* Write data byte */
writeb(data[i], i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
}
return 0;
}
static int mpc_read(struct mpc_i2c *i2c, int target,
u8 *data, int length, int restart, bool recv_len)
{
unsigned timeout = i2c->adap.timeout;
int i, result;
u32 flags = restart ? CCR_RSTA : 0;
/* Switch to read - restart */
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags);
/* Write target address byte - this time with the read flag set */
writeb((target << 1) | 1, i2c->base + MPC_I2C_DR);
result = i2c_wait(i2c, timeout, 1);
if (result < 0)
return result;
if (length) {
if (length == 1 && !recv_len)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK);
else
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA);
/* Dummy read */
readb(i2c->base + MPC_I2C_DR);
}
for (i = 0; i < length; i++) {
u8 byte;
result = i2c_wait(i2c, timeout, 0);
if (result < 0)
return result;
/*
* For block reads, we have to know the total length (1st byte)
* before we can determine if we are done.
*/
if (i || !recv_len) {
/* Generate txack on next to last byte */
if (i == length - 2)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
| CCR_TXAK);
/* Do not generate stop on last byte */
if (i == length - 1)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
| CCR_MTX);
}
byte = readb(i2c->base + MPC_I2C_DR);
/*
* Adjust length if first received byte is length.
* The length is 1 length byte plus actually data length
*/
if (i == 0 && recv_len) {
if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
length += byte;
/*
* For block reads, generate txack here if data length
* is 1 byte (total length is 2 bytes).
*/
if (length == 2)
writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA
| CCR_TXAK);
}
data[i] = byte;
}
return length;
}
static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct i2c_msg *pmsg;
int i;
int ret = 0;
unsigned long orig_jiffies = jiffies;
struct mpc_i2c *i2c = i2c_get_adapdata(adap);
mpc_i2c_start(i2c);
/* Allow bus up to 1s to become not busy */
while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
if (signal_pending(current)) {
dev_dbg(i2c->dev, "Interrupted\n");
writeccr(i2c, 0);
return -EINTR;
}
if (time_after(jiffies, orig_jiffies + HZ)) {
u8 status = readb(i2c->base + MPC_I2C_SR);
dev_dbg(i2c->dev, "timeout\n");
if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
writeb(status & ~CSR_MAL,
i2c->base + MPC_I2C_SR);
mpc_i2c_fixup(i2c);
}
return -EIO;
}
schedule();
}
for (i = 0; ret >= 0 && i < num; i++) {
pmsg = &msgs[i];
dev_dbg(i2c->dev,
"Doing %s %d bytes to 0x%02x - %d of %d messages\n",
pmsg->flags & I2C_M_RD ? "read" : "write",
pmsg->len, pmsg->addr, i + 1, num);
if (pmsg->flags & I2C_M_RD) {
bool recv_len = pmsg->flags & I2C_M_RECV_LEN;
ret = mpc_read(i2c, pmsg->addr, pmsg->buf, pmsg->len, i,
recv_len);
if (recv_len && ret > 0)
pmsg->len = ret;
} else {
ret =
mpc_write(i2c, pmsg->addr, pmsg->buf, pmsg->len, i);
}
}
mpc_i2c_stop(i2c); /* Initiate STOP */
orig_jiffies = jiffies;
/* Wait until STOP is seen, allow up to 1 s */
while (readb(i2c->base + MPC_I2C_SR) & CSR_MBB) {
if (time_after(jiffies, orig_jiffies + HZ)) {
u8 status = readb(i2c->base + MPC_I2C_SR);
dev_dbg(i2c->dev, "timeout\n");
if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) {
writeb(status & ~CSR_MAL,
i2c->base + MPC_I2C_SR);
mpc_i2c_fixup(i2c);
}
return -EIO;
}
cond_resched();
}
return (ret < 0) ? ret : num;
}
static u32 mpc_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
| I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
}
static const struct i2c_algorithm mpc_algo = {
.master_xfer = mpc_xfer,
.functionality = mpc_functionality,
};
static struct i2c_adapter mpc_ops = {
.owner = THIS_MODULE,
.algo = &mpc_algo,
.timeout = HZ,
};
static const struct of_device_id mpc_i2c_of_match[];
static int fsl_i2c_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct mpc_i2c *i2c;
const u32 *prop;
u32 clock = MPC_I2C_CLOCK_LEGACY;
int result = 0;
int plen;
struct resource res;
struct clk *clk;
int err;
match = of_match_device(mpc_i2c_of_match, &op->dev);
if (!match)
return -EINVAL;
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
i2c->dev = &op->dev; /* for debug and error output */
init_waitqueue_head(&i2c->queue);
i2c->base = of_iomap(op->dev.of_node, 0);
if (!i2c->base) {
dev_err(i2c->dev, "failed to map controller\n");
result = -ENOMEM;
goto fail_map;
}
i2c->irq = irq_of_parse_and_map(op->dev.of_node, 0);
if (i2c->irq) { /* no i2c->irq implies polling */
result = request_irq(i2c->irq, mpc_i2c_isr,
IRQF_SHARED, "i2c-mpc", i2c);
if (result < 0) {
dev_err(i2c->dev, "failed to attach interrupt\n");
goto fail_request;
}
}
/*
* enable clock for the I2C peripheral (non fatal),
* keep a reference upon successful allocation
*/
clk = devm_clk_get(&op->dev, NULL);
if (!IS_ERR(clk)) {
err = clk_prepare_enable(clk);
if (err) {
dev_err(&op->dev, "failed to enable clock\n");
goto fail_request;
} else {
i2c->clk_per = clk;
}
}
if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
prop = of_get_property(op->dev.of_node, "clock-frequency",
&plen);
if (prop && plen == sizeof(u32))
clock = *prop;
}
if (match->data) {
const struct mpc_i2c_data *data = match->data;
data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */
if (of_get_property(op->dev.of_node, "dfsrr", NULL))
mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0);
}
prop = of_get_property(op->dev.of_node, "fsl,timeout", &plen);
if (prop && plen == sizeof(u32)) {
mpc_ops.timeout = *prop * HZ / 1000000;
if (mpc_ops.timeout < 5)
mpc_ops.timeout = 5;
}
dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ);
platform_set_drvdata(op, i2c);
i2c->adap = mpc_ops;
of_address_to_resource(op->dev.of_node, 0, &res);
scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
"MPC adapter at 0x%llx", (unsigned long long)res.start);
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &op->dev;
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
return result;
fail_add:
if (i2c->clk_per)
clk_disable_unprepare(i2c->clk_per);
free_irq(i2c->irq, i2c);
fail_request:
irq_dispose_mapping(i2c->irq);
iounmap(i2c->base);
fail_map:
kfree(i2c);
return result;
}
static int fsl_i2c_remove(struct platform_device *op)
{
struct mpc_i2c *i2c = platform_get_drvdata(op);
i2c_del_adapter(&i2c->adap);
if (i2c->clk_per)
clk_disable_unprepare(i2c->clk_per);
if (i2c->irq)
free_irq(i2c->irq, i2c);
irq_dispose_mapping(i2c->irq);
iounmap(i2c->base);
kfree(i2c);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mpc_i2c_suspend(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
return 0;
}
static int mpc_i2c_resume(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
return 0;
}
static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
#define MPC_I2C_PM_OPS (&mpc_i2c_pm_ops)
#else
#define MPC_I2C_PM_OPS NULL
#endif
static const struct mpc_i2c_data mpc_i2c_data_512x = {
.setup = mpc_i2c_setup_512x,
};
static const struct mpc_i2c_data mpc_i2c_data_52xx = {
.setup = mpc_i2c_setup_52xx,
};
static const struct mpc_i2c_data mpc_i2c_data_8313 = {
.setup = mpc_i2c_setup_8xxx,
};
static const struct mpc_i2c_data mpc_i2c_data_8543 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 2,
};
static const struct mpc_i2c_data mpc_i2c_data_8544 = {
.setup = mpc_i2c_setup_8xxx,
.prescaler = 3,
};
static const struct of_device_id mpc_i2c_of_match[] = {
{.compatible = "mpc5200-i2c", .data = &mpc_i2c_data_52xx, },
{.compatible = "fsl,mpc5200b-i2c", .data = &mpc_i2c_data_52xx, },
{.compatible = "fsl,mpc5200-i2c", .data = &mpc_i2c_data_52xx, },
{.compatible = "fsl,mpc5121-i2c", .data = &mpc_i2c_data_512x, },
{.compatible = "fsl,mpc8313-i2c", .data = &mpc_i2c_data_8313, },
{.compatible = "fsl,mpc8543-i2c", .data = &mpc_i2c_data_8543, },
{.compatible = "fsl,mpc8544-i2c", .data = &mpc_i2c_data_8544, },
/* Backward compatibility */
{.compatible = "fsl-i2c", },
{},
};
MODULE_DEVICE_TABLE(of, mpc_i2c_of_match);
/* Structure for a device driver */
static struct platform_driver mpc_i2c_driver = {
.probe = fsl_i2c_probe,
.remove = fsl_i2c_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
.pm = MPC_I2C_PM_OPS,
},
};
module_platform_driver(mpc_i2c_driver);
MODULE_AUTHOR("Adrian Cox <adrian@humboldt.co.uk>");
MODULE_DESCRIPTION("I2C-Bus adapter for MPC107 bridge and "
"MPC824x/83xx/85xx/86xx/512x/52xx processors");
MODULE_LICENSE("GPL");
| gpl-2.0 |
evitareul/android_kernel_htc_evitareul | crypto/tcrypt.c | 335 | 28622 | /*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
* Adrian Hoban <adrian.hoban@intel.com>
* Gabriele Paoloni <gabriele.paoloni@intel.com>
* Tadeusz Struk (tadeusz.struk@intel.com)
* Copyright (c) 2010, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
#include "internal.h"
/*
* Need slab memory for testing (size in number of pages).
*/
#define TVMEMSIZE 4
/*
* Used by test_cipher_speed()
*/
#define ENCRYPT 1
#define DECRYPT 0
/*
* Used by test_cipher_speed()
*/
static unsigned int sec;
static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
"lzo", "cts", "zlib", NULL
};
static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
struct scatterlist *sg, int blen, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
else
ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
if (ret)
return ret;
}
printk("%d operations in %d seconds (%ld bytes)\n",
bcount, sec, (long)bcount * blen);
return 0;
}
static int test_cipher_cycles(struct blkcipher_desc *desc, int enc,
struct scatterlist *sg, int blen)
{
unsigned long cycles = 0;
int ret = 0;
int i;
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
else
ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
if (enc)
ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
else
ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
out:
local_irq_enable();
local_bh_enable();
if (ret == 0)
printk("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
return ret;
}
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize)
{
unsigned int ret, i, j, iv_len;
const char *key;
char iv[128];
struct crypto_blkcipher *tfm;
struct blkcipher_desc desc;
const char *e;
u32 *b_size;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
printk("\ntesting speed of %s %s\n", algo, e);
tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
printk("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
i = 0;
do {
b_size = block_sizes;
do {
struct scatterlist sg[TVMEMSIZE];
if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
printk("template (%u) too big for "
"tvmem (%lu)\n", *keysize + *b_size,
TVMEMSIZE * PAGE_SIZE);
goto out;
}
printk("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, *b_size);
memset(tvmem[0], 0xff, PAGE_SIZE);
/* set key, plain text and IV */
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
ret = crypto_blkcipher_setkey(tfm, key, *keysize);
if (ret) {
printk("setkey() failed flags=%x\n",
crypto_blkcipher_get_flags(tfm));
goto out;
}
sg_init_table(sg, TVMEMSIZE);
sg_set_buf(sg, tvmem[0] + *keysize,
PAGE_SIZE - *keysize);
for (j = 1; j < TVMEMSIZE; j++) {
sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
memset (tvmem[j], 0xff, PAGE_SIZE);
}
iv_len = crypto_blkcipher_ivsize(tfm);
if (iv_len) {
memset(&iv, 0xff, iv_len);
crypto_blkcipher_set_iv(tfm, iv, iv_len);
}
if (sec)
ret = test_cipher_jiffies(&desc, enc, sg,
*b_size, sec);
else
ret = test_cipher_cycles(&desc, enc, sg,
*b_size);
if (ret) {
printk("%s() failed flags=%x\n", e, desc.flags);
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out:
crypto_free_blkcipher(tfm);
}
static int test_hash_jiffies_digest(struct hash_desc *desc,
struct scatterlist *sg, int blen,
char *out, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = crypto_hash_digest(desc, sg, blen, out);
if (ret)
return ret;
}
printk("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
int blen, int plen, char *out, int sec)
{
unsigned long start, end;
int bcount, pcount;
int ret;
if (plen == blen)
return test_hash_jiffies_digest(desc, sg, blen, out, sec);
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = crypto_hash_init(desc);
if (ret)
return ret;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = crypto_hash_update(desc, sg, plen);
if (ret)
return ret;
}
/* we assume there is enough space in 'out' for the result */
ret = crypto_hash_final(desc, out);
if (ret)
return ret;
}
printk("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_hash_cycles_digest(struct hash_desc *desc,
struct scatterlist *sg, int blen, char *out)
{
unsigned long cycles = 0;
int i;
int ret;
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = crypto_hash_digest(desc, sg, blen, out);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = crypto_hash_digest(desc, sg, blen, out);
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
local_irq_enable();
local_bh_enable();
if (ret)
return ret;
printk("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
int blen, int plen, char *out)
{
unsigned long cycles = 0;
int i, pcount;
int ret;
if (plen == blen)
return test_hash_cycles_digest(desc, sg, blen, out);
local_bh_disable();
local_irq_disable();
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = crypto_hash_init(desc);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = crypto_hash_update(desc, sg, plen);
if (ret)
goto out;
}
ret = crypto_hash_final(desc, out);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = crypto_hash_init(desc);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = crypto_hash_update(desc, sg, plen);
if (ret)
goto out;
}
ret = crypto_hash_final(desc, out);
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
local_irq_enable();
local_bh_enable();
if (ret)
return ret;
printk("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static void test_hash_sg_init(struct scatterlist *sg)
{
int i;
sg_init_table(sg, TVMEMSIZE);
for (i = 0; i < TVMEMSIZE; i++) {
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
memset(tvmem[i], 0xff, PAGE_SIZE);
}
}
static void test_hash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
struct scatterlist sg[TVMEMSIZE];
struct crypto_hash *tfm;
struct hash_desc desc;
static char output[1024];
int i;
int ret;
printk(KERN_INFO "\ntesting speed of %s\n", algo);
tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
desc.tfm = tfm;
desc.flags = 0;
if (crypto_hash_digestsize(tfm) > sizeof(output)) {
printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
crypto_hash_digestsize(tfm), sizeof(output));
goto out;
}
test_hash_sg_init(sg);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
printk(KERN_ERR
"template (%u) too big for tvmem (%lu)\n",
speed[i].blen, TVMEMSIZE * PAGE_SIZE);
goto out;
}
if (speed[i].klen)
crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
printk(KERN_INFO "test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
if (sec)
ret = test_hash_jiffies(&desc, sg, speed[i].blen,
speed[i].plen, output, sec);
else
ret = test_hash_cycles(&desc, sg, speed[i].blen,
speed[i].plen, output);
if (ret) {
printk(KERN_ERR "hashing failed ret=%d\n", ret);
break;
}
}
out:
crypto_free_hash(tfm);
}
struct tcrypt_result {
struct completion completion;
int err;
};
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
struct tcrypt_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
if (ret == -EINPROGRESS || ret == -EBUSY) {
struct tcrypt_result *tr = req->base.data;
ret = wait_for_completion_interruptible(&tr->completion);
if (!ret)
ret = tr->err;
INIT_COMPLETION(tr->completion);
}
return ret;
}
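/*
* Illustrative note: -EINPROGRESS/-EBUSY mean the request was queued
* asynchronously; the helper then sleeps on the completion that
* tcrypt_complete() signals and reports the driver's final status, so
* the speed tests can treat every ahash call as if it were synchronous.
*/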
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int sec)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
return ret;
}
printk("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_ahash_jiffies(struct ahash_request *req, int blen,
int plen, char *out, int sec)
{
unsigned long start, end;
int bcount, pcount;
int ret;
if (plen == blen)
return test_ahash_jiffies_digest(req, blen, out, sec);
for (start = jiffies, end = start + sec * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = crypto_ahash_init(req);
if (ret)
return ret;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
return ret;
}
/* we assume there is enough space in 'out' for the result */
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
return ret;
}
pr_cont("%6u opers/sec, %9lu bytes/sec\n",
bcount / sec, ((long)bcount * blen) / sec);
return 0;
}
static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
char *out)
{
unsigned long cycles = 0;
int ret, i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static int test_ahash_cycles(struct ahash_request *req, int blen,
int plen, char *out)
{
unsigned long cycles = 0;
int i, pcount, ret;
if (plen == blen)
return test_ahash_cycles_digest(req, blen, out);
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = crypto_ahash_init(req);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = crypto_ahash_init(req);
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static void test_ahash_speed(const char *algo, unsigned int sec,
struct hash_speed *speed)
{
struct scatterlist sg[TVMEMSIZE];
struct tcrypt_result tresult;
struct ahash_request *req;
struct crypto_ahash *tfm;
static char output[1024];
int i, ret;
printk(KERN_INFO "\ntesting speed of async %s\n", algo);
tfm = crypto_alloc_ahash(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
return;
}
if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
pr_err("digestsize(%u) > outputbuffer(%zu)\n",
crypto_ahash_digestsize(tfm), sizeof(output));
goto out;
}
test_hash_sg_init(sg);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("ahash request allocation failure\n");
goto out;
}
init_completion(&tresult.completion);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &tresult);
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
speed[i].blen, TVMEMSIZE * PAGE_SIZE);
break;
}
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
ahash_request_set_crypt(req, sg, output, speed[i].plen);
if (sec)
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, sec);
else
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
break;
}
}
ahash_request_free(req);
out:
crypto_free_ahash(tfm);
}
static void test_available(void)
{
char **name = check;
while (*name) {
printk("alg %s ", *name);
printk(crypto_has_alg(*name, 0, 0) ?
"found\n" : "not found\n");
name++;
}
}
static inline int tcrypt_test(const char *alg)
{
int ret;
ret = alg_test(alg, alg, 0, 0);
/* non-fips algs return -EINVAL in fips mode */
if (fips_enabled && ret == -EINVAL)
ret = 0;
return ret;
}
static int do_test(int m)
{
int i;
int ret = 0;
switch (m) {
case 0:
for (i = 1; i < 200; i++)
ret += do_test(i);
break;
case 1:
ret += tcrypt_test("md5");
break;
case 2:
ret += tcrypt_test("sha1");
break;
case 3:
ret += tcrypt_test("ecb(des)");
ret += tcrypt_test("cbc(des)");
break;
case 4:
ret += tcrypt_test("ecb(des3_ede)");
ret += tcrypt_test("cbc(des3_ede)");
break;
case 5:
ret += tcrypt_test("md4");
break;
case 6:
ret += tcrypt_test("sha256");
break;
case 7:
ret += tcrypt_test("ecb(blowfish)");
ret += tcrypt_test("cbc(blowfish)");
break;
case 8:
ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)");
break;
case 9:
ret += tcrypt_test("ecb(serpent)");
break;
case 10:
ret += tcrypt_test("ecb(aes)");
ret += tcrypt_test("cbc(aes)");
ret += tcrypt_test("lrw(aes)");
ret += tcrypt_test("xts(aes)");
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
break;
case 11:
ret += tcrypt_test("sha384");
break;
case 12:
ret += tcrypt_test("sha512");
break;
case 13:
ret += tcrypt_test("deflate");
break;
case 14:
ret += tcrypt_test("ecb(cast5)");
break;
case 15:
ret += tcrypt_test("ecb(cast6)");
break;
case 16:
ret += tcrypt_test("ecb(arc4)");
break;
case 17:
ret += tcrypt_test("michael_mic");
break;
case 18:
ret += tcrypt_test("crc32c");
break;
case 19:
ret += tcrypt_test("ecb(tea)");
break;
case 20:
ret += tcrypt_test("ecb(xtea)");
break;
case 21:
ret += tcrypt_test("ecb(khazad)");
break;
case 22:
ret += tcrypt_test("wp512");
break;
case 23:
ret += tcrypt_test("wp384");
break;
case 24:
ret += tcrypt_test("wp256");
break;
case 25:
ret += tcrypt_test("ecb(tnepres)");
break;
case 26:
ret += tcrypt_test("ecb(anubis)");
ret += tcrypt_test("cbc(anubis)");
break;
case 27:
ret += tcrypt_test("tgr192");
break;
case 28:
ret += tcrypt_test("tgr160");
break;
case 29:
ret += tcrypt_test("tgr128");
break;
case 30:
ret += tcrypt_test("ecb(xeta)");
break;
case 31:
ret += tcrypt_test("pcbc(fcrypt)");
break;
case 32:
ret += tcrypt_test("ecb(camellia)");
ret += tcrypt_test("cbc(camellia)");
break;
case 33:
ret += tcrypt_test("sha224");
break;
case 34:
ret += tcrypt_test("salsa20");
break;
case 35:
ret += tcrypt_test("gcm(aes)");
break;
case 36:
ret += tcrypt_test("lzo");
break;
case 37:
ret += tcrypt_test("ccm(aes)");
break;
case 38:
ret += tcrypt_test("cts(cbc(aes))");
break;
case 39:
ret += tcrypt_test("rmd128");
break;
case 40:
ret += tcrypt_test("rmd160");
break;
case 41:
ret += tcrypt_test("rmd256");
break;
case 42:
ret += tcrypt_test("rmd320");
break;
case 43:
ret += tcrypt_test("ecb(seed)");
break;
case 44:
ret += tcrypt_test("zlib");
break;
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
case 46:
ret += tcrypt_test("ofb(aes)");
break;
case 100:
ret += tcrypt_test("hmac(md5)");
break;
case 101:
ret += tcrypt_test("hmac(sha1)");
break;
case 102:
ret += tcrypt_test("hmac(sha256)");
break;
case 103:
ret += tcrypt_test("hmac(sha384)");
break;
case 104:
ret += tcrypt_test("hmac(sha512)");
break;
case 105:
ret += tcrypt_test("hmac(sha224)");
break;
case 106:
ret += tcrypt_test("xcbc(aes)");
break;
case 107:
ret += tcrypt_test("hmac(rmd128)");
break;
case 108:
ret += tcrypt_test("hmac(rmd160)");
break;
case 109:
ret += tcrypt_test("vmac(aes)");
break;
case 110:
ret += tcrypt_test("cmac(aes)");
break;
case 150:
ret += tcrypt_test("ansi_cprng");
break;
case 151:
ret += tcrypt_test("rfc4106(gcm(aes))");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 201:
test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 202:
test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 203:
test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 204:
test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 205:
test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 206:
test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
break;
case 300:
/* fall through */
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
case 399:
break;
case 400:
/* fall through */
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 499:
break;
case 1000:
test_available();
break;
}
return ret;
}
static int do_alg_test(const char *alg, u32 type, u32 mask)
{
return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
0 : -ENOENT;
}
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
int i;
for (i = 0; i < TVMEMSIZE; i++) {
tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
if (!tvmem[i])
goto err_free_tv;
}
if (alg)
err = do_alg_test(alg, type, mask);
else
err = do_test(mode);
if (err) {
printk(KERN_ERR "tcrypt: one or more tests failed!\n");
goto err_free_tv;
}
/* We intentionally return -EAGAIN to prevent keeping the module,
* unless we're running in fips mode. It does all its work from
* init() and doesn't offer any runtime functionality, but in
* the fips case, checking for a successful load is helpful.
* => we don't need it in the memory, do we?
* -- mludvig
*/
if (!fips_enabled)
err = -EAGAIN;
err_free_tv:
for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
free_page((unsigned long)tvmem[i]);
return err;
}
/*
* If an init function is provided, an exit function must also be provided
* to allow module unload.
*/
static void __exit tcrypt_mod_fini(void) { }
module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);
module_param(type, uint, 0);
module_param(mask, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
| gpl-2.0 |
mossmaurice/binutils-redhat | libiberty/basename.c | 591 | 1283 | /* Return the basename of a pathname.
This file is in the public domain. */
/*
@deftypefn Supplemental char* basename (const char *@var{name})
Returns a pointer to the last component of pathname @var{name}.
Behavior is undefined if the pathname ends in a directory separator.
@end deftypefn
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "ansidecl.h"
#include "libiberty.h"
#include "safe-ctype.h"
#ifndef DIR_SEPARATOR
#define DIR_SEPARATOR '/'
#endif
#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
defined (__OS2__)
#define HAVE_DOS_BASED_FILE_SYSTEM
#ifndef DIR_SEPARATOR_2
#define DIR_SEPARATOR_2 '\\'
#endif
#endif
/* Define IS_DIR_SEPARATOR. */
#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
char *
basename (const char *name)
{
const char *base;
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
/* Skip over the disk name in MSDOS pathnames. */
if (ISALPHA (name[0]) && name[1] == ':')
name += 2;
#endif
for (base = name; *name; name++)
{
if (IS_DIR_SEPARATOR (*name))
{
base = name + 1;
}
}
return (char *) base;
}
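/*
* Usage sketch (illustrative only):
*
* basename ("/usr/lib/libfoo.so") returns "libfoo.so"
* basename ("libfoo.so") returns "libfoo.so"
*
* and with HAVE_DOS_BASED_FILE_SYSTEM defined:
*
* basename ("c:\\dir\\file.exe") returns "file.exe"
*
* Per the @deftypefn note above, a name ending in a separator is
* undefined behavior, so callers must strip trailing separators first.
*/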
| gpl-2.0 |
Klozz/2.6.32.xx-Yuki-kernel | fs/cachefiles/xattr.c | 591 | 6541 | /* CacheFiles extended attribute management
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/xattr.h>
#include "internal.h"
static const char cachefiles_xattr_cache[] =
XATTR_USER_PREFIX "CacheFiles.cache";
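/*
* Illustrative note: with XATTR_USER_PREFIX this expands to the on-disk
* attribute name "user.CacheFiles.cache", which can be examined from
* userspace with e.g. "getfattr -n user.CacheFiles.cache <file>".
*/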
/*
* check the type label on an object
* - done using xattrs
*/
int cachefiles_check_object_type(struct cachefiles_object *object)
{
struct dentry *dentry = object->dentry;
char type[3], xtype[3];
int ret;
ASSERT(dentry);
ASSERT(dentry->d_inode);
if (!object->fscache.cookie)
strcpy(type, "C3");
else
snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
_enter("%p{%s}", object, type);
/* attempt to install a type label directly */
ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2,
XATTR_CREATE);
if (ret == 0) {
_debug("SET"); /* we succeeded */
goto error;
}
if (ret != -EEXIST) {
kerror("Can't set xattr on %*.*s [%lu] (err %d)",
dentry->d_name.len, dentry->d_name.len,
dentry->d_name.name, dentry->d_inode->i_ino,
-ret);
goto error;
}
/* read the current type label */
ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3);
if (ret < 0) {
if (ret == -ERANGE)
goto bad_type_length;
kerror("Can't read xattr on %*.*s [%lu] (err %d)",
dentry->d_name.len, dentry->d_name.len,
dentry->d_name.name, dentry->d_inode->i_ino,
-ret);
goto error;
}
/* check the type is what we're expecting */
if (ret != 2)
goto bad_type_length;
if (xtype[0] != type[0] || xtype[1] != type[1])
goto bad_type;
ret = 0;
error:
_leave(" = %d", ret);
return ret;
bad_type_length:
kerror("Cache object %lu type xattr length incorrect",
dentry->d_inode->i_ino);
ret = -EIO;
goto error;
bad_type:
xtype[2] = 0;
kerror("Cache object %*.*s [%lu] type %s not %s",
dentry->d_name.len, dentry->d_name.len,
dentry->d_name.name, dentry->d_inode->i_ino,
xtype, type);
ret = -EIO;
goto error;
}
/*
* set the state xattr on a cache file
*/
int cachefiles_set_object_xattr(struct cachefiles_object *object,
struct cachefiles_xattr *auxdata)
{
struct dentry *dentry = object->dentry;
int ret;
ASSERT(object->fscache.cookie);
ASSERT(dentry);
_enter("%p,#%d", object, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
&auxdata->type, auxdata->len,
XATTR_CREATE);
if (ret < 0 && ret != -ENOMEM)
cachefiles_io_error_obj(
object,
"Failed to set xattr with error %d", ret);
_leave(" = %d", ret);
return ret;
}
/*
* update the state xattr on a cache file
*/
int cachefiles_update_object_xattr(struct cachefiles_object *object,
struct cachefiles_xattr *auxdata)
{
struct dentry *dentry = object->dentry;
int ret;
ASSERT(object->fscache.cookie);
ASSERT(dentry);
_enter("%p,#%d", object, auxdata->len);
/* attempt to install the cache metadata directly */
_debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
&auxdata->type, auxdata->len,
XATTR_REPLACE);
if (ret < 0 && ret != -ENOMEM)
cachefiles_io_error_obj(
object,
"Failed to update xattr with error %d", ret);
_leave(" = %d", ret);
return ret;
}
/*
* check the state xattr on a cache file
* - return -ESTALE if the object should be deleted
*/
int cachefiles_check_object_xattr(struct cachefiles_object *object,
struct cachefiles_xattr *auxdata)
{
struct cachefiles_xattr *auxbuf;
struct dentry *dentry = object->dentry;
int ret;
_enter("%p,#%d", object, auxdata->len);
ASSERT(dentry);
ASSERT(dentry->d_inode);
auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
if (!auxbuf) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
/* read the current type label */
ret = vfs_getxattr(dentry, cachefiles_xattr_cache,
&auxbuf->type, 512 + 1);
if (ret < 0) {
if (ret == -ENODATA)
goto stale; /* no attribute - power went off
* mid-cull? */
if (ret == -ERANGE)
goto bad_type_length;
cachefiles_io_error_obj(object,
"Can't read xattr on %lu (err %d)",
dentry->d_inode->i_ino, -ret);
goto error;
}
/* check the on-disk object */
if (ret < 1)
goto bad_type_length;
if (auxbuf->type != auxdata->type)
goto stale;
auxbuf->len = ret;
/* consult the netfs */
if (object->fscache.cookie->def->check_aux) {
enum fscache_checkaux result;
unsigned int dlen;
dlen = auxbuf->len - 1;
_debug("checkaux %s #%u",
object->fscache.cookie->def->name, dlen);
result = fscache_check_aux(&object->fscache,
&auxbuf->data, dlen);
switch (result) {
/* entry okay as is */
case FSCACHE_CHECKAUX_OKAY:
goto okay;
/* entry requires update */
case FSCACHE_CHECKAUX_NEEDS_UPDATE:
break;
/* entry requires deletion */
case FSCACHE_CHECKAUX_OBSOLETE:
goto stale;
default:
BUG();
}
/* update the current label */
ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
&auxdata->type, auxdata->len,
XATTR_REPLACE);
if (ret < 0) {
cachefiles_io_error_obj(object,
"Can't update xattr on %lu"
" (error %d)",
dentry->d_inode->i_ino, -ret);
goto error;
}
}
okay:
ret = 0;
error:
kfree(auxbuf);
_leave(" = %d", ret);
return ret;
bad_type_length:
kerror("Cache object %lu xattr length incorrect",
dentry->d_inode->i_ino);
ret = -EIO;
goto error;
stale:
ret = -ESTALE;
goto error;
}
/*
* remove the object's xattr to mark it stale
*/
int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
struct dentry *dentry)
{
int ret;
ret = vfs_removexattr(dentry, cachefiles_xattr_cache);
if (ret < 0) {
if (ret == -ENOENT || ret == -ENODATA)
ret = 0;
else if (ret != -ENOMEM)
cachefiles_io_error(cache,
"Can't remove xattr from %lu"
" (error %d)",
dentry->d_inode->i_ino, -ret);
}
_leave(" = %d", ret);
return ret;
}
| gpl-2.0 |
kamarush/ZTE_GXIn_Kernel-3.0.8 | lib/nlattr.c | 847 | 12528 | /*
* NETLINK Netlink attributes
*
* Authors: Thomas Graf <tgraf@suug.ch>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>
static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
};
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy)
{
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
if (type <= 0 || type > maxtype)
return 0;
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
switch (pt->type) {
case NLA_FLAG:
if (attrlen > 0)
return -ERANGE;
break;
case NLA_NUL_STRING:
if (pt->len)
minlen = min_t(int, attrlen, pt->len + 1);
else
minlen = attrlen;
if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
return -EINVAL;
/* fall through */
case NLA_STRING:
if (attrlen < 1)
return -ERANGE;
if (pt->len) {
char *buf = nla_data(nla);
if (buf[attrlen - 1] == '\0')
attrlen--;
if (attrlen > pt->len)
return -ERANGE;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
return -ERANGE;
break;
case NLA_NESTED_COMPAT:
if (attrlen < pt->len)
return -ERANGE;
if (attrlen < NLA_ALIGN(pt->len))
break;
if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
return -ERANGE;
nla = nla_data(nla) + NLA_ALIGN(pt->len);
if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
return -ERANGE;
break;
case NLA_NESTED:
/* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
default:
if (pt->len)
minlen = pt->len;
else if (pt->type != NLA_UNSPEC)
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
return -ERANGE;
}
return 0;
}
/**
* nla_validate - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
* ignored. See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy)
{
const struct nlattr *nla;
int rem, err;
nla_for_each_attr(nla, head, len, rem) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
goto errout;
}
err = 0;
errout:
return err;
}
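/*
* Usage sketch (illustrative; the MYA_* names and the policy table are
* hypothetical): a receiver can validate a message payload up front:
*
* static const struct nla_policy my_policy[MYA_MAX + 1] = {
* [MYA_IFINDEX] = { .type = NLA_U32 },
* [MYA_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
* };
*
* err = nla_validate(nlmsg_attrdata(nlh, 0), nlmsg_attrlen(nlh, 0),
* MYA_MAX, my_policy);
*/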
/**
* nla_policy_len - Determine the max. length of a policy
* @p: policy to use
* @n: number of policy entries
*
* Determines the max. length of the policy. It is currently used
* to allocate Netlink buffers roughly the size of the actual
* message.
*
* Returns the max. total attribute length.
*/
int
nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_minlen[p->type])
len += nla_total_size(nla_attr_minlen[p->type]);
}
return len;
}
/**
* nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type. Attributes with a type
* exceeding maxtype will be silently ignored for backwards compatibility
* reasons. policy may be set to NULL if no validation is required.
*
* Returns 0 on success or a negative error code.
*/
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy)
{
const struct nlattr *nla;
int rem, err;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
if (type > 0 && type <= maxtype) {
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
goto errout;
}
tb[type] = (struct nlattr *)nla;
}
}
if (unlikely(rem > 0))
printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
"attributes.\n", rem);
err = 0;
errout:
return err;
}
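/*
* Usage sketch (illustrative; reuses the hypothetical MYA_* names and
* my_policy from the sketch above): parsing fills a table indexed by
* attribute type, which the caller then probes directly:
*
* struct nlattr *tb[MYA_MAX + 1];
*
* err = nla_parse(tb, MYA_MAX, nlmsg_attrdata(nlh, 0),
* nlmsg_attrlen(nlh, 0), my_policy);
* if (err < 0)
* return err;
* if (tb[MYA_IFINDEX])
* ifindex = nla_get_u32(tb[MYA_IFINDEX]);
*/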
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @attrtype: type of attribute to look for
*
* Returns the first attribute in the stream matching the specified type.
*/
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
{
const struct nlattr *nla;
int rem;
nla_for_each_attr(nla, head, len, rem)
if (nla_type(nla) == attrtype)
return (struct nlattr *)nla;
return NULL;
}
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
* @dst: where to copy the string to
* @nla: attribute to copy the string from
* @dstsize: size of destination buffer
*
* Copies at most dstsize - 1 bytes into the destination buffer.
* The result is always a valid NUL-terminated string. Unlike
* strlcpy the destination buffer is always padded out.
*
* Returns the length of the source buffer.
*/
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
if (dstsize > 0) {
size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
memset(dst, 0, dstsize);
memcpy(dst, src, len);
}
return srclen;
}
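/*
* Usage sketch (illustrative; MYA_NAME is hypothetical): as with
* strlcpy, comparing the returned source length against the buffer
* size detects truncation:
*
* char name[16];
*
* if (nla_strlcpy(name, tb[MYA_NAME], sizeof(name)) >= sizeof(name))
* return -EINVAL;
*/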
/**
* nla_memcpy - Copy a netlink attribute into another memory area
* @dest: where to copy to
* @src: netlink attribute to copy from
* @count: size of the destination area
*
* Note: The number of bytes copied is limited by the length of
* the attribute's payload.
*
* Returns the number of bytes copied.
*/
int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
int minlen = min_t(int, count, nla_len(src));
memcpy(dest, nla_data(src), minlen);
return minlen;
}
/**
* nla_memcmp - Compare an attribute with sized memory area
* @nla: netlink attribute
* @data: memory area
* @size: size of memory area
*/
int nla_memcmp(const struct nlattr *nla, const void *data,
size_t size)
{
int d = nla_len(nla) - size;
if (d == 0)
d = memcmp(nla_data(nla), data, size);
return d;
}
/**
* nla_strcmp - Compare a string attribute against a string
* @nla: netlink string attribute
* @str: another string
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
int len = strlen(str) + 1;
int d = nla_len(nla) - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
#ifdef CONFIG_NET
/**
* __nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct nlattr *nla;
nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
return nla;
}
EXPORT_SYMBOL(__nla_reserve);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the payload.
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
void *start;
start = skb_put(skb, NLA_ALIGN(attrlen));
memset(start, 0, NLA_ALIGN(attrlen));
return start;
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
/**
* nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return NULL;
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);
/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return NULL;
return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);
/**
* __nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data)
{
struct nlattr *nla;
nla = __nla_reserve(skb, attrtype, attrlen);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);
/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute payload.
*/
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
void *start;
start = __nla_reserve_nohdr(skb, attrlen);
memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);
/**
* nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return -EMSGSIZE;
__nla_put(skb, attrtype, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put);
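/*
* Usage sketch (illustrative; MYA_IFINDEX is hypothetical): message
* construction mirrors parsing, with the conventional bail-out label
* used when the skb runs out of tailroom:
*
* if (nla_put(skb, MYA_IFINDEX, sizeof(u32), &ifindex))
* goto nla_put_failure;
*/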
/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
__nla_put_nohdr(skb, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);
/**
* nla_append - Add a netlink attribute without header or padding
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
memcpy(skb_put(skb, attrlen), data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
#endif
EXPORT_SYMBOL(nla_validate);
EXPORT_SYMBOL(nla_policy_len);
EXPORT_SYMBOL(nla_parse);
EXPORT_SYMBOL(nla_find);
EXPORT_SYMBOL(nla_strlcpy);
EXPORT_SYMBOL(nla_memcpy);
EXPORT_SYMBOL(nla_memcmp);
EXPORT_SYMBOL(nla_strcmp);
| gpl-2.0 |
Altaf-Mahdi/i9505 | drivers/staging/speakup/kobjects.c | 847 | 24701 | /*
* Speakup kobject implementation
*
* Copyright (C) 2009 William Hubbs
*
* This code is based on kobject-example.c, which came with linux 2.6.x.
*
* Copyright (C) 2004-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2007 Novell Inc.
*
* Released under the GPL version 2 only.
*
*/
#include <linux/slab.h> /* For kmalloc. */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include "speakup.h"
#include "spk_priv.h"
/*
* This is called when a user reads the characters or chartab sys file.
*/
static ssize_t chars_chartab_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int i;
int len = 0;
char *cp;
char *buf_pointer = buf;
size_t bufsize = PAGE_SIZE;
unsigned long flags;
spk_lock(flags);
*buf_pointer = '\0';
for (i = 0; i < 256; i++) {
if (bufsize <= 1)
break;
if (strcmp("characters", attr->attr.name) == 0) {
len = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
i, spk_characters[i]);
} else { /* show chartab entry */
if (IS_TYPE(i, B_CTL))
cp = "B_CTL";
else if (IS_TYPE(i, WDLM))
cp = "WDLM";
else if (IS_TYPE(i, A_PUNC))
cp = "A_PUNC";
else if (IS_TYPE(i, PUNC))
cp = "PUNC";
else if (IS_TYPE(i, NUM))
cp = "NUM";
else if (IS_TYPE(i, A_CAP))
cp = "A_CAP";
else if (IS_TYPE(i, ALPHA))
cp = "ALPHA";
else if (IS_TYPE(i, B_CAPSYM))
cp = "B_CAPSYM";
else if (IS_TYPE(i, B_SYM))
cp = "B_SYM";
else
cp = "0";
len =
scnprintf(buf_pointer, bufsize, "%d\t%s\n", i, cp);
}
bufsize -= len;
buf_pointer += len;
}
spk_unlock(flags);
return buf_pointer - buf;
}
/*
* Print informational messages or warnings after updating
* character descriptions or chartab entries.
*/
static void report_char_chartab_status(int reset, int received, int used,
int rejected, int do_characters)
{
char *object_type[] = {
"character class entries",
"character descriptions",
};
int len;
char buf[80];
if (reset) {
pr_info("%s reset to defaults\n", object_type[do_characters]);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d %s\n",
used, received, object_type[do_characters]);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
printk(KERN_INFO "%s", buf);
}
}
/*
* This is called when a user changes the characters or chartab parameters.
*/
static ssize_t chars_chartab_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
char *cp = (char *) buf;
char *end = cp + count; /* the null at the end of the buffer */
char *linefeed = NULL;
char keyword[MAX_DESC_LEN + 1];
char *outptr = NULL; /* Will hold keyword or desc. */
char *temp = NULL;
char *desc = NULL;
ssize_t retval = count;
unsigned long flags;
unsigned long index = 0;
int charclass = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
int do_characters = !strcmp(attr->attr.name, "characters");
size_t desc_length = 0;
int i;
spk_lock(flags);
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if ((*cp == '\n') || strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
index = simple_strtoul(cp, &temp, 10);
if (index > 255) {
rejected++;
cp = linefeed + 1;
continue;
}
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
if (desc_length > MAX_DESC_LEN) {
rejected++;
cp = linefeed + 1;
continue;
}
if (do_characters) {
desc = kmalloc(desc_length + 1, GFP_ATOMIC);
if (!desc) {
retval = -ENOMEM;
reset = 1; /* just reset on error. */
break;
}
outptr = desc;
} else {
outptr = keyword;
}
for (i = 0; i < desc_length; i++)
outptr[i] = temp[i];
outptr[desc_length] = '\0';
if (do_characters) {
if (spk_characters[index] != spk_default_chars[index])
kfree(spk_characters[index]);
spk_characters[index] = desc;
used++;
} else {
charclass = spk_chartab_get_value(keyword);
if (charclass == 0) {
rejected++;
cp = linefeed + 1;
continue;
}
if (charclass != spk_chartab[index]) {
spk_chartab[index] = charclass;
used++;
}
}
cp = linefeed + 1;
}
if (reset) {
if (do_characters)
spk_reset_default_chars();
else
spk_reset_default_chartab();
}
spk_unlock(flags);
report_char_chartab_status(reset, received, used, rejected,
do_characters);
return retval;
}
/*
* This is called when a user reads the keymap parameter.
*/
static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp = buf;
int i;
int n;
int num_keys;
int nstates;
u_char *cp1;
u_char ch;
unsigned long flags;
spk_lock(flags);
cp1 = spk_key_buf + SHIFT_TBL_SIZE;
num_keys = (int)(*cp1);
nstates = (int)cp1[1];
cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
cp1 += 2; /* now pointing at shift states */
/* dump num_keys+1 as first row is shift states + flags,
* each subsequent row is key + states */
for (n = 0; n <= num_keys; n++) {
for (i = 0; i <= nstates; i++) {
ch = *cp1++;
cp += sprintf(cp, "%d,", (int)ch);
*cp++ = (i < nstates) ? SPACE : '\n';
}
}
cp += sprintf(cp, "0, %d\n", KEY_MAP_VER);
spk_unlock(flags);
return (int)(cp-buf);
}
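/*
* Illustrative note: the dump therefore starts with
* "<KEY_MAP_VER>, <num_keys>, <nstates>,", followed by one row of
* shift states plus flags and then num_keys rows of key + states,
* and ends with "0, <KEY_MAP_VER>" -- the same shape that
* keymap_store() below expects to be written back.
*/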
/*
* This is called when a user changes the keymap parameter.
*/
static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int i;
ssize_t ret = count;
char *in_buff = NULL;
char *cp;
u_char *cp1;
unsigned long flags;
spk_lock(flags);
in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spk_unlock(flags);
return -ENOMEM;
}
if (strchr("dDrR", *in_buff)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
pr_info("keymap set to default values\n");
kfree(in_buff);
spk_unlock(flags);
return count;
}
if (in_buff[count - 1] == '\n')
in_buff[count - 1] = '\0';
cp = in_buff;
cp1 = (u_char *)in_buff;
for (i = 0; i < 3; i++) {
cp = spk_s2uchar(cp, cp1);
cp1++;
}
i = (int)cp1[-2]+1;
i *= (int)cp1[-1]+1;
i += 2; /* 0 and last map ver */
if (cp1[-3] != KEY_MAP_VER || cp1[-1] > 10 ||
i+SHIFT_TBL_SIZE+4 >= sizeof(spk_key_buf)) {
pr_warn("i %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
spk_unlock(flags);
return -EINVAL;
}
while (--i >= 0) {
cp = spk_s2uchar(cp, cp1);
cp1++;
if (!(*cp))
break;
}
if (i != 0 || cp1[-1] != KEY_MAP_VER || cp1[-2] != 0) {
ret = -EINVAL;
pr_warn("end %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
} else {
if (spk_set_key_info(in_buff, spk_key_buf)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
ret = -EINVAL;
pr_warn("set key failed\n");
}
}
kfree(in_buff);
spk_unlock(flags);
return ret;
}
/*
* This is called when a user changes the value of the silent parameter.
*/
static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
struct vc_data *vc = vc_cons[fg_console].d;
char ch = 0;
char shut;
unsigned long flags;
len = strlen(buf);
if (len > 0 && len < 3) {
ch = buf[0];
if (ch == '\n')
ch = '0';
}
if (ch < '0' || ch > '7') {
pr_warn("silent value '%c' not in range (0,7)\n", ch);
return -EINVAL;
}
spk_lock(flags);
if (ch&2) {
shut = 1;
spk_do_flush();
} else {
shut = 0;
}
if (ch&4)
shut |= 0x40;
if (ch&1)
spk_shut_up |= shut;
else
spk_shut_up &= ~shut;
spk_unlock(flags);
return count;
}
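/*
* Illustrative note: the character written is a 3-bit mask -- bit 0
* selects set vs. clear, bit 1 targets the basic shut-up flag (and
* flushes queued speech first), and bit 2 targets the 0x40 flag; e.g.
* "echo 3 > silent" sets the shut-up flag and flushes speech.
*/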
/*
* This is called when a user reads the synth setting.
*/
static ssize_t synth_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv;
if (synth == NULL)
rv = sprintf(buf, "%s\n", "none");
else
rv = sprintf(buf, "%s\n", synth->name);
return rv;
}
/*
* This is called when a user requests to change synthesizers.
*/
static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int len;
char new_synth_name[10];
len = strlen(buf);
if (len < 2 || len > 9)
return -EINVAL;
strncpy(new_synth_name, buf, len);
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
spk_strlwr(new_synth_name);
if ((synth != NULL) && (!strcmp(new_synth_name, synth->name))) {
pr_warn("%s already in use\n", new_synth_name);
} else if (synth_init(new_synth_name) != 0) {
pr_warn("failed to init synth %s\n", new_synth_name);
return -ENODEV;
}
return count;
}
/*
* This is called when text is sent to the synth via the synth_direct file.
*/
static ssize_t synth_direct_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
u_char tmp[256];
int len;
int bytes;
const char *ptr = buf;
if (!synth)
return -EPERM;
len = strlen(buf);
while (len > 0) {
bytes = min_t(size_t, len, 250);
strncpy(tmp, ptr, bytes);
tmp[bytes] = '\0';
spk_xlate(tmp);
synth_printf("%s", tmp);
ptr += bytes;
len -= bytes;
}
return count;
}
/*
* This function is called when a user reads the version.
*/
static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
char *cp;
cp = buf;
cp += sprintf(cp, "Speakup version %s\n", SPEAKUP_VERSION);
if (synth)
cp += sprintf(cp, "%s synthesizer driver version %s\n",
synth->name, synth->version);
return cp - buf;
}
/*
* This is called when a user reads the punctuation settings.
*/
static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int i;
char *cp = buf;
struct st_var_header *p_header;
struct punc_var_t *var;
struct st_bits_data *pb;
short mask;
unsigned long flags;
p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
spk_lock(flags);
pb = (struct st_bits_data *) &spk_punc_info[var->value];
mask = pb->mask;
for (i = 33; i < 128; i++) {
if (!(spk_chartab[i]&mask))
continue;
*cp++ = (char)i;
}
spk_unlock(flags);
return cp-buf;
}
/*
* This is called when a user changes the punctuation settings.
*/
static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int x;
struct st_var_header *p_header;
struct punc_var_t *var;
char punc_buf[100];
unsigned long flags;
x = strlen(buf);
if (x < 1 || x > 99)
return -EINVAL;
p_header = spk_var_header_by_name(attr->attr.name);
if (p_header == NULL) {
pr_warn("p_header is null, attr->attr.name is %s\n",
attr->attr.name);
return -EINVAL;
}
var = spk_get_punc_var(p_header->var_id);
if (var == NULL) {
pr_warn("var is null, p_header->var_id is %i\n",
p_header->var_id);
return -EINVAL;
}
strncpy(punc_buf, buf, x);
while (x && punc_buf[x - 1] == '\n')
x--;
punc_buf[x] = '\0';
spk_lock(flags);
if (*punc_buf == 'd' || *punc_buf == 'r')
x = spk_set_mask_bits(0, var->value, 3);
else
x = spk_set_mask_bits(punc_buf, var->value, 3);
spk_unlock(flags);
return count;
}
/*
* This function is called when a user reads one of the variable parameters.
*/
ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
int rv = 0;
struct st_var_header *param;
struct var_t *var;
char *cp1;
char *cp;
char ch;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
spk_lock(flags);
var = (struct var_t *) param->data;
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (var)
rv = sprintf(buf, "%i\n", var->u.n.value);
else
rv = sprintf(buf, "0\n");
break;
case VAR_STRING:
if (var) {
cp1 = buf;
*cp1++ = '"';
for (cp = (char *)param->p_val; (ch = *cp); cp++) {
if (ch >= ' ' && ch < '~')
*cp1++ = ch;
else
cp1 += sprintf(cp1, "\\x%02x", ch);
}
*cp1++ = '"';
*cp1++ = '\n';
*cp1 = '\0';
rv = cp1-buf;
} else {
rv = sprintf(buf, "\"\"\n");
}
break;
default:
rv = sprintf(buf, "Bad parameter %s, type %i\n",
param->name, param->var_type);
break;
}
spk_unlock(flags);
return rv;
}
EXPORT_SYMBOL_GPL(spk_var_show);
/*
* This function is called when a user echos a value to one of the
* variable parameters.
*/
ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct st_var_header *param;
int ret;
int len;
char *cp;
struct var_t *var_data;
int value;
unsigned long flags;
param = spk_var_header_by_name(attr->attr.name);
if (param == NULL)
return -EINVAL;
if (param->data == NULL)
return 0;
ret = 0;
cp = spk_xlate((char *) buf);
spk_lock(flags);
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
if (*cp == 'd' || *cp == 'r' || *cp == '\0')
len = E_DEFAULT;
else if (*cp == '+' || *cp == '-')
len = E_INC;
else
len = E_SET;
speakup_s2i(cp, &value);
ret = spk_set_num_var(value, param, len);
if (ret == E_RANGE) {
var_data = param->data;
pr_warn("value for %s out of range, expect %d to %d\n",
attr->attr.name,
var_data->u.n.low, var_data->u.n.high);
}
break;
case VAR_STRING:
len = strlen(buf);
if ((len >= 1) && (buf[len - 1] == '\n'))
--len;
if ((len >= 2) && (buf[0] == '"') && (buf[len - 1] == '"')) {
++buf;
len -= 2;
}
cp = (char *) buf;
cp[len] = '\0';
ret = spk_set_string_var(buf, param, len);
if (ret == E_TOOLONG)
pr_warn("value too long for %s\n",
attr->attr.name);
break;
default:
pr_warn("%s unknown type %d\n",
param->name, (int)param->var_type);
break;
}
/*
* If voice was just changed, we might need to reset our default
* pitch and volume.
*/
if (strcmp(attr->attr.name, "voice") == 0) {
if (synth && synth->default_pitch) {
param = spk_var_header_by_name("pitch");
if (param) {
spk_set_num_var(synth->default_pitch[value], param,
E_NEW_DEFAULT);
spk_set_num_var(0, param, E_DEFAULT);
}
}
if (synth && synth->default_vol) {
param = spk_var_header_by_name("vol");
if (param) {
spk_set_num_var(synth->default_vol[value], param,
E_NEW_DEFAULT);
spk_set_num_var(0, param, E_DEFAULT);
}
}
}
spk_unlock(flags);
if (ret == SET_DEFAULT)
pr_info("%s reset to default value\n", attr->attr.name);
return count;
}
EXPORT_SYMBOL_GPL(spk_var_store);
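/*
 * Illustrative example (not from the original source): for numeric and
 * time parameters the parser above accepts three forms. "d", "r" or an
 * empty write restores the default (E_DEFAULT), a leading '+' or '-'
 * adjusts the current value (E_INC), and a bare number sets it (E_SET):
 *
 * $ echo d > /sys/accessibility/speakup/punc_level	# back to default
 * $ echo +1 > /sys/accessibility/speakup/punc_level	# increment by 1
 * $ echo 2 > /sys/accessibility/speakup/punc_level	# set to 2
 *
 * Values outside the variable's range trigger the E_RANGE warning above.
 * String parameters may be wrapped in double quotes, which are stripped
 * before spk_set_string_var() is called.
 */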
/*
* Functions for reading and writing lists of i18n messages. Incomplete.
*/
static ssize_t message_show_helper(char *buf, enum msg_index_t first,
enum msg_index_t last)
{
size_t bufsize = PAGE_SIZE;
char *buf_pointer = buf;
int printed;
enum msg_index_t cursor;
int index = 0;
*buf_pointer = '\0'; /* buf_pointer always points at a NUL byte. */
for (cursor = first; cursor <= last; cursor++, index++) {
if (bufsize <= 1)
break;
printed = scnprintf(buf_pointer, bufsize, "%d\t%s\n",
index, spk_msg_get(cursor));
buf_pointer += printed;
bufsize -= printed;
}
return buf_pointer - buf;
}
static void report_msg_status(int reset, int received, int used,
int rejected, char *groupname)
{
int len;
char buf[160];
if (reset) {
pr_info("i18n messages from group %s reset to defaults\n",
groupname);
} else if (received) {
len = snprintf(buf, sizeof(buf),
" updated %d of %d i18n messages from group %s\n",
used, received, groupname);
if (rejected)
snprintf(buf + (len - 1), sizeof(buf) - (len - 1),
" with %d reject%s\n",
rejected, rejected > 1 ? "s" : "");
printk("%s", buf); /* never pass a built-up string as the format */
}
}
static ssize_t message_store_helper(const char *buf, size_t count,
struct msg_group_t *group)
{
char *cp = (char *) buf;
char *end = cp + count;
char *linefeed = NULL;
char *temp = NULL;
ssize_t msg_stored = 0;
ssize_t retval = count;
size_t desc_length = 0;
unsigned long index = 0;
int received = 0;
int used = 0;
int rejected = 0;
int reset = 0;
enum msg_index_t firstmessage = group->start;
enum msg_index_t lastmessage = group->end;
enum msg_index_t curmessage;
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
cp++;
if (cp == end)
break;
if (strchr("dDrR", *cp)) {
reset = 1;
break;
}
received++;
linefeed = strchr(cp, '\n');
if (!linefeed) {
rejected++;
break;
}
if (!isdigit(*cp)) {
rejected++;
cp = linefeed + 1;
continue;
}
index = simple_strtoul(cp, &temp, 10);
while ((temp < linefeed) && (*temp == ' ' || *temp == '\t'))
temp++;
desc_length = linefeed - temp;
curmessage = firstmessage + index;
/*
* Note the check (curmessage < firstmessage). It is not
* redundant. Suppose that the user gave us an index
* equal to ULONG_MAX - 1. If firstmessage > 1, then
* firstmessage + index < firstmessage!
*/
if ((curmessage < firstmessage) || (curmessage > lastmessage)) {
rejected++;
cp = linefeed + 1;
continue;
}
msg_stored = spk_msg_set(curmessage, temp, desc_length);
if (msg_stored < 0) {
retval = msg_stored;
if (msg_stored == -ENOMEM)
reset = 1;
break;
} else {
used++;
}
cp = linefeed + 1;
}
if (reset)
spk_reset_msg_group(group);
report_msg_status(reset, received, used, rejected, group->name);
return retval;
}
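/*
 * Illustrative example (not from the original source): message_store_helper()
 * expects one "index<whitespace>text" pair per line, where index is relative
 * to the first message of the group, and a line starting with d, D, r or R
 * resets the whole group to its defaults. The message strings below are
 * made up for the example:
 *
 * $ printf '0\tyes\n1\tno\n' > /sys/accessibility/speakup/i18n/states
 * $ echo d > /sys/accessibility/speakup/i18n/states	# reset the group
 *
 * Lines lacking a terminating newline or a leading digit count as rejects
 * in the summary printed by report_msg_status().
 */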
static ssize_t message_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
unsigned long flags;
BUG_ON(!group);
spk_lock(flags);
retval = message_show_helper(buf, group->start, group->end);
spk_unlock(flags);
return retval;
}
static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t retval = 0;
struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
BUG_ON(!group);
retval = message_store_helper(buf, count, group);
return retval;
}
/*
* Declare the attributes.
*/
static struct kobj_attribute keymap_attribute =
__ATTR(keymap, ROOT_W, keymap_show, keymap_store);
static struct kobj_attribute silent_attribute =
__ATTR(silent, USER_W, NULL, silent_store);
static struct kobj_attribute synth_attribute =
__ATTR(synth, USER_RW, synth_show, synth_store);
static struct kobj_attribute synth_direct_attribute =
__ATTR(synth_direct, USER_W, NULL, synth_direct_store);
static struct kobj_attribute version_attribute =
__ATTR_RO(version);
static struct kobj_attribute delimiters_attribute =
__ATTR(delimiters, USER_RW, punc_show, punc_store);
static struct kobj_attribute ex_num_attribute =
__ATTR(ex_num, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_all_attribute =
__ATTR(punc_all, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_most_attribute =
__ATTR(punc_most, USER_RW, punc_show, punc_store);
static struct kobj_attribute punc_some_attribute =
__ATTR(punc_some, USER_RW, punc_show, punc_store);
static struct kobj_attribute repeats_attribute =
__ATTR(repeats, USER_RW, punc_show, punc_store);
static struct kobj_attribute attrib_bleep_attribute =
__ATTR(attrib_bleep, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bell_pos_attribute =
__ATTR(bell_pos, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bleep_time_attribute =
__ATTR(bleep_time, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute bleeps_attribute =
__ATTR(bleeps, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute cursor_time_attribute =
__ATTR(cursor_time, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute key_echo_attribute =
__ATTR(key_echo, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute no_interrupt_attribute =
__ATTR(no_interrupt, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punc_level_attribute =
__ATTR(punc_level, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute reading_punc_attribute =
__ATTR(reading_punc, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute say_control_attribute =
__ATTR(say_control, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute say_word_ctl_attribute =
__ATTR(say_word_ctl, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
__ATTR(spell_delay, USER_RW, spk_var_show, spk_var_store);
/*
* These attributes are i18n related.
*/
static struct kobj_attribute announcements_attribute =
__ATTR(announcements, USER_RW, message_show, message_store);
static struct kobj_attribute characters_attribute =
__ATTR(characters, USER_RW, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute chartab_attribute =
__ATTR(chartab, USER_RW, chars_chartab_show, chars_chartab_store);
static struct kobj_attribute ctl_keys_attribute =
__ATTR(ctl_keys, USER_RW, message_show, message_store);
static struct kobj_attribute colors_attribute =
__ATTR(colors, USER_RW, message_show, message_store);
static struct kobj_attribute formatted_attribute =
__ATTR(formatted, USER_RW, message_show, message_store);
static struct kobj_attribute function_names_attribute =
__ATTR(function_names, USER_RW, message_show, message_store);
static struct kobj_attribute key_names_attribute =
__ATTR(key_names, USER_RW, message_show, message_store);
static struct kobj_attribute states_attribute =
__ATTR(states, USER_RW, message_show, message_store);
/*
* Create groups of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *main_attrs[] = {
&keymap_attribute.attr,
&silent_attribute.attr,
&synth_attribute.attr,
&synth_direct_attribute.attr,
&version_attribute.attr,
&delimiters_attribute.attr,
&ex_num_attribute.attr,
&punc_all_attribute.attr,
&punc_most_attribute.attr,
&punc_some_attribute.attr,
&repeats_attribute.attr,
&attrib_bleep_attribute.attr,
&bell_pos_attribute.attr,
&bleep_time_attribute.attr,
&bleeps_attribute.attr,
&cursor_time_attribute.attr,
&key_echo_attribute.attr,
&no_interrupt_attribute.attr,
&punc_level_attribute.attr,
&reading_punc_attribute.attr,
&say_control_attribute.attr,
&say_word_ctl_attribute.attr,
&spell_delay_attribute.attr,
NULL,
};
static struct attribute *i18n_attrs[] = {
&announcements_attribute.attr,
&characters_attribute.attr,
&chartab_attribute.attr,
&ctl_keys_attribute.attr,
&colors_attribute.attr,
&formatted_attribute.attr,
&function_names_attribute.attr,
&key_names_attribute.attr,
&states_attribute.attr,
NULL,
};
/*
* An unnamed attribute group will put all of the attributes directly in
* the kobject directory. If we specify a name, a subdirectory named after
* the attribute group will be created to hold its attributes.
*/
static struct attribute_group main_attr_group = {
.attrs = main_attrs,
};
static struct attribute_group i18n_attr_group = {
.attrs = i18n_attrs,
.name = "i18n",
};
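/*
 * Resulting sysfs layout (sketch): the unnamed main group places its files
 * directly in the speakup kobject directory, while the named group gets an
 * "i18n" subdirectory:
 *
 * /sys/accessibility/speakup/synth
 * /sys/accessibility/speakup/punc_level
 * ...
 * /sys/accessibility/speakup/i18n/colors
 * /sys/accessibility/speakup/i18n/key_names
 * ...
 */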
static struct kobject *accessibility_kobj;
struct kobject *speakup_kobj;
int speakup_kobj_init(void)
{
int retval;
/*
* Create a simple kobject with the name of "accessibility",
* located under /sys/
*
* As this is a simple directory, no uevent will be sent to
* userspace. That is why this function should not be used for
* any type of dynamic kobjects, where the name and number are
* not known ahead of time.
*/
accessibility_kobj = kobject_create_and_add("accessibility", NULL);
if (!accessibility_kobj) {
retval = -ENOMEM;
goto out;
}
speakup_kobj = kobject_create_and_add("speakup", accessibility_kobj);
if (!speakup_kobj) {
retval = -ENOMEM;
goto err_acc;
}
/* Create the files associated with this kobject */
retval = sysfs_create_group(speakup_kobj, &main_attr_group);
if (retval)
goto err_speakup;
retval = sysfs_create_group(speakup_kobj, &i18n_attr_group);
if (retval)
goto err_group;
goto out;
err_group:
sysfs_remove_group(speakup_kobj, &main_attr_group);
err_speakup:
kobject_put(speakup_kobj);
err_acc:
kobject_put(accessibility_kobj);
out:
return retval;
}
void speakup_kobj_exit(void)
{
sysfs_remove_group(speakup_kobj, &i18n_attr_group);
sysfs_remove_group(speakup_kobj, &main_attr_group);
kobject_put(speakup_kobj);
kobject_put(accessibility_kobj);
}
| gpl-2.0 |
playfulgod/kernel-M865 | drivers/gpu/drm/drm_hashtab.c | 1103 | 5464 | /**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#include "drm_hashtab.h"
#include <linux/hash.h>
#include <linux/slab.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int i;
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
ht->table = NULL;
ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
}
if (!ht->table) {
ht->use_vmalloc = 1;
ht->table = vmalloc(ht->size*sizeof(*ht->table));
}
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
for (i = 0; i < ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
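/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a table of order 7 has 1 << 7 = 128 buckets; kcalloc() is used while the
 * bucket array fits in one page, otherwise drm_ht_create() falls back to
 * vmalloc().
 */
#if 0
static int example_init(struct drm_open_hash *ht, struct drm_hash_item *item)
{
int ret;

ret = drm_ht_create(ht, 7);	/* 1 << 7 = 128 buckets */
if (ret)
return ret;	/* -ENOMEM */

item->key = 0x1234;
ret = drm_ht_insert_item(ht, item);	/* -EINVAL on duplicate key */
if (ret)
drm_ht_remove(ht);
return ret;
}
#endif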
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash_long(key, ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
entry = hlist_entry(list, struct drm_hash_item, head);
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
}
static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return list;
if (entry->key > key)
break;
}
return NULL;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list, *parent;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each(list, h_list) {
entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = list;
}
if (parent) {
hlist_add_after(parent, &item->head);
} else {
hlist_add_head(&item->head, h_list);
}
return 0;
}
EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1 << bits) - 1;
unsigned long first, unshifted_key;
unshifted_key = hash_long(seed, bits);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
} while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_ht_just_insert_please);
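/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * allocating a unique 24-bit handle. The probe starts at
 * hash_long(seed, bits) and walks the key space modulo 1 << bits until a
 * free key is found or all 1 << 24 keys are in use.
 */
#if 0
static int example_alloc_handle(struct drm_open_hash *ht,
struct drm_hash_item *item, unsigned long seed)
{
/* key = (probe << 0) + 0; a nonzero shift/add would offset the key */
return drm_ht_just_insert_please(ht, item, seed, 24, 0, 0);
}
#endif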
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
{
struct hlist_node *list;
list = drm_ht_find_key(ht, key);
if (!list)
return -EINVAL;
*item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
EXPORT_SYMBOL(drm_ht_find_item);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
ht->fill--;
return 0;
}
return -EINVAL;
}
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
if (ht->use_vmalloc)
vfree(ht->table);
else
kfree(ht->table);
ht->table = NULL;
}
}
EXPORT_SYMBOL(drm_ht_remove);
| gpl-2.0 |
followtheart/linux | arch/x86/kernel/setup_percpu.c | 1103 | 8136 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
/*
* On x86_64 symbols referenced from code should be reachable using
* 32bit relocations. Reserve space for static percpu variables in
* modules so that they are always served from the first chunk which
* is located at the percpu segment base. On x86_32, anything can
* address anywhere. No need to reserve space in the first chunk.
*/
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE 0
#endif
#ifdef CONFIG_X86_32
/**
* pcpu_need_numa - determine percpu allocation needs to consider NUMA
*
* If NUMA is not configured or there is only one NUMA node available,
* there is no reason to consider NUMA. This function determines
* whether percpu allocation should consider NUMA or not.
*
* RETURNS:
* true if NUMA should be considered; otherwise, false.
*/
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
pg_data_t *last = NULL;
unsigned int cpu;
for_each_possible_cpu(cpu) {
int node = early_cpu_to_node(cpu);
if (node_online(node) && NODE_DATA(node) &&
last && last != NODE_DATA(node))
return true;
last = NODE_DATA(node);
}
#endif
return false;
}
#endif
/**
* pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
* @cpu: cpu to allocate for
* @size: size allocation in bytes
* @align: alignment
*
* Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
* does the right thing for NUMA regardless of the current
* configuration.
*
* RETURNS:
* Pointer to the allocated area on success, NULL on failure.
*/
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
unsigned long align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
int node = early_cpu_to_node(cpu);
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
ptr = __alloc_bootmem_nopanic(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
size, align, goal);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
cpu, size, node, __pa(ptr));
}
return ptr;
#else
return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
/*
* Helpers for first chunk memory allocation
*/
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
return pcpu_alloc_bootmem(cpu, size, align);
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
free_bootmem(__pa(ptr), size);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
#else
return LOCAL_DISTANCE;
#endif
}
static void __init pcpup_populate_pte(unsigned long addr)
{
populate_extra_pte(addr);
}
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
struct desc_struct gdt;
pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
0x2 | DESCTYPE_S, 0x8);
gdt.s = 1;
write_gdt_entry(get_cpu_gdt_table(cpu),
GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
void __init setup_per_cpu_areas(void)
{
unsigned int cpu;
unsigned long delta;
int rc;
pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
/*
* Allocate percpu area. Embedding allocator is our favorite;
* however, on NUMA configurations, it can result in very
* sparse unit mapping and vmalloc area isn't spacious enough
* on 32bit. Use page in that case.
*/
#ifdef CONFIG_X86_32
if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
const size_t dyn_size = PERCPU_MODULE_RESERVE +
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
size_t atom_size;
/*
* On 64bit, use PMD_SIZE for atom_size so that embedded
* percpu areas are aligned to PMD. This, in the future,
* can also allow using PMD mappings in vmalloc area. Use
* PAGE_SIZE on 32bit as vmalloc space is highly contended
* and large vmalloc area allocs can easily fail.
*/
#ifdef CONFIG_X86_64
atom_size = PMD_SIZE;
#else
atom_size = PAGE_SIZE;
#endif
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
dyn_size, atom_size,
pcpu_cpu_distance,
pcpu_fc_alloc, pcpu_fc_free);
if (rc < 0)
pr_warning("%s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
pcpu_fc_alloc, pcpu_fc_free,
pcpup_populate_pte);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
/* alrighty, percpu areas up and running */
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu) {
per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
per_cpu(cpu_number, cpu) = cpu;
setup_percpu_segment(cpu);
setup_stack_canary_segment(cpu);
/*
* Copy data used in early init routines from the
* initial arrays to the per cpu data areas. These
* arrays then become expendable and the *_early_ptr's
* are zeroed indicating that the static arrays are
* gone.
*/
#ifdef CONFIG_X86_LOCAL_APIC
per_cpu(x86_cpu_to_apicid, cpu) =
early_per_cpu_map(x86_cpu_to_apicid, cpu);
per_cpu(x86_bios_cpu_apicid, cpu) =
early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_32
per_cpu(x86_cpu_to_logical_apicid, cpu) =
early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
per_cpu(irq_stack_ptr, cpu) =
per_cpu(irq_stack_union.irq_stack, cpu) +
IRQ_STACK_SIZE - 64;
#endif
#ifdef CONFIG_NUMA
per_cpu(x86_cpu_to_node_map, cpu) =
early_per_cpu_map(x86_cpu_to_node_map, cpu);
/*
* Ensure that the boot cpu numa_node is correct when the boot
* cpu is on a node that doesn't have memory installed.
* Also cpu_up() will call cpu_to_node() for APs when
* MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
* up later with c_init aka intel_init/amd_init.
* So set them all (boot cpu and all APs).
*/
set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
/*
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
*/
if (!cpu)
switch_to_new_gdt(cpu);
}
/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#ifdef CONFIG_X86_32
early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
/* Setup node to cpumask map */
setup_node_to_cpumask_map();
/* Setup cpu initialized, callin, callout masks */
setup_cpu_local_masks();
}
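/*
 * After the loop above, the per-cpu copy of a variable for CPU n lives at
 * its static address plus __per_cpu_offset[n]. Rough illustrative sketch
 * (not part of this file; per_cpu() hides this arithmetic behind proper
 * accessors):
 */
#if 0
DEFINE_PER_CPU(int, example_counter);

static int example_read(int cpu)
{
/* roughly what per_cpu(example_counter, cpu) resolves to */
return *(int *)((char *)&example_counter + __per_cpu_offset[cpu]);
}
#endif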
| gpl-2.0 |
Demon000/libra | arch/s390/kernel/setup.c | 1871 | 29821 | /*
* S390 version
* Copyright IBM Corp. 1999, 2012
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "arch/i386/kernel/setup.c"
* Copyright (C) 1995, Linus Torvalds
*/
/*
* This file handles the architecture-dependent parts of initialization
*/
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include "entry.h"
long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
PSW_MASK_EA | PSW_MASK_BA;
long psw_user_bits = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
PSW_MASK_PSTATE | PSW_ASC_HOME;
/*
* User copy operations.
*/
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);
/*
* Machine setup..
*/
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);
unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);
unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);
unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
int __initdata memory_end_set;
unsigned long __initdata memory_end;
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
#ifdef CONFIG_64BIT
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
#endif
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
/*
* This is set up by the setup-routine at boot-time
* for S390 need to find out, what we have to setup
* using address 0x10400 ...
*/
#include <asm/setup.h>
/*
* condev= and conmode= setup parameter.
*/
static int __init condev_setup(char *str)
{
int vdev;
vdev = simple_strtoul(str, &str, 0);
if (vdev >= 0 && vdev < 65536) {
console_devno = vdev;
console_irq = -1;
}
return 1;
}
__setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
if (MACHINE_IS_KVM) {
if (sclp_has_vt220())
add_preferred_console("ttyS", 1, NULL);
else if (sclp_has_linemode())
add_preferred_console("ttyS", 0, NULL);
else
add_preferred_console("hvc", 0, NULL);
} else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
}
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
if (strncmp(str, "3215", 5) == 0)
SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
if (strncmp(str, "3270", 5) == 0)
SET_CONSOLE_3270;
#endif
set_preferred_console();
return 1;
}
__setup("conmode=", conmode_setup);
static void __init conmode_default(void)
{
char query_buffer[1024];
char *ptr;
if (MACHINE_IS_VM) {
cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
ptr = strstr(query_buffer, "SUBCHANNEL =");
console_irq = simple_strtoul(ptr + 13, NULL, 16);
cpcmd("QUERY TERM", query_buffer, 1024, NULL);
ptr = strstr(query_buffer, "CONMODE");
/*
* Set the conmode to 3215 so that the device recognition
* will set the cu_type of the console to 3215. If the
* conmode is 3270 and we don't set it back then both
* 3215 and the 3270 driver will try to access the console
* device (3215 as console and 3270 as normal tty).
*/
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
return;
}
if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
} else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
}
#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(void)
{
if (ipl_info.type != IPL_TYPE_FCP_DUMP)
return;
if (OLDMEM_BASE)
return;
strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_ZFCPDUMP */
/*
* Reboot, halt and power_off stubs. They just call _machine_restart,
* _machine_halt or _machine_power_off.
*/
void machine_restart(char *command)
{
if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_restart(command);
}
void machine_halt(void)
{
if (!in_interrupt() || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_halt();
}
void machine_power_off(void)
{
if (!in_interrupt() || oops_in_progress)
/*
* Only unblank the console if we are called in enabled
* context or a bust_spinlocks cleared the way for us.
*/
console_unblank();
_machine_power_off();
}
/*
* Dummy power off function.
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);
static int __init early_parse_mem(char *p)
{
memory_end = memparse(p, &p);
memory_end_set = 1;
return 0;
}
early_param("mem", early_parse_mem);
static int __init parse_vmalloc(char *arg)
{
if (!arg)
return -EINVAL;
VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
return 0;
}
early_param("vmalloc", parse_vmalloc);
unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
EXPORT_SYMBOL_GPL(s390_user_mode);
static void __init set_user_mode_primary(void)
{
psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
#ifdef CONFIG_COMPAT
psw32_user_bits =
(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
#endif
uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
}
static int __init early_parse_user_mode(char *p)
{
if (p && strcmp(p, "primary") == 0)
s390_user_mode = PRIMARY_SPACE_MODE;
else if (!p || strcmp(p, "home") == 0)
s390_user_mode = HOME_SPACE_MODE;
else
return 1;
return 0;
}
early_param("user_mode", early_parse_user_mode);
static void __init setup_addressing_mode(void)
{
if (s390_user_mode != PRIMARY_SPACE_MODE)
return;
set_user_mode_primary();
if (MACHINE_HAS_MVCOS)
pr_info("Address spaces switched, mvcos available\n");
else
pr_info("Address spaces switched, mvcos not available\n");
}
void *restart_stack __attribute__((__section__(".data")));
static void __init setup_lowcore(void)
{
struct _lowcore *lc;
/*
* Setup lowcore for boot cpu
*/
BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
lc->restart_psw.mask = psw_kernel_bits;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
lc->external_new_psw.mask = psw_kernel_bits |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->external_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
lc->svc_new_psw.mask = psw_kernel_bits |
PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
lc->program_new_psw.mask = psw_kernel_bits |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
lc->mcck_new_psw.mask = psw_kernel_bits;
lc->mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
lc->io_new_psw.mask = psw_kernel_bits |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->clock_comparator = -1ULL;
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->panic_stack = (unsigned long)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lc->extended_save_area_addr = (__u32)
__alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
/* enable extended save area */
__ctl_set_bit(14, 29);
}
#else
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
lc->user_timer = S390_lowcore.user_timer;
lc->system_timer = S390_lowcore.system_timer;
lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
lc->ftrace_func = S390_lowcore.ftrace_func;
restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
restart_stack += ASYNC_SIZE;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
* PSW restart is done on an offline CPU that has lowcore zero.
*/
lc->restart_stack = (unsigned long) restart_stack;
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1UL;
/* Setup absolute zero lowcore */
mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
}
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource bss_resource = {
.name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource __initdata *standard_resources[] = {
&code_resource,
&data_resource,
&bss_resource,
};
static void __init setup_resources(void)
{
struct resource *res, *std_res, *sub_res;
int i, j;
code_resource.start = (unsigned long) &_text;
code_resource.end = (unsigned long) &_etext - 1;
data_resource.start = (unsigned long) &_etext;
data_resource.end = (unsigned long) &_edata - 1;
bss_resource.start = (unsigned long) &__bss_start;
bss_resource.end = (unsigned long) &__bss_stop - 1;
for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size)
continue;
res = alloc_bootmem_low(sizeof(*res));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
switch (memory_chunk[i].type) {
case CHUNK_READ_WRITE:
res->name = "System RAM";
break;
case CHUNK_READ_ONLY:
res->name = "System ROM";
res->flags |= IORESOURCE_READONLY;
break;
default:
res->name = "reserved";
}
res->start = memory_chunk[i].addr;
res->end = res->start + memory_chunk[i].size - 1;
request_resource(&iomem_resource, res);
for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
std_res = standard_resources[j];
if (std_res->start < res->start ||
std_res->start > res->end)
continue;
if (std_res->end > res->end) {
sub_res = alloc_bootmem_low(sizeof(*sub_res));
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
request_resource(res, sub_res);
} else {
request_resource(res, std_res);
}
}
}
}
static void __init setup_memory_end(void)
{
unsigned long vmax, vmalloc_size, tmp;
unsigned long real_memory_size = 0;
int i;
#ifdef CONFIG_ZFCPDUMP
if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
memory_end = ZFCPDUMP_HSA_SIZE;
memory_end_set = 1;
}
#endif
memory_end &= PAGE_MASK;
/*
* Make sure all chunks are MAX_ORDER aligned so we don't need the
* extra checks that HOLES_IN_ZONE would require.
*/
for (i = 0; i < MEMORY_CHUNKS; i++) {
unsigned long start, end;
struct mem_chunk *chunk;
unsigned long align;
chunk = &memory_chunk[i];
if (!chunk->size)
continue;
align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
start = (chunk->addr + align - 1) & ~(align - 1);
end = (chunk->addr + chunk->size) & ~(align - 1);
if (start >= end)
memset(chunk, 0, sizeof(*chunk));
else {
chunk->addr = start;
chunk->size = end - start;
}
real_memory_size = max(real_memory_size,
chunk->addr + chunk->size);
}
/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
if (tmp <= (1UL << 42))
vmax = 1UL << 42; /* 3-level kernel page table */
else
vmax = 1UL << 53; /* 4-level kernel page table */
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
#else
vmalloc_size = VMALLOC_END ?: 96UL << 20;
vmax = 1UL << 31; /* 2-level kernel page table */
/* vmalloc area is at the end of the kernel address space. */
VMALLOC_END = vmax;
#endif
VMALLOC_START = vmax - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
vmemmap = (struct page *) tmp;
/* Take care that memory_end is set and <= vmemmap */
memory_end = min(memory_end ?: real_memory_size, tmp);
/* Fixup memory chunk array to fit into 0..memory_end */
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
if (!chunk->size)
continue;
if (chunk->addr >= memory_end) {
memset(chunk, 0, sizeof(*chunk));
continue;
}
if (chunk->addr + chunk->size > memory_end)
chunk->size = memory_end - chunk->addr;
}
}
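/*
 * Worked example for the MAX_ORDER trimming above (illustrative): with
 * PAGE_SHIFT = 12 and MAX_ORDER = 11 the alignment is 1UL << 22 = 4 MiB.
 * A chunk covering [5 MiB, 11 MiB) rounds up to start = 8 MiB and down to
 * end = 8 MiB, so start >= end and the chunk is dropped; [5 MiB, 13 MiB)
 * is trimmed to [8 MiB, 12 MiB).
 */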
static void __init setup_vmcoreinfo(void)
{
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
#ifdef CONFIG_CRASH_DUMP
/*
* Find suitable location for crashkernel memory
*/
static unsigned long __init find_crash_base(unsigned long crash_size,
char **msg)
{
unsigned long crash_base;
struct mem_chunk *chunk;
int i;
if (memory_chunk[0].size < crash_size) {
*msg = "first memory chunk must be at least crashkernel size";
return 0;
}
if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
return OLDMEM_BASE;
for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
chunk = &memory_chunk[i];
if (chunk->size == 0)
continue;
if (chunk->type != CHUNK_READ_WRITE)
continue;
if (chunk->size < crash_size)
continue;
crash_base = (chunk->addr + chunk->size) - crash_size;
if (crash_base < crash_size)
continue;
if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
continue;
if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
continue;
return crash_base;
}
*msg = "no suitable area found";
return 0;
}
/*
* Check if crash_base and crash_size is valid
*/
static int __init verify_crash_base(unsigned long crash_base,
unsigned long crash_size,
char **msg)
{
struct mem_chunk *chunk;
int i;
/*
* Because we do the swap to zero, we must have at least 'crash_size'
* bytes of free space below crash_base
*/
if (crash_size > crash_base) {
*msg = "crashkernel offset must be greater than size";
return -EINVAL;
}
/* First memory chunk must be at least crash_size */
if (memory_chunk[0].size < crash_size) {
*msg = "first memory chunk must be at least crashkernel size";
return -EINVAL;
}
/* Check if we fit into the respective memory chunk */
for (i = 0; i < MEMORY_CHUNKS; i++) {
chunk = &memory_chunk[i];
if (chunk->size == 0)
continue;
if (crash_base < chunk->addr)
continue;
if (crash_base >= chunk->addr + chunk->size)
continue;
/* we have found the memory chunk */
if (crash_base + crash_size > chunk->addr + chunk->size) {
*msg = "selected memory chunk is too small for "
"crashkernel memory";
return -EINVAL;
}
return 0;
}
*msg = "invalid memory range specified";
return -EINVAL;
}
/*
* When kdump is enabled, we have to ensure that no memory from
* the area [0 - crashkernel memory size] and
* [crashk_res.start - crashk_res.end] is set offline.
*/
static int kdump_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct memory_notify *arg = data;
if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
return NOTIFY_BAD;
if (arg->start_pfn > PFN_DOWN(crashk_res.end))
return NOTIFY_OK;
if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
return NOTIFY_OK;
return NOTIFY_BAD;
}
static struct notifier_block kdump_mem_nb = {
.notifier_call = kdump_mem_notifier,
};
#endif
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
unsigned long real_size = 0;
int i;
if (!OLDMEM_BASE)
return;
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
real_size = max(real_size, chunk->addr + chunk->size);
}
create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
else
saved_max_pfn = PFN_DOWN(real_size) - 1;
#endif
}
/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
unsigned long long crash_base, crash_size;
char *msg = NULL;
int rc;
rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
&crash_base);
if (rc || crash_size == 0)
return;
crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
if (register_memory_notifier(&kdump_mem_nb))
return;
if (!crash_base)
crash_base = find_crash_base(crash_size, &msg);
if (!crash_base) {
pr_info("crashkernel reservation failed: %s\n", msg);
unregister_memory_notifier(&kdump_mem_nb);
return;
}
if (verify_crash_base(crash_base, crash_size, &msg)) {
pr_info("crashkernel reservation failed: %s\n", msg);
unregister_memory_notifier(&kdump_mem_nb);
return;
}
if (!OLDMEM_BASE && MACHINE_IS_VM)
diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res);
create_mem_hole(memory_chunk, crash_base, crash_size);
pr_info("Reserving %lluMB of memory at %lluMB "
"for crashkernel (System RAM: %luMB)\n",
crash_size >> 20, crash_base >> 20, memory_end >> 20);
os_info_crashkernel_add(crash_base, crash_size);
#endif
}
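/*
 * Illustrative examples of the standard crashkernel= kernel parameter
 * consumed by parse_crashkernel() above:
 *
 * crashkernel=128M	reserve 128 MiB, base chosen by find_crash_base()
 * crashkernel=128M@256M	reserve 128 MiB at a fixed 256 MiB base,
 * subject to verify_crash_base()
 */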
static void __init setup_memory(void)
{
unsigned long bootmap_size;
unsigned long start_pfn, end_pfn;
int i;
/*
* partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(&_end));
end_pfn = max_pfn = PFN_DOWN(memory_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd in case the bitmap of the bootmem allocator
* would overwrite it.
*/
if (INITRD_START && INITRD_SIZE) {
unsigned long bmap_size;
unsigned long start;
bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
bmap_size = PFN_PHYS(bmap_size);
if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE) {
/* Move initrd behind kdump oldmem */
if (start + INITRD_SIZE > OLDMEM_BASE &&
start < OLDMEM_BASE + OLDMEM_SIZE)
start = OLDMEM_BASE + OLDMEM_SIZE;
}
#endif
if (start + INITRD_SIZE > memory_end) {
pr_err("initrd extends beyond end of "
"memory (0x%08lx > 0x%08lx) "
"disabling initrd\n",
start + INITRD_SIZE, memory_end);
INITRD_START = INITRD_SIZE = 0;
} else {
pr_info("Moving initrd (0x%08lx -> "
"0x%08lx, size: %ld)\n",
INITRD_START, start, INITRD_SIZE);
memmove((void *) start, (void *) INITRD_START,
INITRD_SIZE);
INITRD_START = start;
}
}
}
#endif
/*
* Initialize the boot-time allocator
*/
bootmap_size = init_bootmem(start_pfn, end_pfn);
/*
* Register RAM areas with the bootmem allocator.
*/
for (i = 0; i < MEMORY_CHUNKS; i++) {
unsigned long start_chunk, end_chunk, pfn;
if (!memory_chunk[i].size)
continue;
start_chunk = PFN_DOWN(memory_chunk[i].addr);
end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
end_chunk = min(end_chunk, end_pfn);
if (start_chunk >= end_chunk)
continue;
memblock_add_node(PFN_PHYS(start_chunk),
PFN_PHYS(end_chunk - start_chunk), 0);
pfn = max(start_chunk, start_pfn);
storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
}
psw_set_key(PAGE_DEFAULT_KEY);
free_bootmem_with_active_regions(0, max_pfn);
/*
* Reserve memory used for lowcore/command line/kernel image.
*/
reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
reserve_bootmem((unsigned long)_stext,
PFN_PHYS(start_pfn) - (unsigned long)_stext,
BOOTMEM_DEFAULT);
/*
* Reserve the bootmem bitmap itself as well. We do this in two
* steps (first step was init_bootmem()) because this catches
* the (very unlikely) case of us accidentally initializing the
* bootmem allocator with an invalid RAM area.
*/
reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
BOOTMEM_DEFAULT);
#ifdef CONFIG_CRASH_DUMP
if (crashk_res.start)
reserve_bootmem(crashk_res.start,
crashk_res.end - crashk_res.start + 1,
BOOTMEM_DEFAULT);
if (is_kdump_kernel())
reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
if (INITRD_START + INITRD_SIZE <= memory_end) {
reserve_bootmem(INITRD_START, INITRD_SIZE,
BOOTMEM_DEFAULT);
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
pr_err("initrd extends beyond end of "
"memory (0x%08lx > 0x%08lx) "
"disabling initrd\n",
initrd_start + INITRD_SIZE, memory_end);
initrd_start = initrd_end = 0;
}
}
#endif
}
/*
* Setup hardware capabilities.
*/
static void __init setup_hwcaps(void)
{
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
struct cpuid cpu_id;
int i;
/*
* The store-facility-list bits, as found in the Principles of
* Operation, are numbered with bit 1UL<<31 as number 0 and
* bit 1UL<<0 as number 31.
* Bit 0: instructions named N3, "backported" to esa-mode
* Bit 2: z/Architecture mode is active
* Bit 7: the store-facility-list-extended facility is installed
* Bit 17: the message-security assist is installed
* Bit 19: the long-displacement facility is installed
* Bit 21: the extended-immediate facility is installed
* Bit 22: extended-translation facility 3 is installed
* Bit 30: extended-translation facility 3 enhancement facility
* These get translated to:
* HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
* HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
* HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
* HWCAP_S390_ETF3EH bit 8 (22 && 30).
*/
for (i = 0; i < 6; i++)
if (test_facility(stfl_bits[i]))
elf_hwcap |= 1UL << i;
if (test_facility(22) && test_facility(30))
elf_hwcap |= HWCAP_S390_ETF3EH;
/*
* Check for additional facilities with store-facility-list-extended.
* stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
* and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
* as stored by stfl, bits 32-xxx contain additional facilities.
* How many facility words are stored depends on the number of
* doublewords passed to the instruction. The additional facilities
* are:
* Bit 42: decimal floating point facility is installed
* Bit 44: perform floating point operation facility is installed
* translated to:
* HWCAP_S390_DFP bit 6 (42 && 44).
*/
if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
elf_hwcap |= HWCAP_S390_DFP;
/*
* Huge page support HWCAP_S390_HPAGE is bit 7.
*/
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
/*
* Transactional execution support HWCAP_S390_TE is bit 10.
*/
if (test_facility(50) && test_facility(73))
elf_hwcap |= HWCAP_S390_TE;
#endif
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x9672:
#if !defined(CONFIG_64BIT)
default: /* Use "g5" as default for 31 bit kernels. */
#endif
strcpy(elf_platform, "g5");
break;
case 0x2064:
case 0x2066:
#if defined(CONFIG_64BIT)
default: /* Use "z900" as default for 64 bit kernels. */
#endif
strcpy(elf_platform, "z900");
break;
case 0x2084:
case 0x2086:
strcpy(elf_platform, "z990");
break;
case 0x2094:
case 0x2096:
strcpy(elf_platform, "z9-109");
break;
case 0x2097:
case 0x2098:
strcpy(elf_platform, "z10");
break;
case 0x2817:
case 0x2818:
strcpy(elf_platform, "z196");
break;
case 0x2827:
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}
}
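/*
 * Worked example for the stfl loop above (illustrative): if exactly the
 * facilities 0 (N3), 2 (z/Architecture) and 17 (MSA) test positive, then
 * stfl_bits matches at i = 0, 1 and 3, so elf_hwcap becomes
 * HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_MSA, i.e. 0x0b.
 */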
/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
void __init setup_arch(char **cmdline_p)
{
/*
* print what head.S has found out about the machine
*/
#ifndef CONFIG_64BIT
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 31-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 31-bit mode\n");
if (MACHINE_HAS_IEEE)
pr_info("The hardware system has IEEE compatible "
"floating point units\n");
else
pr_info("The hardware system has no IEEE compatible "
"floating point units\n");
#else /* CONFIG_64BIT */
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
else if (MACHINE_IS_KVM)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
ROOT_DEV = Root_RAM0;
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
if (MACHINE_HAS_MVCOS)
memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
else
memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
parse_early_param();
detect_memory_layout(memory_chunk, memory_end);
os_info_init();
setup_ipl();
reserve_oldmem();
setup_memory_end();
setup_addressing_mode();
reserve_crashkernel();
setup_memory();
setup_resources();
setup_vmcoreinfo();
setup_lowcore();
cpu_init();
s390_init_cpu_topology();
/*
* Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
*/
setup_hwcaps();
/*
* Create kernel page tables and switch to virtual addressing.
*/
paging_init();
/* Setup default console */
conmode_default();
set_preferred_console();
/* Setup zfcpdump support */
setup_zfcpdump();
}
| gpl-2.0 |
Trinityhaxxor/Xperia_S_T-Core_Kernel | drivers/net/wireless/bcm4329/dhd_linux.c | 2127 | 83897 | /*
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
* Copyright (C) 1999-2010, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_linux.c,v 1.65.4.9.2.12.2.104.4.40 2011/02/03 19:55:18 Exp $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/inetdevice.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <proto/ethernet.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <wl_iw.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
#endif
#ifdef CUSTOMER_HW2
#include <linux/platform_device.h>
#ifdef CONFIG_WIFI_CONTROL_FUNC
#include <linux/wlan_plat.h>
static struct wifi_platform_data *wifi_control_data = NULL;
#endif
struct semaphore wifi_control_sem;
static struct resource *wifi_irqres = NULL;
int wifi_get_irq_number(unsigned long *irq_flags_ptr)
{
if (wifi_irqres) {
*irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
return (int)wifi_irqres->start;
}
#ifdef CUSTOM_OOB_GPIO_NUM
return CUSTOM_OOB_GPIO_NUM;
#else
return -1;
#endif
}
int wifi_set_carddetect(int on)
{
printk("%s = %d\n", __FUNCTION__, on);
#ifdef CONFIG_WIFI_CONTROL_FUNC
if (wifi_control_data && wifi_control_data->set_carddetect) {
wifi_control_data->set_carddetect(on);
}
#endif
return 0;
}
int wifi_set_power(int on, unsigned long msec)
{
printk("%s = %d\n", __FUNCTION__, on);
#ifdef CONFIG_WIFI_CONTROL_FUNC
if (wifi_control_data && wifi_control_data->set_power) {
wifi_control_data->set_power(on);
}
#endif
if (msec)
mdelay(msec);
return 0;
}
int wifi_set_reset(int on, unsigned long msec)
{
DHD_TRACE(("%s = %d\n", __FUNCTION__, on));
#ifdef CONFIG_WIFI_CONTROL_FUNC
if (wifi_control_data && wifi_control_data->set_reset) {
wifi_control_data->set_reset(on);
}
#endif
if (msec)
mdelay(msec);
return 0;
}
int wifi_get_mac_addr(unsigned char *buf)
{
DHD_TRACE(("%s\n", __FUNCTION__));
if (!buf)
return -EINVAL;
#ifdef CONFIG_WIFI_CONTROL_FUNC
if (wifi_control_data && wifi_control_data->get_mac_addr) {
return wifi_control_data->get_mac_addr(buf);
}
#endif
return -EOPNOTSUPP;
}
void *wifi_get_country_code(char *ccode)
{
DHD_TRACE(("%s\n", __FUNCTION__));
#ifdef CONFIG_WIFI_CONTROL_FUNC
if (!ccode)
return NULL;
if (wifi_control_data && wifi_control_data->get_country_code) {
return wifi_control_data->get_country_code(ccode);
}
#endif
return NULL;
}
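/*
* Usage sketch (hypothetical board-support code, not part of this driver):
* the accessors above expect the platform to register a device named
* "bcm4329_wlan" whose platform_data points to a struct wifi_platform_data,
* optionally carrying a "bcm4329_wlan_irq" IRQ resource.
*
*	static struct wifi_platform_data board_wifi_data = {
*		.set_power	= board_wifi_set_power,
*		.set_carddetect	= board_wifi_set_carddetect,
*	};
*	static struct platform_device board_wifi_device = {
*		.name	= "bcm4329_wlan",
*		.id	= 1,
*		.dev	= { .platform_data = &board_wifi_data, },
*	};
*	platform_device_register(&board_wifi_device);
*
* board_wifi_set_power/board_wifi_set_carddetect are assumed names used
* for illustration only.
*/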
static int wifi_probe(struct platform_device *pdev)
{
#ifdef CONFIG_WIFI_CONTROL_FUNC
struct wifi_platform_data *wifi_ctrl =
(struct wifi_platform_data *)(pdev->dev.platform_data);
wifi_control_data = wifi_ctrl;
#endif
DHD_TRACE(("## %s\n", __FUNCTION__));
wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
wifi_set_power(1, 0); /* Power On */
wifi_set_carddetect(1); /* CardDetect (0->1) */
up(&wifi_control_sem);
return 0;
}
static int wifi_remove(struct platform_device *pdev)
{
#ifdef CONFIG_WIFI_CONTROL_FUNC
struct wifi_platform_data *wifi_ctrl =
(struct wifi_platform_data *)(pdev->dev.platform_data);
wifi_control_data = wifi_ctrl;
#endif
DHD_TRACE(("## %s\n", __FUNCTION__));
wifi_set_power(0, 0); /* Power Off */
wifi_set_carddetect(0); /* CardDetect (1->0) */
up(&wifi_control_sem);
return 0;
}
static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
{
DHD_TRACE(("##> %s\n", __FUNCTION__));
#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(0);
#endif /* (OOB_INTR_ONLY) */
return 0;
}
static int wifi_resume(struct platform_device *pdev)
{
DHD_TRACE(("##> %s\n", __FUNCTION__));
#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(1);
#endif /* (OOB_INTR_ONLY) */
return 0;
}
static struct platform_driver wifi_device = {
.probe = wifi_probe,
.remove = wifi_remove,
.suspend = wifi_suspend,
.resume = wifi_resume,
.driver = {
.name = "bcm4329_wlan",
}
};
int wifi_add_dev(void)
{
DHD_TRACE(("## Calling platform_driver_register\n"));
return platform_driver_register(&wifi_device);
}
void wifi_del_dev(void)
{
DHD_TRACE(("## Unregister platform_driver_register\n"));
platform_driver_unregister(&wifi_device);
}
#endif /* defined(CUSTOMER_HW2) */
static int dhd_device_event(struct notifier_block *this, unsigned long event,
void *ptr);
static struct notifier_block dhd_notifier = {
.notifier_call = dhd_device_event
};
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
#if defined(OOB_INTR_ONLY)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL v2");
#endif /* LinuxVer */
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
const char *
print_tainted()
{
return "";
}
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
/* Linux wireless extension support */
#if defined(CONFIG_WIRELESS_EXT)
#include <wl_iw.h>
#endif /* defined(CONFIG_WIRELESS_EXT) */
extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len);
#if defined(CONFIG_HAS_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
#ifdef PKT_FILTER_SUPPORT
extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
#endif
/* Interface control information */
typedef struct dhd_if {
struct dhd_info *info; /* back pointer to dhd_info */
/* OS/stack specifics */
struct net_device *net;
struct net_device_stats stats;
int idx; /* iface idx in dongle */
int state; /* interface state */
uint subunit; /* subunit */
uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ+1]; /* linux interface name */
} dhd_if_t;
/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(CONFIG_WIRELESS_EXT)
wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(CONFIG_WIRELESS_EXT) */
dhd_pub_t pub;
/* OS/stack specifics */
dhd_if_t *iflist[DHD_MAX_IFS];
struct mutex proto_sem;
wait_queue_head_t ioctl_resp_wait;
struct timer_list timer;
bool wd_timer_valid;
struct tasklet_struct tasklet;
spinlock_t sdlock;
spinlock_t txqlock;
spinlock_t dhd_lock;
/* Thread based operation */
bool threads_only;
struct mutex sdsem;
long watchdog_pid;
struct semaphore watchdog_sem;
struct completion watchdog_exited;
long dpc_pid;
struct semaphore dpc_sem;
struct completion dpc_exited;
/* Wakelocks */
#ifdef CONFIG_HAS_WAKELOCK
struct wake_lock wl_wifi; /* Wifi wakelock */
struct wake_lock wl_rxwake; /* Wifi rx wakelock */
#endif
spinlock_t wl_lock;
int wl_count;
int wl_packet;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
struct mutex wl_start_lock; /* mutex when START called to prevent any other Linux calls */
#endif
/* Thread to issue ioctl for multicast */
long sysioc_pid;
struct semaphore sysioc_sem;
struct completion sysioc_exited;
bool set_multicast;
bool set_macaddress;
struct ether_addr macvalue;
wait_queue_head_t ctrl_wait;
atomic_t pend_8021x_cnt;
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND */
} dhd_info_t;
/* Definitions to provide path to the firmware and nvram
* example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
*/
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
struct semaphore dhd_registration_sem;
#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finish dhd registration */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
/* Error bits */
module_param(dhd_msg_level, int, 0);
/* Spawn a thread for system ioctls (set mac, set mcast) */
uint dhd_sysioc = TRUE;
module_param(dhd_sysioc, uint, 0);
/* Watchdog interval */
uint dhd_watchdog_ms = 10;
module_param(dhd_watchdog_ms, uint, 0);
#ifdef DHD_DEBUG
/* Console poll interval */
uint dhd_console_ms = 0;
module_param(dhd_console_ms, uint, 0);
#endif /* DHD_DEBUG */
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
uint dhd_arp_mode = 0xb;
module_param(dhd_arp_mode, uint, 0);
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
uint dhd_master_mode = TRUE;
module_param(dhd_master_mode, uint, 0);
/* Watchdog thread priority, -1 to use kernel timer */
int dhd_watchdog_prio = 97;
module_param(dhd_watchdog_prio, int, 0);
/* DPC thread priority, -1 to use tasklet */
int dhd_dpc_prio = 98;
module_param(dhd_dpc_prio, int, 0);
/* Override the dongle memory size */
extern int dhd_dongle_memsize;
module_param(dhd_dongle_memsize, int, 0);
/* Control fw roaming */
#ifdef CUSTOMER_HW2
uint dhd_roam = 0;
#else
uint dhd_roam = 1;
#endif
/* Control radio state */
uint dhd_radio_up = 1;
/* Network interface name */
char iface_name[IFNAMSIZ];
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define DAEMONIZE(a) daemonize(a); \
allow_signal(SIGKILL); \
allow_signal(SIGTERM);
#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
do { if (a) \
strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a) + 1))); \
} while (0);
#endif /* LINUX_VERSION_CODE */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define BLOCKABLE() (!in_atomic())
#else
#define BLOCKABLE() (!in_interrupt())
#endif
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);
/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);
/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);
/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);
/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);
#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);
/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif
/* Version string to report */
#ifdef DHD_DEBUG
#ifndef SRCBASE
#define SRCBASE "drivers/net/wireless/bcm4329"
#endif
#define DHD_COMPILED "\nCompiled in " SRCBASE
#else
#define DHD_COMPILED
#endif
static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
#ifdef DHD_DEBUG
"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
#endif
;
#if defined(CONFIG_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(CONFIG_WIRELESS_EXT) */
static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */
static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event_ptr, void **data_ptr);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
int ret = NOTIFY_DONE;
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
dhd_mmc_suspend = TRUE;
ret = NOTIFY_OK;
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
dhd_mmc_suspend = FALSE;
ret = NOTIFY_OK;
break;
}
smp_mb();
return ret;
}
static struct notifier_block dhd_sleep_pm_notifier = {
.notifier_call = dhd_sleep_pm_callback,
.priority = 0
};
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
{
#ifdef PKT_FILTER_SUPPORT
DHD_TRACE(("%s: %d\n", __FUNCTION__, value));
/* 1 - Enable packet filter, only allow unicast packet to send up */
/* 0 - Disable packet filter */
if (dhd_pkt_filter_enable) {
int i;
for (i = 0; i < dhd->pktfilter_count; i++) {
dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
value, dhd_master_mode);
}
}
#endif
}
#if defined(CONFIG_HAS_EARLYSUSPEND)
static int dhd_set_suspend(int value, dhd_pub_t *dhd)
{
int power_mode = PM_MAX;
/* wl_pkt_filter_enable_t enable_parm; */
char iovbuf[32];
int bcn_li_dtim = 3;
#ifdef CUSTOMER_HW2
uint roamvar = 1;
#endif /* CUSTOMER_HW2 */
DHD_TRACE(("%s: enter, value = %d in_suspend = %d\n",
__FUNCTION__, value, dhd->in_suspend));
if (dhd && dhd->up) {
if (value && dhd->in_suspend) {
/* Kernel suspended */
DHD_TRACE(("%s: force extra Suspend setting \n", __FUNCTION__));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM,
(char *)&power_mode, sizeof(power_mode));
/* Enable packet filter, only allow unicast packet to send up */
dhd_set_packet_filter(1, dhd);
/* If DTIM skip is set up as default, force it to wake on every
* third DTIM for better power saving.
* Note that the side effect is a chance to miss BC/MC packets.
*/
bcn_li_dtim = dhd_get_dtim_skip(dhd);
bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#ifdef CUSTOMER_HW2
/* Disable build-in roaming during suspend */
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#endif /* CUSTOMER_HW2 */
} else {
/* Kernel resumed */
DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__));
power_mode = PM_FAST;
dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode,
sizeof(power_mode));
/* disable pkt filter */
dhd_set_packet_filter(0, dhd);
/* restore pre-suspend setting for dtim_skip */
bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#ifdef CUSTOMER_HW2
roamvar = dhd_roam;
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
#endif /* CUSTOMER_HW2 */
}
}
return 0;
}
static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
{
dhd_pub_t *dhdp = &dhd->pub;
dhd_os_wake_lock(dhdp);
dhd_os_proto_block(dhdp);
/* Set flag when early suspend was called */
dhdp->in_suspend = val;
if (!dhdp->suspend_disable_flag)
dhd_set_suspend(val, dhdp);
dhd_os_proto_unblock(dhdp);
dhd_os_wake_unlock(dhdp);
}
static void dhd_early_suspend(struct early_suspend *h)
{
struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
DHD_TRACE(("%s: enter\n", __FUNCTION__));
if (dhd)
dhd_suspend_resume_helper(dhd, 1);
}
static void dhd_late_resume(struct early_suspend *h)
{
struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
DHD_TRACE(("%s: enter\n", __FUNCTION__));
if (dhd)
dhd_suspend_resume_helper(dhd, 0);
}
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
/*
* Generalized timeout mechanism. Uses spin sleep with exponential back-off until
* the sleep time reaches one jiffy, then switches over to task delay. Usage:
*
* dhd_timeout_start(&tmo, usec);
* while (!dhd_timeout_expired(&tmo))
* if (poll_something())
* break;
* if (dhd_timeout_expired(&tmo))
* fatal();
*/
void
dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
{
tmo->limit = usec;
tmo->increment = 0;
tmo->elapsed = 0;
tmo->tick = 1000000 / HZ;
}
int
dhd_timeout_expired(dhd_timeout_t *tmo)
{
/* Do nothing on the first call */
if (tmo->increment == 0) {
tmo->increment = 1;
return 0;
}
if (tmo->elapsed >= tmo->limit)
return 1;
/* Add the delay that's about to take place */
tmo->elapsed += tmo->increment;
if (tmo->increment < tmo->tick) {
OSL_DELAY(tmo->increment);
tmo->increment *= 2;
if (tmo->increment > tmo->tick)
tmo->increment = tmo->tick;
} else {
wait_queue_head_t delay_wait;
DECLARE_WAITQUEUE(wait, current);
int pending;
init_waitqueue_head(&delay_wait);
add_wait_queue(&delay_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
pending = signal_pending(current);
remove_wait_queue(&delay_wait, &wait);
set_current_state(TASK_RUNNING);
if (pending)
return 1; /* Interrupted */
}
return 0;
}
static int
dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
{
int i = 0;
ASSERT(dhd);
while (i < DHD_MAX_IFS) {
if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
return i;
i++;
}
return DHD_BAD_IF;
}
int
dhd_ifname2idx(dhd_info_t *dhd, char *name)
{
int i = DHD_MAX_IFS;
ASSERT(dhd);
if (name == NULL || *name == '\0')
return 0;
while (--i > 0)
if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
break;
DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
return i; /* default - the primary interface */
}
char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
ASSERT(dhd);
if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
return "<if_bad>";
}
if (dhd->iflist[ifidx] == NULL) {
DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
return "<if_null>";
}
if (dhd->iflist[ifidx]->net)
return dhd->iflist[ifidx]->net->name;
return "<if_none>";
}
static void
_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
{
struct net_device *dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
struct netdev_hw_addr *ha;
#else
struct dev_mc_list *mclist;
#endif
uint32 allmulti, cnt;
wl_ioctl_t ioc;
char *buf, *bufp;
uint buflen;
int ret;
ASSERT(dhd && dhd->iflist[ifidx]);
dev = dhd->iflist[ifidx]->net;
NETIF_ADDR_LOCK(dev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
cnt = netdev_mc_count(dev);
#else
cnt = dev->mc_count;
#endif
NETIF_ADDR_UNLOCK(dev);
/* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
/* Send down the multicast list first. */
buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
dhd_ifname(&dhd->pub, ifidx), cnt));
return;
}
strcpy(bufp, "mcast_list");
bufp += strlen("mcast_list") + 1;
cnt = htol32(cnt);
memcpy(bufp, &cnt, sizeof(cnt));
bufp += sizeof(cnt);
NETIF_ADDR_LOCK(dev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
netdev_for_each_mc_addr(ha, dev) {
if (!cnt)
break;
memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
bufp += ETHER_ADDR_LEN;
cnt--;
}
#else
for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) {
memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
bufp += ETHER_ADDR_LEN;
}
#endif
NETIF_ADDR_UNLOCK(dev);
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = TRUE;
ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
dhd_ifname(&dhd->pub, ifidx), cnt));
allmulti = cnt ? TRUE : allmulti;
}
MFREE(dhd->pub.osh, buf, buflen);
/* Now send the allmulti setting. This is based on the setting in the
* net_device flags, but might be modified above to be turned on if we
* were trying to set some addresses and dongle rejected it...
*/
buflen = sizeof("allmulti") + sizeof(allmulti);
if (!(buf = MALLOC(dhd->pub.osh, buflen))) {
DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx)));
return;
}
allmulti = htol32(allmulti);
if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) {
DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen));
MFREE(dhd->pub.osh, buf, buflen);
return;
}
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = TRUE;
ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set allmulti %d failed\n",
dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
}
MFREE(dhd->pub.osh, buf, buflen);
/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
allmulti = htol32(allmulti);
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_PROMISC;
ioc.buf = &allmulti;
ioc.len = sizeof(allmulti);
ioc.set = TRUE;
ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set promisc %d failed\n",
dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
}
}
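/*
* For reference, the "mcast_list" iovar buffer built above is laid out as
* (a sketch of this function's encoding, not a separate API):
*
*	"mcast_list\0" | uint32 count (little-endian) | count * 6-byte MAC addrs
*
* allmulti mirrors IFF_ALLMULTI but is forced on if the dongle rejects the
* explicit list; WLC_SET_PROMISC simply mirrors IFF_PROMISC.
*/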
static int
_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
{
char buf[32];
wl_ioctl_t ioc;
int ret;
DHD_TRACE(("%s enter\n", __FUNCTION__));
if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) {
DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx)));
return -1;
}
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = 32;
ioc.set = TRUE;
ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
} else {
memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
}
return ret;
}
#ifdef SOFTAP
extern struct net_device *ap_net_dev;
/* semaphore that the soft AP CODE waits on */
extern struct semaphore ap_eth_sema;
#endif
static void
dhd_op_if(dhd_if_t *ifp)
{
dhd_info_t *dhd;
int ret = 0, err = 0;
#ifdef SOFTAP
unsigned long flags;
#endif
ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
dhd = ifp->info;
DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
switch (ifp->state) {
case WLC_E_IF_ADD:
/*
* Delete the existing interface before overwriting it
* in case we missed the WLC_E_IF_DEL event.
*/
if (ifp->net != NULL) {
DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
__FUNCTION__, ifp->net->name));
netif_stop_queue(ifp->net);
unregister_netdev(ifp->net);
free_netdev(ifp->net);
}
/* Allocate etherdev, including space for private structure */
if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
ret = -ENOMEM;
}
if (ret == 0) {
strcpy(ifp->net->name, ifp->name);
memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
__FUNCTION__, err));
ret = -EOPNOTSUPP;
} else {
#ifdef SOFTAP
flags = dhd_os_spin_lock(&dhd->pub);
/* save ptr to wl0.1 netdev for use in wl_iw.c */
ap_net_dev = ifp->net;
/* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
up(&ap_eth_sema);
dhd_os_spin_unlock(&dhd->pub, flags);
#endif
DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
current->pid, ifp->net->name));
ifp->state = 0;
}
}
break;
case WLC_E_IF_DEL:
if (ifp->net != NULL) {
DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n", __FUNCTION__));
netif_stop_queue(ifp->net);
unregister_netdev(ifp->net);
ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */
}
break;
default:
DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
ASSERT(!ifp->state);
break;
}
if (ret < 0) {
if (ifp->net) {
free_netdev(ifp->net);
}
#ifdef SOFTAP
/* Clear the SOFTAP global while ifp is still valid */
flags = dhd_os_spin_lock(&dhd->pub);
if (ifp->net == ap_net_dev)
ap_net_dev = NULL; /* NULL SOFTAP global as well */
dhd_os_spin_unlock(&dhd->pub, flags);
#endif /* SOFTAP */
dhd->iflist[ifp->idx] = NULL;
MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
}
}
static int
_dhd_sysioc_thread(void *data)
{
dhd_info_t *dhd = (dhd_info_t *)data;
int i;
#ifdef SOFTAP
bool in_ap = FALSE;
unsigned long flags;
#endif
DAEMONIZE("dhd_sysioc");
while (down_interruptible(&dhd->sysioc_sem) == 0) {
dhd_os_start_lock(&dhd->pub);
dhd_os_wake_lock(&dhd->pub);
for (i = 0; i < DHD_MAX_IFS; i++) {
if (dhd->iflist[i]) {
DHD_TRACE(("%s: interface %d\n",__FUNCTION__, i));
#ifdef SOFTAP
flags = dhd_os_spin_lock(&dhd->pub);
in_ap = (ap_net_dev != NULL);
dhd_os_spin_unlock(&dhd->pub, flags);
#endif /* SOFTAP */
if (dhd->iflist[i]->state)
dhd_op_if(dhd->iflist[i]);
#ifdef SOFTAP
if (dhd->iflist[i] == NULL) {
DHD_TRACE(("%s: interface %d just been removed!\n\n", __FUNCTION__, i));
continue;
}
if (in_ap && dhd->set_macaddress) {
DHD_TRACE(("attempt to set MAC for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
dhd->set_macaddress = FALSE;
continue;
}
if (in_ap && dhd->set_multicast) {
DHD_TRACE(("attempt to set MULTICAST list for %s in AP Mode blocked.\n", dhd->iflist[i]->net->name));
dhd->set_multicast = FALSE;
continue;
}
#endif /* SOFTAP */
if (dhd->set_multicast) {
dhd->set_multicast = FALSE;
_dhd_set_multicast_list(dhd, i);
}
if (dhd->set_macaddress) {
dhd->set_macaddress = FALSE;
_dhd_set_mac_address(dhd, i, &dhd->macvalue);
}
}
}
dhd_os_wake_unlock(&dhd->pub);
dhd_os_start_unlock(&dhd->pub);
}
DHD_TRACE(("%s: stopped\n",__FUNCTION__));
complete_and_exit(&dhd->sysioc_exited, 0);
}
static int
dhd_set_mac_address(struct net_device *dev, void *addr)
{
int ret = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
struct sockaddr *sa = (struct sockaddr *)addr;
int ifidx;
DHD_TRACE(("%s: Enter\n",__FUNCTION__));
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return -1;
ASSERT(dhd->sysioc_pid >= 0);
memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
dhd->set_macaddress = TRUE;
up(&dhd->sysioc_sem);
return ret;
}
static void
dhd_set_multicast_list(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ifidx;
DHD_TRACE(("%s: Enter\n",__FUNCTION__));
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return;
ASSERT(dhd->sysioc_pid >= 0);
dhd->set_multicast = TRUE;
up(&dhd->sysioc_sem);
}
int
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
int ret;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
/* Reject if down */
if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
return -ENODEV;
}
/* Update multicast statistic */
if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_ADDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
struct ether_header *eh = (struct ether_header *)pktdata;
if (ETHER_ISMULTI(eh->ether_dhost))
dhdp->tx_multicast++;
if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
atomic_inc(&dhd->pend_8021x_cnt);
}
/* Look into the packet and update the packet priority */
if ((PKTPRIO(pktbuf) == 0))
pktsetprio(pktbuf, FALSE);
/* If the protocol uses a data header, apply it */
dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
/* Use bus module to send data frame */
#ifdef BCMDBUS
ret = dbus_send_pkt(dhdp->dbus, pktbuf, NULL /* pktinfo */);
#else
ret = dhd_bus_txdata(dhdp->bus, pktbuf);
#endif /* BCMDBUS */
return ret;
}
static int
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
void *pktbuf;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
int ifidx;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhd_os_wake_lock(&dhd->pub);
/* Reject if down */
if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
__FUNCTION__, dhd->pub.up, dhd->pub.busstate));
netif_stop_queue(net);
/* Send Event when bus down detected during data session */
if (dhd->pub.busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
net_os_send_hang_message(net);
}
dhd_os_wake_unlock(&dhd->pub);
return -ENODEV;
}
ifidx = dhd_net2idx(dhd, net);
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
netif_stop_queue(net);
dhd_os_wake_unlock(&dhd->pub);
return -ENODEV;
}
/* Make sure there's enough room for any header */
if (skb_headroom(skb) < dhd->pub.hdrlen) {
struct sk_buff *skb2;
DHD_INFO(("%s: insufficient headroom\n",
dhd_ifname(&dhd->pub, ifidx)));
dhd->pub.tx_realloc++;
skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen);
dev_kfree_skb(skb);
if ((skb = skb2) == NULL) {
DHD_ERROR(("%s: skb_realloc_headroom failed\n",
dhd_ifname(&dhd->pub, ifidx)));
ret = -ENOMEM;
goto done;
}
}
/* Convert to packet */
if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
dhd_ifname(&dhd->pub, ifidx)));
dev_kfree_skb_any(skb);
ret = -ENOMEM;
goto done;
}
ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
done:
if (ret)
dhd->pub.dstats.tx_dropped++;
else
dhd->pub.tx_packets++;
dhd_os_wake_unlock(&dhd->pub);
/* Return ok: we always eat the packet */
return 0;
}
void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
struct net_device *net;
dhd_info_t *dhd = dhdp->info;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhdp->txoff = state;
ASSERT(dhd && dhd->iflist[ifidx]);
net = dhd->iflist[ifidx]->net;
if (state == ON)
netif_stop_queue(net);
else
netif_wake_queue(net);
}
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
struct sk_buff *skb;
uchar *eth;
uint len;
void * data, *pnext, *save_pktbuf;
int i;
dhd_if_t *ifp;
wl_event_msg_t event;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
save_pktbuf = pktbuf;
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
pnext = PKTNEXT(dhdp->osh, pktbuf);
PKTSETNEXT(dhdp->osh, pktbuf, NULL);
skb = PKTTONATIVE(dhdp->osh, pktbuf);
/* Get the protocol, maintain skb around eth_type_trans().
* The main reason for this hack is the limitation of Linux 2.4,
* where 'eth_type_trans' performs its internal skb_pull using
* 'net->hard_header_len' instead of ETH_HLEN. To avoid copying
* the packet coming from the network stack when adding the BDC
* header, hardware header etc., we set 'net->hard_header_len'
* during network interface registration to ETH_HLEN plus the
* extra space required for those headers, not just ETH_HLEN.
*/
eth = skb->data;
len = skb->len;
ifp = dhd->iflist[ifidx];
if (ifp == NULL)
ifp = dhd->iflist[0];
ASSERT(ifp);
skb->dev = ifp->net;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST) {
dhd->pub.rx_multicast++;
}
skb->data = eth;
skb->len = len;
/* Strip header, count, deliver upward */
skb_pull(skb, ETH_HLEN);
/* Process special event packets and then discard them */
if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM)
dhd_wl_host_event(dhd, &ifidx,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
skb->mac_header,
#else
skb->mac.raw,
#endif
&event,
&data);
ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
ifp = dhd->iflist[ifidx];
if (ifp->net)
ifp->net->last_rx = jiffies;
dhdp->dstats.rx_bytes += skb->len;
dhdp->rx_packets++; /* Local count */
if (in_interrupt()) {
netif_rx(skb);
} else {
/* If the receive is not processed inside an ISR,
* the softirqd must be woken explicitly to service
* the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
* by netif_rx_ni(), but in earlier kernels, we need
* to do it manually.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
netif_rx_ni(skb);
#else
ulong flags;
netif_rx(skb);
local_irq_save(flags);
RAISE_RX_SOFTIRQ();
local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
}
dhd_os_wake_lock_timeout_enable(dhdp);
}
void
dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
/* Linux version has nothing to do */
return;
}
void
dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
{
uint ifidx;
dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
struct ether_header *eh;
uint16 type;
dhd_prot_hdrpull(dhdp, &ifidx, txp);
eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
type = ntoh16(eh->ether_type);
if (type == ETHER_TYPE_802_1X)
atomic_dec(&dhd->pend_8021x_cnt);
}
static struct net_device_stats *
dhd_get_stats(struct net_device *net)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
dhd_if_t *ifp;
int ifidx;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ifidx = dhd_net2idx(dhd, net);
if (ifidx == DHD_BAD_IF)
return NULL;
ifp = dhd->iflist[ifidx];
ASSERT(dhd && ifp);
if (dhd->pub.up) {
/* Use the protocol to get dongle stats */
dhd_prot_dstats(&dhd->pub);
}
/* Copy dongle stats to net device stats */
ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
ifp->stats.multicast = dhd->pub.dstats.multicast;
return &ifp->stats;
}
static int
dhd_watchdog_thread(void *data)
{
dhd_info_t *dhd = (dhd_info_t *)data;
/* This thread doesn't need any user-level access,
* so get rid of all our resources
*/
#ifdef DHD_SCHED
if (dhd_watchdog_prio > 0) {
struct sched_param param;
param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
dhd_watchdog_prio:(MAX_RT_PRIO-1);
setScheduler(current, SCHED_FIFO, &param);
}
#endif /* DHD_SCHED */
DAEMONIZE("dhd_watchdog");
/* Run until signal received */
while (1) {
if (down_interruptible (&dhd->watchdog_sem) == 0) {
dhd_os_sdlock(&dhd->pub);
if (dhd->pub.dongle_reset == FALSE) {
DHD_TIMER(("%s:\n", __FUNCTION__));
/* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
/* Count the tick for reference */
dhd->pub.tickcnt++;
/* Reschedule the watchdog */
if (dhd->wd_timer_valid)
mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
}
dhd_os_sdunlock(&dhd->pub);
dhd_os_wake_unlock(&dhd->pub);
} else {
break;
}
}
complete_and_exit(&dhd->watchdog_exited, 0);
}
static void
dhd_watchdog(ulong data)
{
dhd_info_t *dhd = (dhd_info_t *)data;
dhd_os_wake_lock(&dhd->pub);
if (dhd->pub.dongle_reset) {
dhd_os_wake_unlock(&dhd->pub);
return;
}
if (dhd->watchdog_pid >= 0) {
up(&dhd->watchdog_sem);
return;
}
dhd_os_sdlock(&dhd->pub);
/* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
/* Count the tick for reference */
dhd->pub.tickcnt++;
/* Reschedule the watchdog */
if (dhd->wd_timer_valid)
mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
dhd_os_sdunlock(&dhd->pub);
dhd_os_wake_unlock(&dhd->pub);
}
static int
dhd_dpc_thread(void *data)
{
dhd_info_t *dhd = (dhd_info_t *)data;
/* This thread doesn't need any user-level access,
* so get rid of all our resources
*/
#ifdef DHD_SCHED
if (dhd_dpc_prio > 0)
{
struct sched_param param;
param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
setScheduler(current, SCHED_FIFO, &param);
}
#endif /* DHD_SCHED */
DAEMONIZE("dhd_dpc");
/* Run until signal received */
while (1) {
if (down_interruptible(&dhd->dpc_sem) == 0) {
/* Call bus dpc unless it indicated down (then clean stop) */
if (dhd->pub.busstate != DHD_BUS_DOWN) {
if (dhd_bus_dpc(dhd->pub.bus)) {
up(&dhd->dpc_sem);
}
else {
dhd_os_wake_unlock(&dhd->pub);
}
} else {
if (dhd->pub.up)
dhd_bus_stop(dhd->pub.bus, TRUE);
dhd_os_wake_unlock(&dhd->pub);
}
}
else
break;
}
complete_and_exit(&dhd->dpc_exited, 0);
}
static void
dhd_dpc(ulong data)
{
dhd_info_t *dhd;
dhd = (dhd_info_t *)data;
/* Call bus dpc unless it indicated down (then clean stop) */
if (dhd->pub.busstate != DHD_BUS_DOWN) {
if (dhd_bus_dpc(dhd->pub.bus))
tasklet_schedule(&dhd->tasklet);
} else {
dhd_bus_stop(dhd->pub.bus, TRUE);
}
}
void
dhd_sched_dpc(dhd_pub_t *dhdp)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
dhd_os_wake_lock(dhdp);
if (dhd->dpc_pid >= 0) {
up(&dhd->dpc_sem);
return;
}
tasklet_schedule(&dhd->tasklet);
}
#ifdef TOE
/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
static int
dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
{
wl_ioctl_t ioc;
char buf[32];
int ret;
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_GET_VAR;
ioc.buf = buf;
ioc.len = (uint)sizeof(buf);
ioc.set = FALSE;
strcpy(buf, "toe_ol");
if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
/* Check for older dongle image that doesn't support toe_ol */
if (ret == -EIO) {
DHD_ERROR(("%s: toe not supported by device\n",
dhd_ifname(&dhd->pub, ifidx)));
return -EOPNOTSUPP;
}
DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
memcpy(toe_ol, buf, sizeof(uint32));
return 0;
}
/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
static int
dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
{
wl_ioctl_t ioc;
char buf[32];
int toe, ret;
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = WLC_SET_VAR;
ioc.buf = buf;
ioc.len = (uint)sizeof(buf);
ioc.set = TRUE;
/* Set toe_ol as requested */
strcpy(buf, "toe_ol");
memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32));
if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
/* Enable toe globally only if any components are enabled. */
toe = (toe_ol != 0);
strcpy(buf, "toe");
memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32));
if ((ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) {
DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
return 0;
}
#endif /* TOE */
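/*
* Note on the encoding used above (a sketch of what bcm_mkiovar() builds):
* an iovar buffer is the NUL-terminated name followed immediately by the
* value, e.g. for dhd_toe_set():
*
*	"toe_ol\0" | uint32 toe_ol
*
* which is why the value is copied at &buf[sizeof("toe_ol")].
*/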
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
static void dhd_ethtool_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
sprintf(info->driver, "wl");
sprintf(info->version, "%lu", dhd->pub.drv_version);
}
struct ethtool_ops dhd_ethtool_ops = {
.get_drvinfo = dhd_ethtool_get_drvinfo
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
static int
dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
uint32 cmd;
#ifdef TOE
struct ethtool_value edata;
uint32 toe_cmpnt, csum_dir;
int ret;
#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* all ethtool calls start with a cmd word */
if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
return -EFAULT;
switch (cmd) {
case ETHTOOL_GDRVINFO:
/* Copy out any request driver name */
if (copy_from_user(&info, uaddr, sizeof(info)))
return -EFAULT;
strncpy(drvname, info.driver, sizeof(info.driver));
drvname[sizeof(info.driver)-1] = '\0';
/* clear struct for return */
memset(&info, 0, sizeof(info));
info.cmd = cmd;
/* if dhd requested, identify ourselves */
if (strcmp(drvname, "?dhd") == 0) {
sprintf(info.driver, "dhd");
strcpy(info.version, EPI_VERSION_STR);
}
/* otherwise, require dongle to be up */
else if (!dhd->pub.up) {
DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
return -ENODEV;
}
/* finally, report dongle driver type */
else if (dhd->pub.iswl)
sprintf(info.driver, "wl");
else
sprintf(info.driver, "xx");
sprintf(info.version, "%lu", dhd->pub.drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
(int)sizeof(drvname), drvname, info.driver));
break;
#ifdef TOE
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
return ret;
csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
edata.cmd = cmd;
edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
if (copy_to_user(uaddr, &edata, sizeof(edata)))
return -EFAULT;
break;
/* Set toe offload components in dongle */
case ETHTOOL_SRXCSUM:
case ETHTOOL_STXCSUM:
if (copy_from_user(&edata, uaddr, sizeof(edata)))
return -EFAULT;
/* Read the current settings, update and write back */
if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
return ret;
csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
if (edata.data != 0)
toe_cmpnt |= csum_dir;
else
toe_cmpnt &= ~csum_dir;
if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
else
dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
}
break;
#endif /* TOE */
default:
return -EOPNOTSUPP;
}
return 0;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
dhd_ioctl_t ioc;
int bcmerror = 0;
int buflen = 0;
void *buf = NULL;
uint driver = 0;
int ifidx;
bool is_set_key_cmd;
int ret;
dhd_os_wake_lock(&dhd->pub);
/* send to dongle only if we are not waiting for reload already */
if (dhd->pub.hang_was_sent) {
DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
dhd_os_wake_lock_timeout_enable(&dhd->pub);
dhd_os_wake_unlock(&dhd->pub);
return OSL_ERROR(BCME_DONGLE_DOWN);
}
ifidx = dhd_net2idx(dhd, net);
DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
if (ifidx == DHD_BAD_IF) {
dhd_os_wake_unlock(&dhd->pub);
return -1;
}
#if defined(CONFIG_WIRELESS_EXT)
/* linux wireless extensions */
if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
/* may recurse, do NOT lock */
ret = wl_iw_ioctl(net, ifr, cmd);
dhd_os_wake_unlock(&dhd->pub);
return ret;
}
#endif /* defined(CONFIG_WIRELESS_EXT) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
if (cmd == SIOCETHTOOL) {
ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
dhd_os_wake_unlock(&dhd->pub);
return ret;
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
if (cmd != SIOCDEVPRIVATE) {
dhd_os_wake_unlock(&dhd->pub);
return -EOPNOTSUPP;
}
memset(&ioc, 0, sizeof(ioc));
/* Copy the ioc control structure part of ioctl request */
if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
bcmerror = -BCME_BADADDR;
goto done;
}
/* Copy out any buffer passed */
if (ioc.buf) {
buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
/* optimization for direct ioctl calls from kernel */
/*
if (segment_eq(get_fs(), KERNEL_DS)) {
buf = ioc.buf;
} else {
*/
{
if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) {
bcmerror = -BCME_NOMEM;
goto done;
}
if (copy_from_user(buf, ioc.buf, buflen)) {
bcmerror = -BCME_BADADDR;
goto done;
}
}
}
/* To differentiate between wl and dhd, read 4 more bytes */
if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
sizeof(uint)) != 0)) {
bcmerror = -BCME_BADADDR;
goto done;
}
if (!capable(CAP_NET_ADMIN)) {
bcmerror = -BCME_EPERM;
goto done;
}
/* check for local dhd ioctl and handle it */
if (driver == DHD_IOCTL_MAGIC) {
bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
if (bcmerror)
dhd->pub.bcmerror = bcmerror;
goto done;
}
/* send to dongle (must be up, and wl) */
if (dhd->pub.busstate != DHD_BUS_DATA) {
DHD_ERROR(("%s DONGLE_DOWN\n", __FUNCTION__));
bcmerror = BCME_DONGLE_DOWN;
goto done;
}
if (!dhd->pub.iswl) {
bcmerror = BCME_DONGLE_DOWN;
goto done;
}
/* Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
* prevent M4 encryption.
*/
is_set_key_cmd = ((ioc.cmd == WLC_SET_KEY) ||
((ioc.cmd == WLC_SET_VAR) &&
!(strncmp("wsec_key", ioc.buf, 9))) ||
((ioc.cmd == WLC_SET_VAR) &&
!(strncmp("bsscfg:wsec_key", ioc.buf, 15))));
if (is_set_key_cmd) {
dhd_wait_pend8021x(net);
}
bcmerror = dhd_prot_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
done:
if ((bcmerror == -ETIMEDOUT) || ((dhd->pub.busstate == DHD_BUS_DOWN) &&
(!dhd->pub.dongle_reset))) {
DHD_ERROR(("%s: Event HANG send up\n", __FUNCTION__));
net_os_send_hang_message(net);
}
if (!bcmerror && buf && ioc.buf) {
if (copy_to_user(ioc.buf, buf, buflen))
bcmerror = -EFAULT;
}
if (buf)
MFREE(dhd->pub.osh, buf, buflen);
dhd_os_wake_unlock(&dhd->pub);
return OSL_ERROR(bcmerror);
}
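/*
* Userspace access sketch (hypothetical utility code, mirroring the
* copy_from_user() layout above): a wl/dhd tool passes a wl_ioctl_t
* through SIOCDEVPRIVATE, immediately followed by a uint "driver" word
* that selects local dhd handling (DHD_IOCTL_MAGIC) vs. the dongle.
*
*	struct {
*		wl_ioctl_t ioc;	/* cmd/buf/len/set */
*		uint driver;	/* DHD_IOCTL_MAGIC for dhd ioctls */
*	} arg;
*	struct ifreq ifr;
*	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
*	ifr.ifr_data = (caddr_t)&arg;
*	ioctl(sock, SIOCDEVPRIVATE, &ifr);
*/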
static int
dhd_stop(struct net_device *net)
{
/* declared outside the #if so 'dhd' is in scope for hang_was_sent below */
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
#if !defined(IGNORE_ETH0_DOWN)
DHD_TRACE(("%s: Enter %s\n", __FUNCTION__, net->name));
if (dhd->pub.up == 0) {
return 0;
}
/* Set state and stop OS transmissions */
dhd->pub.up = 0;
netif_stop_queue(net);
#else
DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation ...\n", __FUNCTION__));
#endif /* !defined(IGNORE_ETH0_DOWN) */
dhd->pub.hang_was_sent = 0;
OLD_MOD_DEC_USE_COUNT;
return 0;
}
static int
dhd_open(struct net_device *net)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
#ifdef TOE
uint32 toe_ol;
#endif
int ifidx;
/* Force start if ifconfig_up gets called before START command */
wl_control_wl_start(net);
ifidx = dhd_net2idx(dhd, net);
DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
if (ifidx == DHD_BAD_IF)
return -1;
if ((dhd->iflist[ifidx]) && (dhd->iflist[ifidx]->state == WLC_E_IF_DEL)) {
DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
return -1;
}
if (ifidx == 0) { /* do it only for primary eth0 */
atomic_set(&dhd->pend_8021x_cnt, 0);
memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
#ifdef TOE
/* Get current TOE mode from dongle */
if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0)
dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
else
dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
#endif
}
/* Allow transmit calls */
netif_start_queue(net);
dhd->pub.up = 1;
OLD_MOD_INC_USE_COUNT;
return 0;
}
osl_t *
dhd_osl_attach(void *pdev, uint bustype)
{
return osl_attach(pdev, bustype, TRUE);
}
void
dhd_osl_detach(osl_t *osh)
{
if (MALLOCED(osh)) {
DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
}
osl_detach(osh);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && 1
up(&dhd_registration_sem);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
}
int
dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
uint8 *mac_addr, uint32 flags, uint8 bssidx)
{
dhd_if_t *ifp;
DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
ASSERT(dhd && (ifidx < DHD_MAX_IFS));
ifp = dhd->iflist[ifidx];
if (!ifp && !(ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t)))) {
DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__));
return -ENOMEM;
}
memset(ifp, 0, sizeof(dhd_if_t));
ifp->info = dhd;
dhd->iflist[ifidx] = ifp;
strncpy(ifp->name, name, IFNAMSIZ);
ifp->name[IFNAMSIZ] = '\0';
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
if (handle == NULL) {
ifp->state = WLC_E_IF_ADD;
ifp->idx = ifidx;
ASSERT(dhd->sysioc_pid >= 0);
up(&dhd->sysioc_sem);
} else
ifp->net = (struct net_device *)handle;
return 0;
}
void
dhd_del_if(dhd_info_t *dhd, int ifidx)
{
dhd_if_t *ifp;
DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
ifp = dhd->iflist[ifidx];
if (!ifp) {
DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
return;
}
ifp->state = WLC_E_IF_DEL;
ifp->idx = ifidx;
ASSERT(dhd->sysioc_pid >= 0);
up(&dhd->sysioc_sem);
}
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
dhd_info_t *dhd = NULL;
struct net_device *net;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Update firmware and nvram paths if they were provided as module parameters */
if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
strcpy(fw_path, firmware_path);
if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
strcpy(nv_path, nvram_path);
/* Allocate etherdev, including space for private structure */
if (!(net = alloc_etherdev(sizeof(dhd)))) {
DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
goto fail;
}
/* Allocate primary dhd_info */
if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
goto fail;
}
memset(dhd, 0, sizeof(dhd_info_t));
/*
* Save the dhd_info into the priv
*/
memcpy(netdev_priv(net), &dhd, sizeof(dhd));
dhd->pub.osh = osh;
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
int len;
char ch;
strncpy(net->name, iface_name, IFNAMSIZ);
net->name[IFNAMSIZ - 1] = 0;
len = strlen(net->name);
ch = net->name[len - 1];
if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
strcat(net->name, "%d");
}
if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) < 0)
goto fail;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
net->open = NULL;
#else
net->netdev_ops = NULL;
#endif
mutex_init(&dhd->proto_sem);
/* Initialize other structure content */
init_waitqueue_head(&dhd->ioctl_resp_wait);
init_waitqueue_head(&dhd->ctrl_wait);
/* Initialize the spinlocks */
spin_lock_init(&dhd->sdlock);
spin_lock_init(&dhd->txqlock);
spin_lock_init(&dhd->dhd_lock);
/* Initialize Wakelock stuff */
spin_lock_init(&dhd->wl_lock);
dhd->wl_count = 0;
dhd->wl_packet = 0;
#ifdef CONFIG_HAS_WAKELOCK
wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhd->wl_start_lock);
#endif
/* Link to info module */
dhd->pub.info = dhd;
/* Link to bus module */
dhd->pub.bus = bus;
dhd->pub.hdrlen = bus_hdrlen;
/* Attach and link in the protocol */
if (dhd_prot_attach(&dhd->pub) != 0) {
DHD_ERROR(("dhd_prot_attach failed\n"));
goto fail;
}
#if defined(CONFIG_WIRELESS_EXT)
/* Attach and link in the iw */
if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
DHD_ERROR(("wl_iw_attach failed\n"));
goto fail;
}
#endif /* defined(CONFIG_WIRELESS_EXT) */
/* Set up the watchdog timer */
init_timer(&dhd->timer);
dhd->timer.data = (ulong)dhd;
dhd->timer.function = dhd_watchdog;
/* Initialize thread based operation and lock */
mutex_init(&dhd->sdsem);
if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
dhd->threads_only = TRUE;
}
else {
dhd->threads_only = FALSE;
}
if (dhd_dpc_prio >= 0) {
/* Initialize watchdog thread */
sema_init(&dhd->watchdog_sem, 0);
init_completion(&dhd->watchdog_exited);
dhd->watchdog_pid = kernel_thread(dhd_watchdog_thread, dhd, 0);
} else {
dhd->watchdog_pid = -1;
}
/* Set up the bottom half handler */
if (dhd_dpc_prio >= 0) {
/* Initialize DPC thread */
sema_init(&dhd->dpc_sem, 0);
init_completion(&dhd->dpc_exited);
dhd->dpc_pid = kernel_thread(dhd_dpc_thread, dhd, 0);
} else {
tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
dhd->dpc_pid = -1;
}
if (dhd_sysioc) {
sema_init(&dhd->sysioc_sem, 0);
init_completion(&dhd->sysioc_exited);
dhd->sysioc_pid = kernel_thread(_dhd_sysioc_thread, dhd, 0);
} else {
dhd->sysioc_pid = -1;
}
/*
* Save the dhd_info into the priv
*/
memcpy(netdev_priv(net), &dhd, sizeof(dhd));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
register_pm_notifier(&dhd_sleep_pm_notifier);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
#ifdef CONFIG_HAS_EARLYSUSPEND
dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
dhd->early_suspend.suspend = dhd_early_suspend;
dhd->early_suspend.resume = dhd_late_resume;
register_early_suspend(&dhd->early_suspend);
#endif
register_inetaddr_notifier(&dhd_notifier);
return &dhd->pub;
fail:
if (net)
free_netdev(net);
if (dhd)
dhd_detach(&dhd->pub);
return NULL;
}
int
dhd_bus_start(dhd_pub_t *dhdp)
{
int ret = -1;
dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
#ifdef EMBEDDED_PLATFORM
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
#endif /* EMBEDDED_PLATFORM */
ASSERT(dhd);
DHD_TRACE(("%s: \n", __FUNCTION__));
dhd_os_sdlock(dhdp);
/* try to download image and nvram to the dongle */
if (dhd->pub.busstate == DHD_BUS_DOWN) {
if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
fw_path, nv_path))) {
DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s\n",
__FUNCTION__, fw_path, nv_path));
dhd_os_sdunlock(dhdp);
return -1;
}
}
/* Start the watchdog timer */
dhd->pub.tickcnt = 0;
dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
/* Bring up the bus */
if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
dhd_os_sdunlock(dhdp);
return ret;
}
#if defined(OOB_INTR_ONLY)
/* Host registration for OOB interrupt */
if (bcmsdh_register_oob_intr(dhdp)) {
dhd->wd_timer_valid = FALSE;
del_timer_sync(&dhd->timer);
DHD_ERROR(("%s Host failed to resgister for OOB\n", __FUNCTION__));
dhd_os_sdunlock(dhdp);
return -ENODEV;
}
/* Enable oob at firmware */
dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif /* defined(OOB_INTR_ONLY) */
/* If bus is not ready, can't come up */
if (dhd->pub.busstate != DHD_BUS_DATA) {
dhd->wd_timer_valid = FALSE;
del_timer_sync(&dhd->timer);
DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
dhd_os_sdunlock(dhdp);
return -ENODEV;
}
dhd_os_sdunlock(dhdp);
#ifdef EMBEDDED_PLATFORM
bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
bcopy(iovbuf, dhdp->eventmask, WL_EVENTING_MASK_LEN);
setbit(dhdp->eventmask, WLC_E_SET_SSID);
setbit(dhdp->eventmask, WLC_E_PRUNE);
setbit(dhdp->eventmask, WLC_E_AUTH);
setbit(dhdp->eventmask, WLC_E_REASSOC);
setbit(dhdp->eventmask, WLC_E_REASSOC_IND);
setbit(dhdp->eventmask, WLC_E_DEAUTH_IND);
setbit(dhdp->eventmask, WLC_E_DISASSOC_IND);
setbit(dhdp->eventmask, WLC_E_DISASSOC);
setbit(dhdp->eventmask, WLC_E_JOIN);
setbit(dhdp->eventmask, WLC_E_ASSOC_IND);
setbit(dhdp->eventmask, WLC_E_PSK_SUP);
setbit(dhdp->eventmask, WLC_E_LINK);
setbit(dhdp->eventmask, WLC_E_NDIS_LINK);
setbit(dhdp->eventmask, WLC_E_MIC_ERROR);
setbit(dhdp->eventmask, WLC_E_PMKID_CACHE);
setbit(dhdp->eventmask, WLC_E_TXFAIL);
setbit(dhdp->eventmask, WLC_E_JOIN_START);
setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE);
setbit(dhdp->eventmask, WLC_E_RELOAD);
#ifdef PNO_SUPPORT
setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND);
#endif /* PNO_SUPPORT */
/* enable dongle roaming event */
setbit(dhdp->eventmask, WLC_E_ROAM);
dhdp->pktfilter_count = 4;
/* Setup filter to allow only unicast */
dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00";
dhdp->pktfilter[1] = NULL;
dhdp->pktfilter[2] = NULL;
dhdp->pktfilter[3] = NULL;
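/*
* The filter string format (as consumed by dhd_pktfilter_offload_set())
* appears to encode "<id> <polarity> <type> <offset> <mask> <pattern>",
* so "100 0 0 0 0x01 0x00" masks the multicast bit of the first byte of
* the destination MAC and matches only when it is clear, i.e. unicast.
*/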
#endif /* EMBEDDED_PLATFORM */
/* Bus is ready, do any protocol initialization */
if ((ret = dhd_prot_init(&dhd->pub)) < 0)
return ret;
return 0;
}
int
dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set)
{
char buf[strlen(name) + 1 + cmd_len];
int len = sizeof(buf);
wl_ioctl_t ioc;
int ret;
len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
ioc.buf = buf;
ioc.len = len;
ioc.set = set;
ret = dhd_prot_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
if (!set && ret >= 0)
memcpy(cmd_buf, buf, cmd_len);
return ret;
}
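/*
* Usage sketch for the helper above (assumed caller, for illustration):
* fetching a uint32 iovar by name.
*
*	uint32 toe_ol = 0;
*	if (dhd_iovar(pub, 0, "toe_ol", (char *)&toe_ol,
*		sizeof(toe_ol), FALSE) >= 0)
*		DHD_INFO(("toe_ol 0x%x\n", toe_ol));
*/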
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
static struct net_device_ops dhd_ops_pri = {
.ndo_open = dhd_open,
.ndo_stop = dhd_stop,
.ndo_get_stats = dhd_get_stats,
.ndo_do_ioctl = dhd_ioctl_entry,
.ndo_start_xmit = dhd_start_xmit,
.ndo_set_mac_address = dhd_set_mac_address,
.ndo_set_multicast_list = dhd_set_multicast_list,
};
static struct net_device_ops dhd_ops_virt = {
.ndo_get_stats = dhd_get_stats,
.ndo_do_ioctl = dhd_ioctl_entry,
.ndo_start_xmit = dhd_start_xmit,
.ndo_set_mac_address = dhd_set_mac_address,
.ndo_set_multicast_list = dhd_set_multicast_list,
};
#endif
static int dhd_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
dhd_info_t *dhd;
dhd_pub_t *dhd_pub;
if (!ifa)
return NOTIFY_DONE;
dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
dhd_pub = &dhd->pub;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) {
#else
if (ifa->ifa_dev->dev->open == &dhd_open) {
#endif
switch (event) {
case NETDEV_UP:
DHD_TRACE(("%s: [%s] Up IP: 0x%x\n",
__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
dhd_arp_cleanup(dhd_pub);
break;
case NETDEV_DOWN:
DHD_TRACE(("%s: [%s] Down IP: 0x%x\n",
__FUNCTION__, ifa->ifa_label, ifa->ifa_address));
dhd_arp_cleanup(dhd_pub);
break;
default:
DHD_TRACE(("%s: [%s] Event: %lu\n",
__FUNCTION__, ifa->ifa_label, event));
break;
}
}
return NOTIFY_DONE;
}
int
dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
struct net_device *net;
uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
ASSERT(dhd && dhd->iflist[ifidx]);
net = dhd->iflist[ifidx]->net;
ASSERT(net);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
ASSERT(!net->open);
net->get_stats = dhd_get_stats;
net->do_ioctl = dhd_ioctl_entry;
net->hard_start_xmit = dhd_start_xmit;
net->set_mac_address = dhd_set_mac_address;
net->set_multicast_list = dhd_set_multicast_list;
net->open = net->stop = NULL;
#else
ASSERT(!net->netdev_ops);
net->netdev_ops = &dhd_ops_virt;
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
net->open = dhd_open;
net->stop = dhd_stop;
#else
net->netdev_ops = &dhd_ops_pri;
#endif
/*
* We have to use the primary MAC for virtual interfaces
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
}
if (ifidx == 1) {
DHD_TRACE(("%s ACCESS POINT MAC: \n", __FUNCTION__));
/* ACCESSPOINT INTERFACE CASE */
temp_addr[0] |= 0x02; /* set bit 2 , - Locally Administered address */
}
net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
net->ethtool_ops = &dhd_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
#if defined(CONFIG_WIRELESS_EXT)
#if WIRELESS_EXT < 19
net->get_wireless_stats = dhd_get_wireless_stats;
#endif /* WIRELESS_EXT < 19 */
#if WIRELESS_EXT > 12
net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
#endif /* WIRELESS_EXT > 12 */
#endif /* defined(CONFIG_WIRELESS_EXT) */
dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
if (register_netdev(net) != 0) {
DHD_ERROR(("%s: couldn't register the net device\n", __FUNCTION__));
goto fail;
}
printf("%s: Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", net->name,
dhd->pub.mac.octet[0], dhd->pub.mac.octet[1], dhd->pub.mac.octet[2],
dhd->pub.mac.octet[3], dhd->pub.mac.octet[4], dhd->pub.mac.octet[5]);
#if defined(CONFIG_WIRELESS_EXT)
#if defined(CONFIG_FIRST_SCAN)
#ifdef SOFTAP
if (ifidx == 0)
/* Don't call for SOFTAP Interface in SOFTAP MODE */
wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#else
wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif /* SOFTAP */
#endif /* CONFIG_FIRST_SCAN */
#endif /* CONFIG_WIRELESS_EXT */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
up(&dhd_registration_sem);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
return 0;
fail:
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
net->open = NULL;
#else
net->netdev_ops = NULL;
#endif
return BCME_ERROR;
}
void
dhd_bus_detach(dhd_pub_t *dhdp)
{
dhd_info_t *dhd;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (dhdp) {
dhd = (dhd_info_t *)dhdp->info;
if (dhd) {
/* Stop the protocol module */
dhd_prot_stop(&dhd->pub);
/* Stop the bus module */
dhd_bus_stop(dhd->pub.bus, TRUE);
#if defined(OOB_INTR_ONLY)
bcmsdh_unregister_oob_intr();
#endif /* defined(OOB_INTR_ONLY) */
/* Clear the watchdog timer */
dhd->wd_timer_valid = FALSE;
del_timer_sync(&dhd->timer);
}
}
}
void
dhd_detach(dhd_pub_t *dhdp)
{
dhd_info_t *dhd;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (dhdp) {
dhd = (dhd_info_t *)dhdp->info;
if (dhd) {
dhd_if_t *ifp;
int i;
unregister_inetaddr_notifier(&dhd_notifier);
#if defined(CONFIG_HAS_EARLYSUSPEND)
if (dhd->early_suspend.suspend)
unregister_early_suspend(&dhd->early_suspend);
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
#if defined(CONFIG_WIRELESS_EXT)
/* Attach and link in the iw */
wl_iw_detach();
#endif
if (dhd->sysioc_pid >= 0) {
KILL_PROC(dhd->sysioc_pid, SIGTERM);
wait_for_completion(&dhd->sysioc_exited);
}
for (i = 1; i < DHD_MAX_IFS; i++)
if (dhd->iflist[i]) {
dhd->iflist[i]->state = WLC_E_IF_DEL;
dhd->iflist[i]->idx = i;
dhd_op_if(dhd->iflist[i]);
}
ifp = dhd->iflist[0];
ASSERT(ifp);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31))
if (ifp->net->open) {
#else
if (ifp->net->netdev_ops == &dhd_ops_pri) {
#endif
dhd_stop(ifp->net);
unregister_netdev(ifp->net);
}
if (dhd->watchdog_pid >= 0)
{
KILL_PROC(dhd->watchdog_pid, SIGTERM);
wait_for_completion(&dhd->watchdog_exited);
}
if (dhd->dpc_pid >= 0)
{
KILL_PROC(dhd->dpc_pid, SIGTERM);
wait_for_completion(&dhd->dpc_exited);
}
else
tasklet_kill(&dhd->tasklet);
dhd_bus_detach(dhdp);
if (dhdp->prot)
dhd_prot_detach(dhdp);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
unregister_pm_notifier(&dhd_sleep_pm_notifier);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
free_netdev(ifp->net);
#ifdef CONFIG_HAS_WAKELOCK
wake_lock_destroy(&dhd->wl_wifi);
wake_lock_destroy(&dhd->wl_rxwake);
#endif
MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
}
}
}
static void __exit
dhd_module_cleanup(void)
{
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
dhd_bus_unregister();
#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
wifi_del_dev();
#endif
/* Call customer gpio to turn off power with WL_REG_ON signal */
dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
}
static int __init
dhd_module_init(void)
{
int error;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Sanity check on the module parameters */
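/*
* Summary of the checks below: either both the watchdog and the DPC run
* as tasklets (both priorities negative), or both run as threads with TX
* deferred; mixed thread/tasklet combinations fall through to the error
* path.
*/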
do {
/* Both watchdog and DPC as tasklets are ok */
if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
break;
/* If both watchdog and DPC are threads, TX must be deferred */
if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
break;
DHD_ERROR(("Invalid module parameters.\n"));
return -EINVAL;
} while (0);
/* Call customer gpio to turn on power with WL_REG_ON signal */
dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
sema_init(&wifi_control_sem, 0);
error = wifi_add_dev();
if (error) {
DHD_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
goto fail_0;
}
/* Wait for the callback after platform_driver_register completes, or exit with an error */
if (down_timeout(&wifi_control_sem, msecs_to_jiffies(5000)) != 0) {
error = -EINVAL;
DHD_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
goto fail_1;
}
#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
sema_init(&dhd_registration_sem, 0);
#endif
error = dhd_bus_register();
if (!error)
printf("\n%s\n", dhd_version);
else {
DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
goto fail_1;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
/*
* Wait until the MMC sdio_register_driver callback has run and the
* driver has attached; this keeps the exit of dhd insmod in sync with
* the kernel MMC sdio device callback registration.
*/
if (down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
error = -EINVAL;
DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
goto fail_2;
}
#endif
return error;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
fail_2:
dhd_bus_unregister();
#endif
fail_1:
#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
wifi_del_dev();
fail_0:
#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
/* Call customer gpio to turn off power with WL_REG_ON signal */
dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
return error;
}
module_init(dhd_module_init);
module_exit(dhd_module_cleanup);
/*
* OS specific functions required to implement DHD driver in OS independent way
*/
int
dhd_os_proto_block(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
mutex_lock(&dhd->proto_sem);
return 1;
}
return 0;
}
int
dhd_os_proto_unblock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd) {
mutex_unlock(&dhd->proto_sem);
return 1;
}
return 0;
}
unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
return ((unsigned int)dhd_ioctl_timeout_msec);
}
void
dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
dhd_ioctl_timeout_msec = (int)timeout_msec;
}
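/*
* Sleep on ioctl_resp_wait until *condition becomes true, a signal
* arrives (reported through *pending), or the ioctl timeout expires.
* Returns the remaining jiffies, so 0 means the wait timed out.
*/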
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
DECLARE_WAITQUEUE(wait, current);
int timeout = dhd_ioctl_timeout_msec;
/* Convert timeout in milliseconds to jiffies */
/* timeout = timeout * HZ / 1000; */
timeout = msecs_to_jiffies(timeout);
/* Wait until control frame is available */
add_wait_queue(&dhd->ioctl_resp_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
smp_mb();
while (!(*condition) && (!signal_pending(current) && timeout)) {
timeout = schedule_timeout(timeout);
smp_mb();
}
if (signal_pending(current))
*pending = TRUE;
set_current_state(TASK_RUNNING);
remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
return timeout;
}
int
dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (waitqueue_active(&dhd->ioctl_resp_wait)) {
wake_up_interruptible(&dhd->ioctl_resp_wait);
}
return 0;
}
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
dhd_pub_t *pub = bus;
dhd_info_t *dhd = (dhd_info_t *)pub->info;
unsigned long flags;
int del_timer_flag = FALSE;
flags = dhd_os_spin_lock(pub);
/* don't start the wd until fw is loaded */
if (pub->busstate != DHD_BUS_DOWN) {
if (wdtick) {
dhd_watchdog_ms = (uint)wdtick;
dhd->wd_timer_valid = TRUE;
/* Re-arm the timer with the last watchdog period */
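/*
* Worked example (illustrative): with HZ = 100 and wdtick = 10 ms,
* 10 * 100 / 1000 = 1, so the timer fires one jiffy (~10 ms) from now.
*/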
mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
} else if (dhd->wd_timer_valid == TRUE) {
/* Totally stop the timer */
dhd->wd_timer_valid = FALSE;
del_timer_flag = TRUE;
}
}
dhd_os_spin_unlock(pub, flags);
if (del_timer_flag) {
del_timer_sync(&dhd->timer);
}
}
void *
dhd_os_open_image(char *filename)
{
struct file *fp;
fp = filp_open(filename, O_RDONLY, 0);
/*
* 2.6.11 (FC4) supports filp_open() but later revs don't?
* Alternative:
* fp = open_namei(AT_FDCWD, filename, O_RD, 0);
* ???
*/
if (IS_ERR(fp))
fp = NULL;
return fp;
}
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
struct file *fp = (struct file *)image;
int rdlen;
if (!image)
return 0;
rdlen = kernel_read(fp, fp->f_pos, buf, len);
if (rdlen > 0)
fp->f_pos += rdlen;
return rdlen;
}
void
dhd_os_close_image(void *image)
{
if (image)
filp_close((struct file *)image, NULL);
}
void
dhd_os_sdlock(dhd_pub_t *pub)
{
dhd_info_t *dhd;
dhd = (dhd_info_t *)(pub->info);
if (dhd->threads_only)
mutex_lock(&dhd->sdsem);
else
spin_lock_bh(&dhd->sdlock);
}
void
dhd_os_sdunlock(dhd_pub_t *pub)
{
dhd_info_t *dhd;
dhd = (dhd_info_t *)(pub->info);
if (dhd->threads_only)
mutex_unlock(&dhd->sdsem);
else
spin_unlock_bh(&dhd->sdlock);
}
void
dhd_os_sdlock_txq(dhd_pub_t *pub)
{
dhd_info_t *dhd;
dhd = (dhd_info_t *)(pub->info);
spin_lock_bh(&dhd->txqlock);
}
void
dhd_os_sdunlock_txq(dhd_pub_t *pub)
{
dhd_info_t *dhd;
dhd = (dhd_info_t *)(pub->info);
spin_unlock_bh(&dhd->txqlock);
}
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}
void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}
void
dhd_os_sdtxlock(dhd_pub_t *pub)
{
dhd_os_sdlock(pub);
}
void
dhd_os_sdtxunlock(dhd_pub_t *pub)
{
dhd_os_sdunlock(pub);
}
#ifdef DHD_USE_STATIC_BUF
void * dhd_os_prealloc(int section, unsigned long size)
{
#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
void *alloc_ptr = NULL;
if (wifi_control_data && wifi_control_data->mem_prealloc)
{
alloc_ptr = wifi_control_data->mem_prealloc(section, size);
if (alloc_ptr)
{
DHD_INFO(("success alloc section %d\n", section));
bzero(alloc_ptr, size);
return alloc_ptr;
}
}
DHD_ERROR(("can't alloc section %d\n", section));
return 0;
#else
return MALLOC(0, size);
#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
}
#endif /* DHD_USE_STATIC_BUF */
#if defined(CONFIG_WIRELESS_EXT)
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
int res = 0;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
if (res == 0)
return &dhd->iw.wstats;
else
return NULL;
}
#endif /* defined(CONFIG_WIRELESS_EXT) */
static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event, void **data)
{
int bcmerror = 0;
ASSERT(dhd != NULL);
bcmerror = wl_host_event(dhd, ifidx, pktdata, event, data);
if (bcmerror != BCME_OK)
return (bcmerror);
#if defined(CONFIG_WIRELESS_EXT)
ASSERT(dhd->iflist[*ifidx] != NULL);
if (ntoh32(event->event_type) == WLC_E_IF) {
DHD_INFO(("<0> interface:%d OP:%d don't pass to wext,"
"net_device might not be created yet\n",
*ifidx, ntoh32(event->event_type)));
return bcmerror;
}
ASSERT(dhd->iflist[*ifidx]->net != NULL);
if (dhd->iflist[*ifidx]->net)
wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
#endif /* defined(CONFIG_WIRELESS_EXT) */
return (bcmerror);
}
/* send up locally generated event */
void
dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
switch (ntoh32(event->event_type)) {
default:
break;
}
}
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
struct dhd_info *dhdinfo = dhd->info;
dhd_os_sdunlock(dhd);
wait_event_interruptible_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), HZ * 2);
dhd_os_sdlock(dhd);
#endif
return;
}
void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
struct dhd_info *dhdinfo = dhd->info;
if (waitqueue_active(&dhdinfo->ctrl_wait))
wake_up_interruptible(&dhdinfo->ctrl_wait);
#endif
return;
}
int
dhd_dev_reset(struct net_device *dev, uint8 flag)
{
int ret;
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
ret = dhd_bus_devreset(&dhd->pub, flag);
if (ret) {
DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
return ret;
}
DHD_ERROR(("%s: WLAN %s DONE\n", __FUNCTION__, flag ? "OFF" : "ON"));
return ret;
}
int net_os_set_suspend_disable(struct net_device *dev, int val)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd) {
ret = dhd->pub.suspend_disable_flag;
dhd->pub.suspend_disable_flag = val;
}
return ret;
}
int net_os_set_suspend(struct net_device *dev, int val)
{
int ret = 0;
#if defined(CONFIG_HAS_EARLYSUSPEND)
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
if (dhd) {
dhd_os_proto_block(&dhd->pub);
ret = dhd_set_suspend(val, &dhd->pub);
dhd_os_proto_unblock(&dhd->pub);
}
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
return ret;
}
int net_os_set_dtim_skip(struct net_device *dev, int val)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
if (dhd)
dhd->pub.dtim_skip = val;
return 0;
}
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
char *filterp = NULL;
int ret = 0;
if (!dhd || (num == DHD_UNICAST_FILTER_NUM))
return ret;
if (num >= dhd->pub.pktfilter_count)
return -EINVAL;
if (add_remove) {
switch (num) {
case DHD_BROADCAST_FILTER_NUM:
filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
break;
case DHD_MULTICAST4_FILTER_NUM:
filterp = "102 0 0 0 0xFFFFFF 0x01005E";
break;
case DHD_MULTICAST6_FILTER_NUM:
filterp = "103 0 0 0 0xFFFF 0x3333";
break;
default:
return -EINVAL;
}
}
dhd->pub.pktfilter[num] = filterp;
return ret;
}
int net_os_set_packet_filter(struct net_device *dev, int val)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
/* Packet filtering is set only if we are still in early-suspend and
* we need either to turn it ON or to turn it OFF.
* We can always turn it OFF in case of early-suspend, but we turn it
* back ON only if suspend_disable_flag was not set.
*/
if (dhd && dhd->pub.up) {
dhd_os_proto_block(&dhd->pub);
if (dhd->pub.in_suspend) {
if (!val || (val && !dhd->pub.suspend_disable_flag))
dhd_set_packet_filter(val, &dhd->pub);
}
dhd_os_proto_unblock(&dhd->pub);
}
return ret;
}
void
dhd_dev_init_ioctl(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
dhd_preinit_ioctls(&dhd->pub);
}
#ifdef PNO_SUPPORT
/* Linux wrapper to call common dhd_pno_clean */
int
dhd_dev_pno_reset(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
return (dhd_pno_clean(&dhd->pub));
}
/* Linux wrapper to call common dhd_pno_enable */
int
dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
return (dhd_pno_enable(&dhd->pub, pfn_enabled));
}
/* Linux wrapper to call common dhd_pno_set */
int
dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
ushort scan_fr, int pno_repeat, int pno_freq_expo_max)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max));
}
/* Linux wrapper to get pno status */
int
dhd_dev_get_pno_status(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
return (dhd_pno_get_status(&dhd->pub));
}
#endif /* PNO_SUPPORT */
int net_os_send_hang_message(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd) {
if (!dhd->pub.hang_was_sent) {
dhd->pub.hang_was_sent = 1;
ret = wl_iw_send_priv_event(dev, "HANG");
}
}
return ret;
}
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
if (dhd && dhd->pub.up)
memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
}
char *dhd_bus_country_get(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
if (dhd && (dhd->pub.dhd_cspec.ccode[0] != 0))
return dhd->pub.dhd_cspec.ccode;
return NULL;
}
void dhd_os_start_lock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_lock(&dhd->wl_start_lock);
#endif
}
void dhd_os_start_unlock(dhd_pub_t *pub)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
mutex_unlock(&dhd->wl_start_lock);
#endif
}
static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
return (atomic_read(&dhd->pend_8021x_cnt));
}
#define MAX_WAIT_FOR_8021X_TX 10
int
dhd_wait_pend8021x(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int timeout = 10 * HZ / 1000;
int ntimes = MAX_WAIT_FOR_8021X_TX;
int pend = dhd_get_pend_8021x_cnt(dhd);
while (ntimes && pend) {
if (pend) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(timeout);
set_current_state(TASK_RUNNING);
ntimes--;
}
pend = dhd_get_pend_8021x_cnt(dhd);
}
return pend;
}
#ifdef DHD_DEBUG
int
write_to_file(dhd_pub_t *dhd, uint8 *buf, int size)
{
int ret = 0;
struct file *fp;
mm_segment_t old_fs;
loff_t pos = 0;
/* change to KERNEL_DS address limit */
old_fs = get_fs();
set_fs(KERNEL_DS);
/* open file to write */
fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640);
if (IS_ERR(fp)) {
printf("%s: open file error\n", __FUNCTION__);
ret = -1;
fp = NULL; /* filp_open() returns ERR_PTR(), never NULL */
goto exit;
}
/* Write buf to file */
fp->f_op->write(fp, buf, size, &pos);
exit:
/* free buf before return */
MFREE(dhd->osh, buf, size);
/* close file before return */
if (fp)
filp_close(fp, current->files);
/* restore previous address limit */
set_fs(old_fs);
return ret;
}
#endif /* DHD_DEBUG */
int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags;
int ret = 0;
if (dhd) {
spin_lock_irqsave(&dhd->wl_lock, flags);
ret = dhd->wl_packet;
#ifdef CONFIG_HAS_WAKELOCK
if (dhd->wl_packet)
wake_lock_timeout(&dhd->wl_rxwake, HZ);
#endif
dhd->wl_packet = 0;
spin_unlock_irqrestore(&dhd->wl_lock, flags);
}
/* printk("%s: %d\n", __FUNCTION__, ret); */
return ret;
}
int net_os_wake_lock_timeout(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd)
ret = dhd_os_wake_lock_timeout(&dhd->pub);
return ret;
}
int dhd_os_wake_lock_timeout_enable(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags;
if (dhd) {
spin_lock_irqsave(&dhd->wl_lock, flags);
dhd->wl_packet = 1;
spin_unlock_irqrestore(&dhd->wl_lock, flags);
}
/* printk("%s\n",__func__); */
return 0;
}
int net_os_wake_lock_timeout_enable(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd)
ret = dhd_os_wake_lock_timeout_enable(&dhd->pub);
return ret;
}
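/*
* wl_count acts as a reference count: dhd_os_wake_lock() takes the
* wakelock only on the 0 -> 1 transition, and dhd_os_wake_unlock()
* releases it only once the count drops back to zero.
*/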
int dhd_os_wake_lock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags;
int ret = 0;
if (dhd) {
spin_lock_irqsave(&dhd->wl_lock, flags);
#ifdef CONFIG_HAS_WAKELOCK
if (!dhd->wl_count)
wake_lock(&dhd->wl_wifi);
#endif
dhd->wl_count++;
ret = dhd->wl_count;
spin_unlock_irqrestore(&dhd->wl_lock, flags);
}
/* printk("%s: %d\n", __FUNCTION__, ret); */
return ret;
}
int net_os_wake_lock(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd)
ret = dhd_os_wake_lock(&dhd->pub);
return ret;
}
int dhd_os_wake_unlock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags;
int ret = 0;
dhd_os_wake_lock_timeout(pub);
if (dhd) {
spin_lock_irqsave(&dhd->wl_lock, flags);
if (dhd->wl_count) {
dhd->wl_count--;
#ifdef CONFIG_HAS_WAKELOCK
if (!dhd->wl_count)
wake_unlock(&dhd->wl_wifi);
#endif
ret = dhd->wl_count;
}
spin_unlock_irqrestore(&dhd->wl_lock, flags);
}
/* printk("%s: %d\n", __FUNCTION__, ret); */
return ret;
}
int net_os_wake_unlock(struct net_device *dev)
{
dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
int ret = 0;
if (dhd)
ret = dhd_os_wake_unlock(&dhd->pub);
return ret;
}
unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags = 0;
if (dhd)
spin_lock_irqsave(&dhd->dhd_lock, flags);
return flags;
}
void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
if (dhd)
spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}
| gpl-2.0 |
Umang88/Radon-Kenzo | arch/h8300/kernel/signal.c | 2127 | 11059 | /*
* linux/arch/h8300/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
* and David McCullough <davidm@snapgear.com>
*
* Based on
* Linux/m68k by Hamish Macdonald
*/
/*
* ++roman (07/09/96): implemented signal stacks (specially for tosemu on
* Atari :-) Current limitation: Only one sigstack can be active at one time.
* If a second signal with SA_ONSTACK set arrives while working on a sigstack,
* SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
* signal handlers!
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/tracehook.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
/*
* Do a signal return; undo the signal stack.
*
* Keep the return code on the stack quadword aligned!
* That makes the cache flush below easier.
*/
struct sigframe
{
long dummy_er0;
long dummy_vector;
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
long dummy_pc;
char *pretcode;
unsigned char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
int sig;
} __attribute__((aligned(2),packed));
struct rt_sigframe
{
long dummy_er0;
long dummy_vector;
#if defined(CONFIG_CPU_H8S)
short dummy_exr;
#endif
long dummy_pc;
char *pretcode;
struct siginfo *pinfo;
void *puc;
unsigned char retcode[8];
struct siginfo info;
struct ucontext uc;
int sig;
} __attribute__((aligned(2),packed));
static inline int
restore_sigcontext(struct sigcontext *usc, int *pd0)
{
struct pt_regs *regs = current_pt_regs();
int err = 0;
unsigned int ccr;
unsigned int usp;
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */
COPY(er1);
COPY(er2);
COPY(er3);
COPY(er5);
COPY(pc);
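/*
* Bit 0x10 of CCR marks kernel mode in this port (see the check at the
* top of do_signal() below); save it here so the value copied in from
* the user's sigcontext cannot flip it.
*/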
ccr = regs->ccr & 0x10;
COPY(ccr);
#undef COPY
regs->ccr &= 0xef;
regs->ccr |= ccr;
regs->orig_er0 = -1; /* disable syscall checks */
err |= __get_user(usp, &usc->sc_usp);
wrusp(usp);
err |= __get_user(er0, &usc->sc_er0);
*pd0 = er0;
return err;
}
asmlinkage int sys_sigreturn(void)
{
unsigned long usp = rdusp();
struct sigframe *frame = (struct sigframe *)(usp - 4);
sigset_t set;
int er0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
(_NSIG_WORDS > 1 &&
__copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(&frame->sc, &er0))
goto badframe;
return er0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage int sys_rt_sigreturn(void)
{
unsigned long usp = rdusp();
struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
sigset_t set;
int er0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(&frame->uc.uc_mcontext, &er0))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return er0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
err |= __put_user(regs->er0, &sc->sc_er0);
err |= __put_user(regs->er1, &sc->sc_er1);
err |= __put_user(regs->er2, &sc->sc_er2);
err |= __put_user(regs->er3, &sc->sc_er3);
err |= __put_user(regs->er4, &sc->sc_er4);
err |= __put_user(regs->er5, &sc->sc_er5);
err |= __put_user(regs->er6, &sc->sc_er6);
err |= __put_user(rdusp(), &sc->sc_usp);
err |= __put_user(regs->pc, &sc->sc_pc);
err |= __put_user(regs->ccr, &sc->sc_ccr);
err |= __put_user(mask, &sc->sc_mask);
return err;
}
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
unsigned long usp;
/* Default to using normal stack. */
usp = rdusp();
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
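/*
* The mask below rounds the frame address down to an 8-byte boundary,
* e.g. (hypothetical values) usp 0xffc6 with a 0x3a-byte frame yields
* (0xffc6 - 0x3a) & ~7 = 0xff88.
*/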
return (void *)((usp - frame_size) & -8UL);
}
static int setup_frame (int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe *frame;
int err = 0;
int usig;
unsigned char *ret;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
usig = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= __put_user(usig, &frame->sig);
if (err)
goto give_sigsegv;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
if (err)
goto give_sigsegv;
if (_NSIG_WORDS > 1) {
err |= copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
if (err)
goto give_sigsegv;
}
ret = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
ret = (unsigned char *)(ka->sa.sa_restorer);
else {
/* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
(unsigned long *)(frame->retcode + 0));
err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
}
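/*
* The two stores above emit the bytes 1a 80 f8 <nr> 57 00: on this
* big-endian CPU, 0x1a80 is "sub.l er0,er0", 0xf8 <nr> is
* "mov.b #<nr>,r0l" and 0x5700 is "trapa #0", matching the mnemonics
* in the comment.
*/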
/* Set up to return from userspace. */
err |= __put_user(ret, &frame->pretcode);
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->er0 = (current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig);
regs->er1 = (unsigned long)&(frame->sc);
regs->er5 = current->mm->start_data; /* GOT base */
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
int usig;
unsigned char *ret;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
usig = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= __put_user(usig, &frame->sig);
if (err)
goto give_sigsegv;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto give_sigsegv;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, rdusp());
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/* Set up to return from userspace. */
ret = frame->retcode;
if (ka->sa.sa_flags & SA_RESTORER)
ret = (unsigned char *)(ka->sa.sa_restorer);
else {
/* sub.l er0,er0; mov.b #__NR_rt_sigreturn,r0l; trapa #0 */
err |= __put_user(0x1a80f800 + (__NR_rt_sigreturn & 0xff),
(unsigned long *)(frame->retcode + 0));
err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
}
err |= __put_user(ret, &frame->pretcode);
if (err)
goto give_sigsegv;
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
regs->er0 = (current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig);
regs->er1 = (unsigned long)&(frame->info);
regs->er2 = (unsigned long)&frame->uc;
regs->er5 = current->mm->start_data; /* GOT base */
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
struct pt_regs * regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* are we from a system call? */
if (regs->orig_er0 >= 0) {
switch (regs->er0) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->er0 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->er0 = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
regs->er0 = regs->orig_er0;
regs->pc -= 2;
}
}
/* set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = setup_frame(sig, ka, oldset, regs);
if (!ret)
signal_delivered(sig, info, ka, regs, 0);
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if ((regs->ccr & 0x10))
return;
current->thread.esp0 = (unsigned long) regs;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, regs);
return;
}
/* Did we come from a system call? */
if (regs->orig_er0 >= 0) {
/* Restart the system call - no handlers present */
if (regs->er0 == -ERESTARTNOHAND ||
regs->er0 == -ERESTARTSYS ||
regs->er0 == -ERESTARTNOINTR) {
regs->er0 = regs->orig_er0;
regs->pc -= 2;
}
if (regs->er0 == -ERESTART_RESTARTBLOCK) {
regs->er0 = __NR_restart_syscall;
regs->pc -= 2;
}
}
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();
}
asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
| gpl-2.0 |
Timesys/linux-timesys | drivers/media/rc/redrat3.c | 2383 | 35620 | /*
* USB RedRat3 IR Transceiver rc-core driver
*
* Copyright (c) 2011 by Jarod Wilson <jarod@redhat.com>
* based heavily on the work of Stephen Cox, with additional
* help from RedRat Ltd.
*
* This driver began life based on an old version of the first-generation
* lirc_mceusb driver from the lirc 0.7.2 distribution. It was then
* significantly rewritten by Stephen Cox with the aid of RedRat Ltd's
* Chris Dodge.
*
* The driver was then ported to rc-core and significantly rewritten again,
* by Jarod, using the in-kernel mceusb driver as a guide, after an initial
* port effort was started by Stephen.
*
* TODO LIST:
* - fix lirc not showing repeats properly
* --
*
* The RedRat3 is a USB transceiver with both send & receive,
* with 2 separate sensors available for receive to enable
* both good long range reception for general use, and good
* short range reception when required for learning a signal.
*
* http://www.redrat.co.uk/
*
* It uses its own little protocol to communicate, the required
* parts of which are embedded within this driver.
* --
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>
/* Driver Information */
#define DRIVER_VERSION "0.70"
#define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
#define DRIVER_AUTHOR2 "The Dweller, Stephen Cox"
#define DRIVER_DESC "RedRat3 USB IR Transceiver Driver"
#define DRIVER_NAME "redrat3"
/* module parameters */
#ifdef CONFIG_USB_DEBUG
static int debug = 1;
#else
static int debug;
#endif
#define RR3_DEBUG_STANDARD 0x1
#define RR3_DEBUG_FUNCTION_TRACE 0x2
#define rr3_dbg(dev, fmt, ...) \
do { \
if (debug & RR3_DEBUG_STANDARD) \
dev_info(dev, fmt, ## __VA_ARGS__); \
} while (0)
#define rr3_ftr(dev, fmt, ...) \
do { \
if (debug & RR3_DEBUG_FUNCTION_TRACE) \
dev_info(dev, fmt, ## __VA_ARGS__); \
} while (0)
/* bulk data transfer types */
#define RR3_ERROR 0x01
#define RR3_MOD_SIGNAL_IN 0x20
#define RR3_MOD_SIGNAL_OUT 0x21
/* Get the RR firmware version */
#define RR3_FW_VERSION 0xb1
#define RR3_FW_VERSION_LEN 64
/* Send encoded signal bulk-sent earlier*/
#define RR3_TX_SEND_SIGNAL 0xb3
#define RR3_SET_IR_PARAM 0xb7
#define RR3_GET_IR_PARAM 0xb8
/* Blink the red LED on the device */
#define RR3_BLINK_LED 0xb9
/* Read serial number of device */
#define RR3_READ_SER_NO 0xba
#define RR3_SER_NO_LEN 4
/* Start capture with the RC receiver */
#define RR3_RC_DET_ENABLE 0xbb
/* Stop capture with the RC receiver */
#define RR3_RC_DET_DISABLE 0xbc
/* Return the status of RC detector capture */
#define RR3_RC_DET_STATUS 0xbd
/* Reset redrat */
#define RR3_RESET 0xa0
/* Max number of lengths in the signal. */
#define RR3_IR_IO_MAX_LENGTHS 0x01
/* Periods to measure mod. freq. */
#define RR3_IR_IO_PERIODS_MF 0x02
/* Size of memory for main signal data */
#define RR3_IR_IO_SIG_MEM_SIZE 0x03
/* Delta value when measuring lengths */
#define RR3_IR_IO_LENGTH_FUZZ 0x04
/* Timeout for end of signal detection */
#define RR3_IR_IO_SIG_TIMEOUT 0x05
/* Minimum value for pause recognition. */
#define RR3_IR_IO_MIN_PAUSE 0x06
/* Clock freq. of EZ-USB chip */
#define RR3_CLK 24000000
/* Clock periods per timer count */
#define RR3_CLK_PER_COUNT 12
/* (RR3_CLK / RR3_CLK_PER_COUNT) */
#define RR3_CLK_CONV_FACTOR 2000000
/* USB bulk-in IR data endpoint address */
#define RR3_BULK_IN_EP_ADDR 0x82
/* Raw Modulated signal data value offsets */
#define RR3_PAUSE_OFFSET 0
#define RR3_FREQ_COUNT_OFFSET 4
#define RR3_NUM_PERIOD_OFFSET 6
#define RR3_MAX_LENGTHS_OFFSET 8
#define RR3_NUM_LENGTHS_OFFSET 9
#define RR3_MAX_SIGS_OFFSET 10
#define RR3_NUM_SIGS_OFFSET 12
#define RR3_REPEATS_OFFSET 14
/* Size of the fixed-length portion of the signal */
#define RR3_HEADER_LENGTH 15
#define RR3_DRIVER_MAXLENS 128
#define RR3_MAX_SIG_SIZE 512
#define RR3_MAX_BUF_SIZE \
((2 * RR3_HEADER_LENGTH) + RR3_DRIVER_MAXLENS + RR3_MAX_SIG_SIZE)
#define RR3_TIME_UNIT 50
#define RR3_END_OF_SIGNAL 0x7f
#define RR3_TX_HEADER_OFFSET 4
#define RR3_TX_TRAILER_LEN 2
#define RR3_RX_MIN_TIMEOUT 5
#define RR3_RX_MAX_TIMEOUT 2000
/* The 8051's CPUCS Register address */
#define RR3_CPUCS_REG_ADDR 0x7f92
#define USB_RR3USB_VENDOR_ID 0x112a
#define USB_RR3USB_PRODUCT_ID 0x0001
#define USB_RR3IIUSB_PRODUCT_ID 0x0005
/* table of devices that work with this driver */
static struct usb_device_id redrat3_dev_table[] = {
/* Original version of the RedRat3 */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)},
/* Second Version/release of the RedRat3 - RetRat3-II */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3IIUSB_PRODUCT_ID)},
{} /* Terminating entry */
};
/* Structure to hold all of our device specific stuff */
struct redrat3_dev {
/* core device bits */
struct rc_dev *rc;
struct device *dev;
/* save off the usb device pointer */
struct usb_device *udev;
/* the receive endpoint */
struct usb_endpoint_descriptor *ep_in;
/* the buffer to receive data */
unsigned char *bulk_in_buf;
/* urb used to read ir data */
struct urb *read_urb;
/* the send endpoint */
struct usb_endpoint_descriptor *ep_out;
/* the buffer to send data */
unsigned char *bulk_out_buf;
/* the urb used to send data */
struct urb *write_urb;
/* usb dma */
dma_addr_t dma_in;
dma_addr_t dma_out;
/* true if write urb is busy */
bool write_busy;
/* wait for the write to finish */
struct completion write_finished;
/* locks this structure */
struct mutex lock;
/* rx signal timeout timer */
struct timer_list rx_timeout;
/* Is the device currently receiving? */
bool recv_in_progress;
/* is the detector enabled? */
bool det_enabled;
/* Is the device currently transmitting? */
bool transmitting;
/* store for current packet */
char pbuf[RR3_MAX_BUF_SIZE];
u16 pktlen;
u16 pkttype;
u16 bytes_read;
/* indicate whether we are going to reprocess
* the USB callback with a bigger buffer */
int buftoosmall;
char *datap;
u32 carrier;
char name[128];
char phys[64];
};
/* All incoming data buffers adhere to a very specific data format */
struct redrat3_signal_header {
u16 length; /* Length of data being transferred */
u16 transfer_type; /* Type of data transferred */
u32 pause; /* Pause between main and repeat signals */
u16 mod_freq_count; /* Value of timer on mod. freq. measurement */
u16 no_periods; /* No. of periods over which mod. freq. is measured */
u8 max_lengths; /* Max no. of lengths (i.e. size of array) */
u8 no_lengths; /* Actual no. of elements in lengths array */
u16 max_sig_size; /* Max no. of values in signal data array */
u16 sig_size; /* Actual no. of values in signal data array */
u8 no_repeats; /* No. of repeats of repeat signal section */
/* Here forward is the lengths and signal data */
};
static void redrat3_dump_signal_header(struct redrat3_signal_header *header)
{
pr_info("%s:\n", __func__);
pr_info(" * length: %u, transfer_type: 0x%02x\n",
header->length, header->transfer_type);
pr_info(" * pause: %u, freq_count: %u, no_periods: %u\n",
header->pause, header->mod_freq_count, header->no_periods);
pr_info(" * lengths: %u (max: %u)\n",
header->no_lengths, header->max_lengths);
pr_info(" * sig_size: %u (max: %u)\n",
header->sig_size, header->max_sig_size);
pr_info(" * repeats: %u\n", header->no_repeats);
}
static void redrat3_dump_signal_data(char *buffer, u16 len)
{
int offset, i;
char *data_vals;
pr_info("%s:", __func__);
offset = RR3_TX_HEADER_OFFSET + RR3_HEADER_LENGTH
+ (RR3_DRIVER_MAXLENS * sizeof(u16));
/* read RR3_DRIVER_MAXLENS from ctrl msg */
data_vals = buffer + offset;
for (i = 0; i < len; i++) {
if (i % 10 == 0)
pr_cont("\n * ");
pr_cont("%02x ", *data_vals++);
}
pr_cont("\n");
}
/*
* redrat3_issue_async
*
* Issues an async read to the ir data in port..
* sets the callback to be redrat3_handle_async
*/
static void redrat3_issue_async(struct redrat3_dev *rr3)
{
int res;
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
if (!rr3->det_enabled) {
dev_warn(rr3->dev, "not issuing async read, "
"detector not enabled\n");
return;
}
memset(rr3->bulk_in_buf, 0, rr3->ep_in->wMaxPacketSize);
res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC);
if (res)
rr3_dbg(rr3->dev, "%s: receive request FAILED! "
"(res %d, len %d)\n", __func__, res,
rr3->read_urb->transfer_buffer_length);
}
static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
{
if (!rr3->transmitting && (code != 0x40))
dev_info(rr3->dev, "fw error code 0x%02x: ", code);
switch (code) {
case 0x00:
pr_cont("No Error\n");
break;
/* Codes 0x20 through 0x2f are IR Firmware Errors */
case 0x20:
pr_cont("Initial signal pulse not long enough "
"to measure carrier frequency\n");
break;
case 0x21:
pr_cont("Not enough length values allocated for signal\n");
break;
case 0x22:
pr_cont("Not enough memory allocated for signal data\n");
break;
case 0x23:
pr_cont("Too many signal repeats\n");
break;
case 0x28:
pr_cont("Insufficient memory available for IR signal "
"data memory allocation\n");
break;
case 0x29:
pr_cont("Insufficient memory available "
"for IrDa signal data memory allocation\n");
break;
/* Codes 0x30 through 0x3f are USB Firmware Errors */
case 0x30:
pr_cont("Insufficient memory available for bulk "
"transfer structure\n");
break;
/*
* Other error codes... These are primarily errors that can occur in
* the control messages sent to the redrat
*/
case 0x40:
if (!rr3->transmitting)
pr_cont("Signal capture has been terminated\n");
break;
case 0x41:
pr_cont("Attempt to set/get and unknown signal I/O "
"algorithm parameter\n");
break;
case 0x42:
pr_cont("Signal capture already started\n");
break;
default:
pr_cont("Unknown Error\n");
break;
}
}
static u32 redrat3_val_to_mod_freq(struct redrat3_signal_header *ph)
{
u32 mod_freq = 0;
if (ph->mod_freq_count != 0)
mod_freq = (RR3_CLK * ph->no_periods) /
(ph->mod_freq_count * RR3_CLK_PER_COUNT);
return mod_freq;
}
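/*
* Worked example (illustrative): a count of 2632 timer ticks over 50
* periods gives (24000000 * 50) / (2632 * 12) = 37993, i.e. a carrier
* of roughly 38 kHz.
*/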
/* this function scales down the figures for the same result... */
static u32 redrat3_len_to_us(u32 length)
{
u32 biglen = length * 1000;
u32 divisor = (RR3_CLK_CONV_FACTOR) / 1000;
u32 result = (u32) (biglen / divisor);
/* don't allow zero lengths to go back, breaks lirc */
return result ? result : 1;
}
/*
* convert us back into redrat3 lengths
*
* redrat3_len_to_us() computes: micro = (len * 1000) / (rr3clk / 1000)
* this function inverts it: len = (micro * (rr3clk / 1000)) / 1000
*
* where rr3clk = RR3_CLK_CONV_FACTOR = RR3_CLK / RR3_CLK_PER_COUNT, so
* the two conversions round-trip (up to integer truncation).
*/
static u32 redrat3_us_to_len(u32 microsec)
{
u32 result;
u32 divisor;
microsec &= IR_MAX_DURATION;
divisor = (RR3_CLK_CONV_FACTOR / 1000);
result = (u32)(microsec * divisor) / 1000;
/* don't allow zero lengths to go back, breaks lirc */
return result ? result : 1;
}
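/*
* Round-trip example (illustrative): with RR3_CLK_CONV_FACTOR = 2000000,
* redrat3_us_to_len(100) = (100 * 2000) / 1000 = 200 counts, and
* redrat3_len_to_us(200) = (200 * 1000) / 2000 = 100 us again.
*/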
/* timer callback to send long trailing space on receive timeout */
static void redrat3_rx_timeout(unsigned long data)
{
struct redrat3_dev *rr3 = (struct redrat3_dev *)data;
DEFINE_IR_RAW_EVENT(rawir);
rawir.pulse = false;
rawir.duration = rr3->rc->timeout;
rr3_dbg(rr3->dev, "storing trailing space with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
rr3_dbg(rr3->dev, "calling ir_raw_event_handle\n");
ir_raw_event_handle(rr3->rc);
rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n");
ir_raw_event_reset(rr3->rc);
}
static void redrat3_process_ir_data(struct redrat3_dev *rr3)
{
DEFINE_IR_RAW_EVENT(rawir);
struct redrat3_signal_header header;
struct device *dev;
int i;
unsigned long delay;
u32 mod_freq, single_len;
u16 *len_vals;
u8 *data_vals;
u32 tmp32;
u16 tmp16;
char *sig_data;
if (!rr3) {
pr_err("%s called with no context!\n", __func__);
return;
}
rr3_ftr(rr3->dev, "Entered %s\n", __func__);
dev = rr3->dev;
sig_data = rr3->pbuf;
header.length = rr3->pktlen;
header.transfer_type = rr3->pkttype;
/* Sanity check */
if (!(header.length >= RR3_HEADER_LENGTH))
dev_warn(dev, "read returned less than rr3 header len\n");
delay = usecs_to_jiffies(rr3->rc->timeout / 1000);
mod_timer(&rr3->rx_timeout, jiffies + delay);
memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32));
header.pause = be32_to_cpu(tmp32);
memcpy(&tmp16, sig_data + RR3_FREQ_COUNT_OFFSET, sizeof(tmp16));
header.mod_freq_count = be16_to_cpu(tmp16);
memcpy(&tmp16, sig_data + RR3_NUM_PERIOD_OFFSET, sizeof(tmp16));
header.no_periods = be16_to_cpu(tmp16);
header.max_lengths = sig_data[RR3_MAX_LENGTHS_OFFSET];
header.no_lengths = sig_data[RR3_NUM_LENGTHS_OFFSET];
memcpy(&tmp16, sig_data + RR3_MAX_SIGS_OFFSET, sizeof(tmp16));
header.max_sig_size = be16_to_cpu(tmp16);
memcpy(&tmp16, sig_data + RR3_NUM_SIGS_OFFSET, sizeof(tmp16));
header.sig_size = be16_to_cpu(tmp16);
header.no_repeats = sig_data[RR3_REPEATS_OFFSET];
if (debug) {
redrat3_dump_signal_header(&header);
redrat3_dump_signal_data(sig_data, header.sig_size);
}
mod_freq = redrat3_val_to_mod_freq(&header);
rr3_dbg(dev, "Got mod_freq of %u\n", mod_freq);
/* Here we pull out the 'length' values from the signal */
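/*
* Buffer layout, as decoded above: a RR3_HEADER_LENGTH byte header,
* then max_lengths big-endian u16 length values, then sig_size data
* bytes, each of which indexes into that length table.
*/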
len_vals = (u16 *)(sig_data + RR3_HEADER_LENGTH);
data_vals = sig_data + RR3_HEADER_LENGTH +
(header.max_lengths * sizeof(u16));
/* process each rr3 encoded byte into an int */
for (i = 0; i < header.sig_size; i++) {
u16 val = len_vals[data_vals[i]];
single_len = redrat3_len_to_us((u32)be16_to_cpu(val));
/* cap the value to IR_MAX_DURATION */
single_len &= IR_MAX_DURATION;
/* we should always get pulse/space/pulse/space samples */
if (i % 2)
rawir.pulse = false;
else
rawir.pulse = true;
rawir.duration = US_TO_NS(single_len);
rr3_dbg(dev, "storing %s with duration %d (i: %d)\n",
rawir.pulse ? "pulse" : "space", rawir.duration, i);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
}
/* add a trailing space, if need be */
if (i % 2) {
rawir.pulse = false;
/* this duration is made up, and may not be ideal... */
rawir.duration = rr3->rc->timeout / 2;
rr3_dbg(dev, "storing trailing space with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
}
rr3_dbg(dev, "calling ir_raw_event_handle\n");
ir_raw_event_handle(rr3->rc);
return;
}
/* Util fn to send rr3 cmds */
static u8 redrat3_send_cmd(int cmd, struct redrat3_dev *rr3)
{
struct usb_device *udev;
u8 *data;
int res;
data = kzalloc(sizeof(u8), GFP_KERNEL);
if (!data)
return -ENOMEM;
udev = rr3->udev;
res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0000, 0x0000, data, sizeof(u8), HZ * 10);
if (res < 0) {
dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d",
__func__, res, *data);
res = -EIO;
} else
res = (u8)data[0];
kfree(data);
return res;
}
/* Enables the long range detector and starts async receive */
static int redrat3_enable_detector(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
u8 ret;
rr3_ftr(dev, "Entering %s\n", __func__);
ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3);
if (ret != 0)
dev_dbg(dev, "%s: unexpected ret of %d\n",
__func__, ret);
ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3);
if (ret != 1) {
dev_err(dev, "%s: detector status: %d, should be 1\n",
__func__, ret);
return -EIO;
}
rr3->det_enabled = true;
redrat3_issue_async(rr3);
return 0;
}
/* Disables the rr3 long range detector */
static void redrat3_disable_detector(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
u8 ret;
rr3_ftr(dev, "Entering %s\n", __func__);
ret = redrat3_send_cmd(RR3_RC_DET_DISABLE, rr3);
if (ret != 0)
dev_err(dev, "%s: failure!\n", __func__);
ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3);
if (ret != 0)
dev_warn(dev, "%s: detector status: %d, should be 0\n",
__func__, ret);
rr3->det_enabled = false;
}
static inline void redrat3_delete(struct redrat3_dev *rr3,
struct usb_device *udev)
{
rr3_ftr(rr3->dev, "%s cleaning up\n", __func__);
usb_kill_urb(rr3->read_urb);
usb_kill_urb(rr3->write_urb);
usb_free_urb(rr3->read_urb);
usb_free_urb(rr3->write_urb);
usb_free_coherent(udev, rr3->ep_in->wMaxPacketSize,
rr3->bulk_in_buf, rr3->dma_in);
usb_free_coherent(udev, rr3->ep_out->wMaxPacketSize,
rr3->bulk_out_buf, rr3->dma_out);
kfree(rr3);
}
static u32 redrat3_get_timeout(struct device *dev,
struct rc_dev *rc, struct usb_device *udev)
{
u32 *tmp;
u32 timeout = MS_TO_NS(150); /* a sane default, if things go haywire */
int len, ret, pipe;
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
if (!tmp) {
dev_warn(dev, "Memory allocation faillure\n");
return timeout;
}
pipe = usb_rcvctrlpipe(udev, 0);
ret = usb_control_msg(udev, pipe, RR3_GET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5);
if (ret != len) {
dev_warn(dev, "Failed to read timeout from hardware\n");
kfree(tmp);
return timeout;
}
timeout = US_TO_NS(redrat3_len_to_us(be32_to_cpu(*tmp)));
kfree(tmp);
if (timeout < rc->min_timeout)
timeout = rc->min_timeout;
else if (timeout > rc->max_timeout)
timeout = rc->max_timeout;
rr3_dbg(dev, "Got timeout of %d ms\n", timeout / (1000 * 1000));
return timeout;
}
static void redrat3_reset(struct redrat3_dev *rr3)
{
struct usb_device *udev = rr3->udev;
struct device *dev = rr3->dev;
int rc, rxpipe, txpipe;
u8 *val;
int len = sizeof(u8);
rr3_ftr(dev, "Entering %s\n", __func__);
rxpipe = usb_rcvctrlpipe(udev, 0);
txpipe = usb_sndctrlpipe(udev, 0);
val = kzalloc(len, GFP_KERNEL);
if (!val) {
dev_err(dev, "Memory allocation failure\n");
return;
}
*val = 0x01;
rc = usb_control_msg(udev, rxpipe, RR3_RESET,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25);
rr3_dbg(dev, "reset returned 0x%02x\n", rc);
*val = 5;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25);
rr3_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc);
*val = RR3_DRIVER_MAXLENS;
rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
RR3_IR_IO_MAX_LENGTHS, 0, val, len, HZ * 25);
rr3_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc);
kfree(val);
}
static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
{
int rc = 0;
char *buffer;
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
buffer = kzalloc(sizeof(char) * (RR3_FW_VERSION_LEN + 1), GFP_KERNEL);
if (!buffer) {
dev_err(rr3->dev, "Memory allocation failure\n");
return;
}
rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
RR3_FW_VERSION,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, 0, buffer, RR3_FW_VERSION_LEN, HZ * 5);
if (rc >= 0)
dev_info(rr3->dev, "Firmware rev: %s", buffer);
else
dev_err(rr3->dev, "Problem fetching firmware ID\n");
kfree(buffer);
rr3_ftr(rr3->dev, "Exiting %s\n", __func__);
}
static void redrat3_read_packet_start(struct redrat3_dev *rr3, int len)
{
u16 tx_error;
u16 hdrlen;
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
/* grab the Length and type of transfer */
memcpy(&(rr3->pktlen), (unsigned char *) rr3->bulk_in_buf,
sizeof(rr3->pktlen));
memcpy(&(rr3->pkttype), ((unsigned char *) rr3->bulk_in_buf +
sizeof(rr3->pktlen)),
sizeof(rr3->pkttype));
/* data needs conversion to know what its real values are */
rr3->pktlen = be16_to_cpu(rr3->pktlen);
rr3->pkttype = be16_to_cpu(rr3->pkttype);
switch (rr3->pkttype) {
case RR3_ERROR:
memcpy(&tx_error, ((unsigned char *)rr3->bulk_in_buf
+ (sizeof(rr3->pktlen) + sizeof(rr3->pkttype))),
sizeof(tx_error));
tx_error = be16_to_cpu(tx_error);
redrat3_dump_fw_error(rr3, tx_error);
break;
case RR3_MOD_SIGNAL_IN:
hdrlen = sizeof(rr3->pktlen) + sizeof(rr3->pkttype);
rr3->bytes_read = len;
rr3->bytes_read -= hdrlen;
rr3->datap = &(rr3->pbuf[0]);
memcpy(rr3->datap, ((unsigned char *)rr3->bulk_in_buf + hdrlen),
rr3->bytes_read);
rr3->datap += rr3->bytes_read;
rr3_dbg(rr3->dev, "bytes_read %d, pktlen %d\n",
rr3->bytes_read, rr3->pktlen);
break;
default:
rr3_dbg(rr3->dev, "ignoring packet with type 0x%02x, "
"len of %d, 0x%02x\n", rr3->pkttype, len, rr3->pktlen);
break;
}
}
static void redrat3_read_packet_continue(struct redrat3_dev *rr3, int len)
{
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
memcpy(rr3->datap, (unsigned char *)rr3->bulk_in_buf, len);
rr3->datap += len;
rr3->bytes_read += len;
rr3_dbg(rr3->dev, "bytes_read %d, pktlen %d\n",
rr3->bytes_read, rr3->pktlen);
}
/* gather IR data from incoming urb, process it when we have enough */
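/*
* Packets may span several bulk transfers: the first chunk carries the
* big-endian length/type words, later chunks are appended to pbuf via
* bytes_read until pktlen bytes have arrived, and only then is the
* signal handed to redrat3_process_ir_data().
*/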
static int redrat3_get_ir_data(struct redrat3_dev *rr3, int len)
{
struct device *dev = rr3->dev;
int ret = 0;
rr3_ftr(dev, "Entering %s\n", __func__);
if (rr3->pktlen > RR3_MAX_BUF_SIZE) {
dev_err(rr3->dev, "error: packet larger than buffer\n");
ret = -EINVAL;
goto out;
}
if ((rr3->bytes_read == 0) &&
(len >= (sizeof(rr3->pkttype) + sizeof(rr3->pktlen)))) {
redrat3_read_packet_start(rr3, len);
} else if (rr3->bytes_read != 0) {
redrat3_read_packet_continue(rr3, len);
} else if (rr3->bytes_read == 0) {
dev_err(dev, "error: no packet data read\n");
ret = -ENODATA;
goto out;
}
if (rr3->bytes_read > rr3->pktlen) {
dev_err(dev, "bytes_read (%d) greater than pktlen (%d)\n",
rr3->bytes_read, rr3->pktlen);
ret = -EINVAL;
goto out;
} else if (rr3->bytes_read < rr3->pktlen)
/* we're still accumulating data */
return 0;
/* if we get here, we've got IR data to decode */
if (rr3->pkttype == RR3_MOD_SIGNAL_IN)
redrat3_process_ir_data(rr3);
else
rr3_dbg(dev, "discarding non-signal data packet "
"(type 0x%02x)\n", rr3->pkttype);
out:
rr3->bytes_read = 0;
rr3->pktlen = 0;
rr3->pkttype = 0;
return ret;
}
/* callback function from USB when async USB request has completed */
static void redrat3_handle_async(struct urb *urb, struct pt_regs *regs)
{
struct redrat3_dev *rr3;
if (!urb)
return;
rr3 = urb->context;
if (!rr3) {
pr_err("%s called with invalid context!\n", __func__);
usb_unlink_urb(urb);
return;
}
rr3_ftr(rr3->dev, "Entering %s\n", __func__);
if (!rr3->det_enabled) {
rr3_dbg(rr3->dev, "received a read callback but detector "
"disabled - ignoring\n");
return;
}
switch (urb->status) {
case 0:
redrat3_get_ir_data(rr3, urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_warn(rr3->dev, "Error: urb status = %d\n", urb->status);
rr3->bytes_read = 0;
rr3->pktlen = 0;
rr3->pkttype = 0;
break;
}
if (!rr3->transmitting)
redrat3_issue_async(rr3);
else
rr3_dbg(rr3->dev, "IR transmit in progress\n");
}
static void redrat3_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
{
struct redrat3_dev *rr3;
int len;
if (!urb)
return;
rr3 = urb->context;
if (rr3) {
len = urb->actual_length;
rr3_ftr(rr3->dev, "%s: called (status=%d len=%d)\n",
__func__, urb->status, len);
}
}
static u16 mod_freq_to_val(unsigned int mod_freq)
{
int mult = 6000000;
/* Clk used in mod. freq. generation is CLK24/4. */
return (u16)(65536 - (mult / mod_freq));
}
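/*
* Worked example (illustrative): for a 38 kHz carrier,
* 65536 - (6000000 / 38000) = 65536 - 157 = 65379.
*/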
static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
struct redrat3_dev *rr3 = dev->priv;
rr3->carrier = carrier;
return carrier;
}
static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
{
struct redrat3_dev *rr3 = rcdev->priv;
struct device *dev = rr3->dev;
struct redrat3_signal_header header;
int i, j, count, ret, ret_len, offset;
int lencheck, cur_sample_len, pipe;
char *buffer = NULL, *sigdata = NULL;
int *sample_lens = NULL;
u32 tmpi;
u16 tmps;
u8 *datap;
u8 curlencheck = 0;
u16 *lengths_ptr;
int sendbuf_len;
rr3_ftr(dev, "Entering %s\n", __func__);
if (rr3->transmitting) {
dev_warn(dev, "%s: transmitter already in use\n", __func__);
return -EAGAIN;
}
count = n / sizeof(int);
if (count > (RR3_DRIVER_MAXLENS * 2))
return -EINVAL;
rr3->transmitting = true;
redrat3_disable_detector(rr3);
if (rr3->det_enabled) {
dev_err(dev, "%s: cannot tx while rx is enabled\n", __func__);
ret = -EIO;
goto out;
}
sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
if (!sample_lens) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < count; i++) {
for (lencheck = 0; lencheck < curlencheck; lencheck++) {
cur_sample_len = redrat3_us_to_len(txbuf[i]);
if (sample_lens[lencheck] == cur_sample_len)
break;
}
if (lencheck == curlencheck) {
cur_sample_len = redrat3_us_to_len(txbuf[i]);
rr3_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n",
i, txbuf[i], curlencheck, cur_sample_len);
if (curlencheck < 255) {
/* now convert the value to a proper
* rr3 value.. */
sample_lens[curlencheck] = cur_sample_len;
curlencheck++;
} else {
dev_err(dev, "signal too long\n");
ret = -EINVAL;
goto out;
}
}
}
sigdata = kzalloc((count + RR3_TX_TRAILER_LEN), GFP_KERNEL);
if (!sigdata) {
ret = -ENOMEM;
goto out;
}
sigdata[count] = RR3_END_OF_SIGNAL;
sigdata[count + 1] = RR3_END_OF_SIGNAL;
for (i = 0; i < count; i++) {
for (j = 0; j < curlencheck; j++) {
if (sample_lens[j] == redrat3_us_to_len(txbuf[i]))
sigdata[i] = j;
}
}
offset = RR3_TX_HEADER_OFFSET;
sendbuf_len = RR3_HEADER_LENGTH + (sizeof(u16) * RR3_DRIVER_MAXLENS)
+ count + RR3_TX_TRAILER_LEN + offset;
buffer = kzalloc(sendbuf_len, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
/* fill in our packet header */
header.length = sendbuf_len - offset;
header.transfer_type = RR3_MOD_SIGNAL_OUT;
header.pause = redrat3_len_to_us(100);
header.mod_freq_count = mod_freq_to_val(rr3->carrier);
header.no_periods = 0; /* n/a to transmit */
header.max_lengths = RR3_DRIVER_MAXLENS;
header.no_lengths = curlencheck;
header.max_sig_size = RR3_MAX_SIG_SIZE;
header.sig_size = count + RR3_TX_TRAILER_LEN;
/* we currently rely on repeat handling in the IR encoding source */
header.no_repeats = 0;
tmps = cpu_to_be16(header.length);
memcpy(buffer, &tmps, 2);
tmps = cpu_to_be16(header.transfer_type);
memcpy(buffer + 2, &tmps, 2);
tmpi = cpu_to_be32(header.pause);
memcpy(buffer + offset, &tmpi, sizeof(tmpi));
tmps = cpu_to_be16(header.mod_freq_count);
memcpy(buffer + offset + RR3_FREQ_COUNT_OFFSET, &tmps, 2);
buffer[offset + RR3_NUM_LENGTHS_OFFSET] = header.no_lengths;
tmps = cpu_to_be16(header.sig_size);
memcpy(buffer + offset + RR3_NUM_SIGS_OFFSET, &tmps, 2);
buffer[offset + RR3_REPEATS_OFFSET] = header.no_repeats;
lengths_ptr = (u16 *)(buffer + offset + RR3_HEADER_LENGTH);
for (i = 0; i < curlencheck; ++i)
lengths_ptr[i] = cpu_to_be16(sample_lens[i]);
datap = (u8 *)(buffer + offset + RR3_HEADER_LENGTH +
(sizeof(u16) * RR3_DRIVER_MAXLENS));
memcpy(datap, sigdata, (count + RR3_TX_TRAILER_LEN));
if (debug) {
redrat3_dump_signal_header(&header);
redrat3_dump_signal_data(buffer, header.sig_size);
}
pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress);
ret = usb_bulk_msg(rr3->udev, pipe, buffer,
sendbuf_len, &ret_len, 10 * HZ);
rr3_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret);
/* now tell the hardware to transmit what we sent it */
pipe = usb_rcvctrlpipe(rr3->udev, 0);
ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0, 0, buffer, 2, HZ * 10);
if (ret < 0)
dev_err(dev, "Error: control msg send failed, rc %d\n", ret);
else
ret = n;
out:
kfree(sample_lens);
kfree(buffer);
kfree(sigdata);
rr3->transmitting = false;
redrat3_enable_detector(rr3);
return ret;
}
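/*
* Illustrative sketch, not part of the driver proper: the transmit path
* above compresses the pulse/space buffer by collecting the distinct
* RR3-encoded lengths into a small table (at most RR3_DRIVER_MAXLENS
* entries) and storing one byte per sample that indexes that table. The
* helper below is hypothetical and shows the same dedup-and-index scheme
* in isolation; all names and types here are illustrative assumptions.
*/
static inline int rr3_length_table_sketch(const u16 *samples, int count,
u16 *table, u8 *indices, int table_cap)
{
int i, j, n = 0;
for (i = 0; i < count; i++) {
for (j = 0; j < n; j++)
if (table[j] == samples[i])
break;
if (j == n) {
if (n >= table_cap)
return -EINVAL; /* too many distinct lengths */
table[n++] = samples[i];
}
indices[i] = j; /* one byte per sample, indexing the table */
}
return n; /* number of distinct lengths in use */
}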
static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
struct rc_dev *rc;
int ret = -ENODEV;
u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
rc = rc_allocate_device();
if (!rc) {
dev_err(dev, "remote input dev allocation failed\n");
goto out;
}
snprintf(rr3->name, sizeof(rr3->name), "RedRat3%s "
"Infrared Remote Transceiver (%04x:%04x)",
prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "",
le16_to_cpu(rr3->udev->descriptor.idVendor), prod);
usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys));
rc->input_name = rr3->name;
rc->input_phys = rr3->phys;
usb_to_input_id(rr3->udev, &rc->input_id);
rc->dev.parent = dev;
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
rc->timeout = redrat3_get_timeout(dev, rc, rr3->udev);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_HAUPPAUGE;
ret = rc_register_device(rc);
if (ret < 0) {
dev_err(dev, "remote dev registration failed\n");
goto out;
}
return rc;
out:
rc_free_device(rc);
return NULL;
}
static int __devinit redrat3_dev_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct device *dev = &intf->dev;
struct usb_host_interface *uhi;
struct redrat3_dev *rr3;
struct usb_endpoint_descriptor *ep;
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
u8 addr, attrs;
int pipe, i;
int retval = -ENOMEM;
rr3_ftr(dev, "%s called\n", __func__);
uhi = intf->cur_altsetting;
/* find our bulk-in and bulk-out endpoints */
for (i = 0; i < uhi->desc.bNumEndpoints; ++i) {
ep = &uhi->endpoint[i].desc;
addr = ep->bEndpointAddress;
attrs = ep->bmAttributes;
if ((ep_in == NULL) &&
((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
rr3_dbg(dev, "found bulk-in endpoint at 0x%02x\n",
ep->bEndpointAddress);
/* data comes in on 0x82, 0x81 is for other data... */
if (ep->bEndpointAddress == RR3_BULK_IN_EP_ADDR)
ep_in = ep;
}
if ((ep_out == NULL) &&
((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) &&
((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
USB_ENDPOINT_XFER_BULK)) {
rr3_dbg(dev, "found bulk-out endpoint at 0x%02x\n",
ep->bEndpointAddress);
ep_out = ep;
}
}
if (!ep_in || !ep_out) {
dev_err(dev, "Couldn't find both in and out endpoints\n");
retval = -ENODEV;
goto no_endpoints;
}
/* allocate memory for our device state and initialize it */
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
if (rr3 == NULL) {
dev_err(dev, "Memory allocation failure\n");
goto error;
}
rr3->dev = &intf->dev;
/* set up bulk-in endpoint */
rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->read_urb) {
dev_err(dev, "Read urb allocation failure\n");
goto error;
}
rr3->ep_in = ep_in;
rr3->bulk_in_buf = usb_alloc_coherent(udev, ep_in->wMaxPacketSize,
GFP_ATOMIC, &rr3->dma_in);
if (!rr3->bulk_in_buf) {
dev_err(dev, "Read buffer allocation failure\n");
goto error;
}
pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
usb_fill_bulk_urb(rr3->read_urb, udev, pipe,
rr3->bulk_in_buf, ep_in->wMaxPacketSize,
(usb_complete_t)redrat3_handle_async, rr3);
/* set up bulk-out endpoint*/
rr3->write_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rr3->write_urb) {
dev_err(dev, "Write urb allocation failure\n");
goto error;
}
rr3->ep_out = ep_out;
rr3->bulk_out_buf = usb_alloc_coherent(udev, ep_out->wMaxPacketSize,
GFP_ATOMIC, &rr3->dma_out);
if (!rr3->bulk_out_buf) {
dev_err(dev, "Write buffer allocation failure\n");
goto error;
}
pipe = usb_sndbulkpipe(udev, ep_out->bEndpointAddress);
usb_fill_bulk_urb(rr3->write_urb, udev, pipe,
rr3->bulk_out_buf, ep_out->wMaxPacketSize,
(usb_complete_t)redrat3_write_bulk_callback, rr3);
mutex_init(&rr3->lock);
rr3->udev = udev;
redrat3_reset(rr3);
redrat3_get_firmware_rev(rr3);
/* might be all we need to do? */
retval = redrat3_enable_detector(rr3);
if (retval < 0)
goto error;
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
rr3->rc = redrat3_init_rc_dev(rr3);
if (!rr3->rc)
goto error;
setup_timer(&rr3->rx_timeout, redrat3_rx_timeout, (unsigned long)rr3);
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, rr3);
rr3_ftr(dev, "Exiting %s\n", __func__);
return 0;
error:
redrat3_delete(rr3, rr3->udev);
no_endpoints:
dev_err(dev, "%s: retval = %x", __func__, retval);
return retval;
}
static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
rr3_ftr(&intf->dev, "Entering %s\n", __func__);
if (!rr3)
return;
redrat3_disable_detector(rr3);
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
redrat3_delete(rr3, udev);
rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n");
}
static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
rr3_ftr(rr3->dev, "suspend\n");
usb_kill_urb(rr3->read_urb);
return 0;
}
static int redrat3_dev_resume(struct usb_interface *intf)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
rr3_ftr(rr3->dev, "resume\n");
if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC))
return -EIO;
return 0;
}
static struct usb_driver redrat3_dev_driver = {
.name = DRIVER_NAME,
.probe = redrat3_dev_probe,
.disconnect = redrat3_dev_disconnect,
.suspend = redrat3_dev_suspend,
.resume = redrat3_dev_resume,
.reset_resume = redrat3_dev_resume,
.id_table = redrat3_dev_table
};
static int __init redrat3_dev_init(void)
{
int ret;
ret = usb_register(&redrat3_dev_driver);
if (ret < 0)
pr_err(DRIVER_NAME
": usb register failed, result = %d\n", ret);
return ret;
}
static void __exit redrat3_dev_exit(void)
{
usb_deregister(&redrat3_dev_driver);
}
module_init(redrat3_dev_init);
module_exit(redrat3_dev_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_AUTHOR(DRIVER_AUTHOR2);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, redrat3_dev_table);
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable module debug spew. 0 = no debugging (default), "
"0x1 = standard debug messages, 0x2 = function tracing debug. "
"Flag bits are additive (i.e., 0x3 for both debug types).");
| gpl-2.0 |
Pesach85/lge-kernel-omap4 | drivers/staging/mei/interrupt.c | 2383 | 44562 | /*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2011, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include "mei_dev.h"
#include "mei.h"
#include "hw.h"
#include "interface.h"
/**
* mei_interrupt_quick_handler - The ISR of the MEI device
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*/
irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
u32 csr_reg = mei_hcsr_read(dev);
if ((csr_reg & H_IS) != H_IS)
return IRQ_NONE;
/* clear H_IS bit in H_CSR */
mei_reg_write(dev, H_CSR, csr_reg);
return IRQ_WAKE_THREAD;
}
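/*
* Hypothetical wiring sketch, not code from this file: the quick handler
* above pairs with mei_interrupt_thread_handler() (defined at the bottom
* of this file) via the threaded-IRQ API. Returning IRQ_WAKE_THREAD from
* hard-irq context makes the core invoke the thread handler in a kernel
* thread, where the sleeping dev->device_lock mutex may be taken. This
* assumes both prototypes are visible (e.g. through mei_dev.h).
*/
static inline int mei_request_irq_sketch(struct pci_dev *pdev,
struct mei_device *dev)
{
return request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
mei_interrupt_thread_handler,
IRQF_SHARED, "mei", dev);
}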
/**
* _mei_cmpl - processes completed operation.
*
* @cl: private data of the file object.
* @cb_pos: callback block.
*/
static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
{
if (cb_pos->major_file_operations == MEI_WRITE) {
mei_free_cb_private(cb_pos);
cb_pos = NULL;
cl->writing_state = MEI_WRITE_COMPLETE;
if (waitqueue_active(&cl->tx_wait))
wake_up_interruptible(&cl->tx_wait);
} else if (cb_pos->major_file_operations == MEI_READ &&
MEI_READING == cl->reading_state) {
cl->reading_state = MEI_READ_COMPLETE;
if (waitqueue_active(&cl->rx_wait))
wake_up_interruptible(&cl->rx_wait);
}
}
/**
* _mei_cmpl_iamthif - processes completed iamthif operation.
*
* @dev: the device structure.
* @cb_pos: callback block.
*/
static void _mei_cmpl_iamthif(struct mei_device *dev, struct mei_cl_cb *cb_pos)
{
if (dev->iamthif_canceled != 1) {
dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
dev->iamthif_stall_timer = 0;
memcpy(cb_pos->response_buffer.data,
dev->iamthif_msg_buf,
dev->iamthif_msg_buf_index);
list_add_tail(&cb_pos->cb_list,
&dev->amthi_read_complete_list.mei_cb.cb_list);
dev_dbg(&dev->pdev->dev, "amthi read completed.\n");
dev->iamthif_timer = jiffies;
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
} else {
run_next_iamthif_cmd(dev);
}
dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
wake_up_interruptible(&dev->iamthif_cl.wait);
}
/**
* mei_irq_thread_read_amthi_message - bottom half read routine after ISR to
* handle the read amthi message data processing.
*
* @complete_list: An instance of our list structure
* @dev: the device structure
* @mei_hdr: header of amthi message
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_read_amthi_message(struct mei_io_list *complete_list,
struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
struct mei_cl *cl;
struct mei_cl_cb *cb;
unsigned char *buffer;
BUG_ON(mei_hdr->me_addr != dev->iamthif_cl.me_client_id);
BUG_ON(dev->iamthif_state != MEI_IAMTHIF_READING);
buffer = (unsigned char *) (dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index);
BUG_ON(dev->iamthif_mtu < dev->iamthif_msg_buf_index + mei_hdr->length);
mei_read_slots(dev, buffer, mei_hdr->length);
dev->iamthif_msg_buf_index += mei_hdr->length;
if (!mei_hdr->msg_complete)
return 0;
dev_dbg(&dev->pdev->dev,
"amthi_message_buffer_index =%d\n",
mei_hdr->length);
dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
if (!dev->iamthif_current_cb)
return -ENODEV;
cb = dev->iamthif_current_cb;
dev->iamthif_current_cb = NULL;
cl = (struct mei_cl *)cb->file_private;
if (!cl)
return -ENODEV;
dev->iamthif_stall_timer = 0;
cb->information = dev->iamthif_msg_buf_index;
cb->read_time = jiffies;
if (dev->iamthif_ioctl && cl == &dev->iamthif_cl) {
/* found the iamthif cb */
dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
list_add_tail(&cb->cb_list,
&complete_list->mei_cb.cb_list);
}
return 0;
}
/**
* _mei_irq_thread_state_ok - checks if mei header matches file private data
*
* @cl: private data of the file object
* @mei_hdr: header of mei client message
*
* returns !=0 if matches, 0 if no match.
*/
static int _mei_irq_thread_state_ok(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr)
{
return (cl->host_client_id == mei_hdr->host_addr &&
cl->me_client_id == mei_hdr->me_addr &&
cl->state == MEI_FILE_CONNECTED &&
MEI_READ_COMPLETE != cl->reading_state);
}
/**
* mei_irq_thread_read_client_message - bottom half read routine after ISR to
* handle the read mei client message data processing.
*
* @complete_list: An instance of our list structure
* @dev: the device structure
* @mei_hdr: header of mei client message
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
if (!(dev->read_list.status == 0 &&
!list_empty(&dev->read_list.mei_cb.cb_list)))
goto quit;
list_for_each_entry_safe(cb_pos, cb_next,
&dev->read_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
if (cl && _mei_irq_thread_state_ok(cl, mei_hdr)) {
cl->reading_state = MEI_READING;
buffer = (unsigned char *)
(cb_pos->response_buffer.data +
cb_pos->information);
BUG_ON(cb_pos->response_buffer.size <
mei_hdr->length +
cb_pos->information);
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
dev_dbg(&dev->pdev->dev, "message overflow.\n");
list_del(&cb_pos->cb_list);
return -ENOMEM;
}
if (buffer)
mei_read_slots(dev, buffer, mei_hdr->length);
cb_pos->information += mei_hdr->length;
if (mei_hdr->msg_complete) {
cl->status = 0;
list_del(&cb_pos->cb_list);
dev_dbg(&dev->pdev->dev,
"completed read host client = %d,"
"ME client = %d, "
"data length = %lu\n",
cl->host_client_id,
cl->me_client_id,
cb_pos->information);
*(cb_pos->response_buffer.data +
cb_pos->information) = '\0';
dev_dbg(&dev->pdev->dev, "cb_pos->res_buffer - %s\n",
cb_pos->response_buffer.data);
list_add_tail(&cb_pos->cb_list,
&complete_list->mei_cb.cb_list);
}
break;
}
}
quit:
dev_dbg(&dev->pdev->dev, "message read\n");
if (!buffer) {
mei_read_slots(dev, (unsigned char *) dev->rd_msg_buf,
mei_hdr->length);
dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
*(u32 *) dev->rd_msg_buf);
}
return 0;
}
/**
* _mei_irq_thread_iamthif_read - prepares to read iamthif data.
*
* @dev: the device structure.
* @slots: free slots.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
{
if (((*slots) * sizeof(u32)) >= (sizeof(struct mei_msg_hdr)
+ sizeof(struct hbm_flow_control))) {
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control) + 3) / 4;
if (!mei_send_flow_control(dev, &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
} else {
dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
dev->iamthif_state = MEI_IAMTHIF_READING;
dev->iamthif_flow_control_pending = 0;
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
dev->mei_host_buffer_is_empty =
mei_host_buffer_is_empty(dev);
}
return 0;
} else {
return -EMSGSIZE;
}
}
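/*
* Note on the slot arithmetic above (the helper below is an illustrative
* assumption, not a driver API): the hardware circular buffer is counted
* in 32-bit slots, so a message costs the one-u32 mei_msg_hdr plus its
* payload rounded up to whole u32s, hence the recurring
* (sizeof(struct mei_msg_hdr) + length + 3) / 4 expression.
*/
static inline s32 mei_msg_slots_sketch(u32 length)
{
/* header plus payload, rounded up to u32 slot granularity */
return (sizeof(struct mei_msg_hdr) + length + 3) / 4;
}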
/**
* _mei_irq_thread_close - processes close related operation.
*
* @dev: the device structure.
* @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_disconnect_request))) {
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_disconnect_request) + 3) / 4;
if (!mei_disconnect(dev, cl)) {
cl->status = 0;
cb_pos->information = 0;
list_move_tail(&cb_pos->cb_list,
&cmpl_list->mei_cb.cb_list);
return -EMSGSIZE;
} else {
cl->state = MEI_FILE_DISCONNECTING;
cl->status = 0;
cb_pos->information = 0;
list_move_tail(&cb_pos->cb_list,
&dev->ctrl_rd_list.mei_cb.cb_list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
}
} else {
/* return the cancel routine */
return -EBADMSG;
}
return 0;
}
/**
* is_treat_specially_client - checks if a connect response concerns
* this client and, if so, updates its connection state.
*
* @cl: private data of the file object
* @rs: connect response bus message
*
*/
static bool is_treat_specially_client(struct mei_cl *cl,
struct hbm_client_connect_response *rs)
{
if (cl->host_client_id == rs->host_addr &&
cl->me_client_id == rs->me_addr) {
if (!rs->status) {
cl->state = MEI_FILE_CONNECTED;
cl->status = 0;
} else {
cl->state = MEI_FILE_DISCONNECTED;
cl->status = -ENODEV;
}
cl->timer_count = 0;
return true;
}
return false;
}
/**
* mei_client_connect_response - connects to response irq routine
*
* @dev: the device structure
* @rs: connect response bus message
*/
static void mei_client_connect_response(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
dev_dbg(&dev->pdev->dev,
"connect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&(dev->wd_cl), rs)) {
dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n",
dev->wd_timeout);
dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
host_init_iamthif(dev);
return;
}
if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
if (!dev->ctrl_rd_list.status &&
!list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
list_for_each_entry_safe(cb_pos, cb_next,
&dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
if (!cl) {
list_del(&cb_pos->cb_list);
return;
}
if (MEI_IOCTL == cb_pos->major_file_operations) {
if (is_treat_specially_client(cl, rs)) {
list_del(&cb_pos->cb_list);
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
}
}
/**
* mei_client_disconnect_response - disconnects from response irq routine
*
* @dev: the device structure
* @rs: disconnect response bus message
*/
static void mei_client_disconnect_response(struct mei_device *dev,
struct hbm_client_connect_response *rs)
{
struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
dev_dbg(&dev->pdev->dev,
"disconnect_response:\n"
"ME Client = %d\n"
"Host Client = %d\n"
"Status = %d\n",
rs->me_addr,
rs->host_addr,
rs->status);
if (!dev->ctrl_rd_list.status &&
!list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
list_for_each_entry_safe(cb_pos, cb_next,
&dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
if (!cl) {
list_del(&cb_pos->cb_list);
return;
}
dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
if (cl->host_client_id == rs->host_addr &&
cl->me_client_id == rs->me_addr) {
list_del(&cb_pos->cb_list);
if (!rs->status)
cl->state = MEI_FILE_DISCONNECTED;
cl->status = 0;
cl->timer_count = 0;
break;
}
}
}
}
/**
* same_flow_addr - tells if they have the same address.
*
* @cl: private data of the file object.
* @flow: flow control.
*
* returns !=0, same; 0,not.
*/
static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
{
return (cl->host_client_id == flow->host_addr &&
cl->me_client_id == flow->me_addr);
}
/**
* add_single_flow_creds - adds single buffer credentials.
*
* @dev: the device structure.
* @flow: flow control.
*/
static void add_single_flow_creds(struct mei_device *dev,
struct hbm_flow_control *flow)
{
struct mei_me_client *client;
int i;
for (i = 0; i < dev->num_mei_me_clients; i++) {
client = &dev->me_clients[i];
if (client && flow->me_addr == client->client_id) {
if (client->props.single_recv_buf) {
client->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
flow->me_addr);
dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
client->mei_flow_ctrl_creds);
} else {
BUG(); /* error in flow control */
}
}
}
}
/**
* mei_client_flow_control_response - flow control response irq routine
*
* @dev: the device structure
* @flow_control: flow control response bus message
*/
static void mei_client_flow_control_response(struct mei_device *dev,
struct hbm_flow_control *flow_control)
{
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
if (!flow_control->host_addr) {
/* single receive buffer */
add_single_flow_creds(dev, flow_control);
} else {
/* normal connection */
list_for_each_entry_safe(cl_pos, cl_next,
&dev->file_list, link) {
dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
cl_pos->host_client_id,
cl_pos->me_client_id);
dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
flow_control->host_addr,
flow_control->me_addr);
if (same_flow_addr(cl_pos, flow_control)) {
dev_dbg(&dev->pdev->dev, "recv ctrl msg for host %d ME %d.\n",
flow_control->host_addr,
flow_control->me_addr);
cl_pos->mei_flow_ctrl_creds++;
dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
cl_pos->mei_flow_ctrl_creds);
break;
}
}
}
}
/**
* same_disconn_addr - tells if they have the same address
*
* @cl: private data of the file object.
* @disconn: disconnection request.
*
* returns !=0, same; 0,not.
*/
static int same_disconn_addr(struct mei_cl *cl,
struct hbm_client_disconnect_request *disconn)
{
return (cl->host_client_id == disconn->host_addr &&
cl->me_client_id == disconn->me_addr);
}
/**
* mei_client_disconnect_request - disconnects from request irq routine
*
* @dev: the device structure.
* @disconnect_req: disconnect request bus message.
*/
static void mei_client_disconnect_request(struct mei_device *dev,
struct hbm_client_disconnect_request *disconnect_req)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_client_connect_response *disconnect_res;
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if (same_disconn_addr(cl_pos, disconnect_req)) {
dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
disconnect_req->host_addr,
disconnect_req->me_addr);
cl_pos->state = MEI_FILE_DISCONNECTED;
cl_pos->timer_count = 0;
if (cl_pos == &dev->wd_cl) {
dev->wd_due_counter = 0;
dev->wd_pending = 0;
} else if (cl_pos == &dev->iamthif_cl)
dev->iamthif_timer = 0;
/* prepare disconnect response */
mei_hdr =
(struct mei_msg_hdr *) &dev->ext_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length =
sizeof(struct hbm_client_connect_response);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
disconnect_res =
(struct hbm_client_connect_response *)
&dev->ext_msg_buf[1];
disconnect_res->host_addr = cl_pos->host_client_id;
disconnect_res->me_addr = cl_pos->me_client_id;
*(u8 *) (&disconnect_res->cmd) =
CLIENT_DISCONNECT_RES_CMD;
disconnect_res->status = 0;
dev->extra_write_index = 2;
break;
}
}
}
/**
* mei_irq_thread_read_bus_message - bottom half read routine after ISR to
* handle the read bus message cmd processing.
*
* @dev: the device structure
* @mei_hdr: header of bus message
*/
static void mei_irq_thread_read_bus_message(struct mei_device *dev,
struct mei_msg_hdr *mei_hdr)
{
struct mei_bus_message *mei_msg;
struct hbm_host_version_response *version_res;
struct hbm_client_connect_response *connect_res;
struct hbm_client_connect_response *disconnect_res;
struct hbm_flow_control *flow_control;
struct hbm_props_response *props_res;
struct hbm_host_enum_response *enum_res;
struct hbm_client_disconnect_request *disconnect_req;
struct hbm_host_stop_request *host_stop_req;
unsigned char *buffer;
/* read the message to our buffer */
buffer = (unsigned char *) dev->rd_msg_buf;
BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
mei_read_slots(dev, buffer, mei_hdr->length);
mei_msg = (struct mei_bus_message *) buffer;
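/* the first byte of an HBM bus message is its command id, dispatched on below */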
switch (*(u8 *) mei_msg) {
case HOST_START_RES_CMD:
version_res = (struct hbm_host_version_response *) mei_msg;
if (version_res->host_version_supported) {
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
if (dev->mei_state == MEI_INIT_CLIENTS &&
dev->init_clients_state == MEI_START_MESSAGE) {
dev->init_clients_timer = 0;
host_enum_clients_message(dev);
} else {
dev->recvd_msg = 0;
dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
mei_reset(dev, 1);
return;
}
} else {
dev->version = version_res->me_max_version;
/* send stop message */
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_host_stop_request);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
host_stop_req = (struct hbm_host_stop_request *)
&dev->wr_msg_buf[1];
memset(host_stop_req,
0,
sizeof(struct hbm_host_stop_request));
host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
mei_write_message(dev, mei_hdr,
(unsigned char *) (host_stop_req),
mei_hdr->length);
dev_dbg(&dev->pdev->dev, "version mismatch.\n");
return;
}
dev->recvd_msg = 1;
dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
case CLIENT_CONNECT_RES_CMD:
connect_res =
(struct hbm_client_connect_response *) mei_msg;
mei_client_connect_response(dev, connect_res);
dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res =
(struct hbm_client_connect_response *) mei_msg;
mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
case MEI_FLOW_CONTROL_CMD:
flow_control = (struct hbm_flow_control *) mei_msg;
mei_client_flow_control_response(dev, flow_control);
dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
props_res = (struct hbm_props_response *)mei_msg;
if (props_res->status || !dev->me_clients) {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
mei_reset(dev, 1);
return;
}
if (dev->me_clients[dev->me_client_presentation_num]
.client_id == props_res->address) {
dev->me_clients[dev->me_client_presentation_num].props
= props_res->client_properties;
if (dev->mei_state == MEI_INIT_CLIENTS &&
dev->init_clients_state ==
MEI_CLIENT_PROPERTIES_MESSAGE) {
dev->me_client_index++;
dev->me_client_presentation_num++;
host_client_properties(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message");
mei_reset(dev, 1);
return;
}
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message for wrong client ID\n");
mei_reset(dev, 1);
return;
}
break;
case HOST_ENUM_RES_CMD:
enum_res = (struct hbm_host_enum_response *) mei_msg;
memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
if (dev->mei_state == MEI_INIT_CLIENTS &&
dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
dev->init_clients_timer = 0;
dev->me_client_presentation_num = 0;
dev->me_client_index = 0;
allocate_me_clients_storage(dev);
dev->init_clients_state =
MEI_CLIENT_PROPERTIES_MESSAGE;
host_client_properties(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
mei_reset(dev, 1);
return;
}
break;
case HOST_STOP_RES_CMD:
dev->mei_state = MEI_DISABLED;
dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
mei_reset(dev, 1);
break;
case CLIENT_DISCONNECT_REQ_CMD:
/* search for client */
disconnect_req =
(struct hbm_client_disconnect_request *) mei_msg;
mei_client_disconnect_request(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
/* prepare stop request */
mei_hdr = (struct mei_msg_hdr *) &dev->ext_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_host_stop_request);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
host_stop_req =
(struct hbm_host_stop_request *) &dev->ext_msg_buf[1];
memset(host_stop_req, 0, sizeof(struct hbm_host_stop_request));
host_stop_req->cmd.cmd = HOST_STOP_REQ_CMD;
host_stop_req->reason = DRIVER_STOP_REQUEST;
host_stop_req->reserved[0] = 0;
host_stop_req->reserved[1] = 0;
dev->extra_write_index = 2;
break;
default:
BUG();
break;
}
}
/**
* _mei_irq_thread_read - processes read related operation.
*
* @dev: the device structure.
* @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control) + 3) / 4;
if (!mei_send_flow_control(dev, cl)) {
cl->status = -ENODEV;
cb_pos->information = 0;
list_move_tail(&cb_pos->cb_list,
&cmpl_list->mei_cb.cb_list);
return -ENODEV;
} else {
list_move_tail(&cb_pos->cb_list,
&dev->read_list.mei_cb.cb_list);
}
} else {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
return -EBADMSG;
}
return 0;
}
/**
* _mei_irq_thread_ioctl - processes ioctl related operation.
*
* @dev: the device structure.
* @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_connect_request))) {
cl->state = MEI_FILE_CONNECTING;
*slots -= (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_client_connect_request) + 3) / 4;
if (!mei_connect(dev, cl)) {
cl->status = -ENODEV;
cb_pos->information = 0;
list_del(&cb_pos->cb_list);
return -ENODEV;
} else {
list_move_tail(&cb_pos->cb_list,
&dev->ctrl_rd_list.mei_cb.cb_list);
cl->timer_count = MEI_CONNECT_TIMEOUT;
}
} else {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
return -EBADMSG;
}
return 0;
}
/**
* _mei_irq_thread_cmpl - processes completed and no-iamthif operation.
*
* @dev: the device structure.
* @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_cmpl(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
struct mei_msg_hdr *mei_hdr;
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
(cb_pos->request_buffer.size -
cb_pos->information))) {
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->length = cb_pos->request_buffer.size -
cb_pos->information;
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
dev_dbg(&dev->pdev->dev, "cb_pos->request_buffer.size =%d"
"mei_hdr->msg_complete = %d\n",
cb_pos->request_buffer.size,
mei_hdr->msg_complete);
dev_dbg(&dev->pdev->dev, "cb_pos->information =%lu\n",
cb_pos->information);
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
mei_hdr->length);
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
if (!mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
mei_hdr->length)) {
cl->status = -ENODEV;
list_move_tail(&cb_pos->cb_list,
&cmpl_list->mei_cb.cb_list);
return -ENODEV;
} else {
if (mei_flow_ctrl_reduce(dev, cl))
return -ENODEV;
cl->status = 0;
cb_pos->information += mei_hdr->length;
list_move_tail(&cb_pos->cb_list,
&dev->write_waiting_list.mei_cb.cb_list);
}
} else if (*slots == ((dev->host_hw_state & H_CBD) >> 24)) {
/* message does not fit: buffer is still empty, so fill it with a
* msg_complete=0 fragment and send the remainder later */
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->length =
(*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr->msg_complete = 0;
mei_hdr->reserved = 0;
(*slots) -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
if (!mei_write_message(dev, mei_hdr,
(unsigned char *)
(cb_pos->request_buffer.data +
cb_pos->information),
mei_hdr->length)) {
cl->status = -ENODEV;
list_move_tail(&cb_pos->cb_list,
&cmpl_list->mei_cb.cb_list);
return -ENODEV;
} else {
cb_pos->information += mei_hdr->length;
dev_dbg(&dev->pdev->dev,
"cb_pos->request_buffer.size =%d"
" mei_hdr->msg_complete = %d\n",
cb_pos->request_buffer.size,
mei_hdr->msg_complete);
dev_dbg(&dev->pdev->dev, "cb_pos->information =%lu\n",
cb_pos->information);
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
mei_hdr->length);
}
return -EMSGSIZE;
} else {
return -EBADMSG;
}
return 0;
}
/**
* _mei_irq_thread_cmpl_iamthif - processes completed iamthif operation.
*
* @dev: the device structure.
* @slots: free slots.
* @cb_pos: callback block.
* @cl: private data of the file object.
* @cmpl_list: complete list.
*
* returns 0, OK; otherwise, error.
*/
static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
struct mei_cl_cb *cb_pos,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
struct mei_msg_hdr *mei_hdr;
if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
dev->iamthif_msg_buf_size -
dev->iamthif_msg_buf_index)) {
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->length = dev->iamthif_msg_buf_size -
dev->iamthif_msg_buf_index;
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
if (!mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
dev->iamthif_state = MEI_IAMTHIF_IDLE;
cl->status = -ENODEV;
list_del(&cb_pos->cb_list);
return -ENODEV;
} else {
if (mei_flow_ctrl_reduce(dev, cl))
return -ENODEV;
dev->iamthif_msg_buf_index += mei_hdr->length;
cb_pos->information = dev->iamthif_msg_buf_index;
cl->status = 0;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev->iamthif_flow_control_pending = 1;
/* save iamthif cb sent to amthi client */
dev->iamthif_current_cb = cb_pos;
list_move_tail(&cb_pos->cb_list,
&dev->write_waiting_list.mei_cb.cb_list);
}
} else if (*slots == ((dev->host_hw_state & H_CBD) >> 24)) {
/* message does not fit: buffer is still empty, so fill it with a
* msg_complete=0 fragment and send the remainder later */
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = cl->host_client_id;
mei_hdr->me_addr = cl->me_client_id;
mei_hdr->length =
(*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
mei_hdr->msg_complete = 0;
mei_hdr->reserved = 0;
*slots -= (sizeof(struct mei_msg_hdr) +
mei_hdr->length + 3) / 4;
if (!mei_write_message(dev, mei_hdr,
(dev->iamthif_msg_buf +
dev->iamthif_msg_buf_index),
mei_hdr->length)) {
cl->status = -ENODEV;
list_del(&cb_pos->cb_list);
} else {
dev->iamthif_msg_buf_index += mei_hdr->length;
}
return -EMSGSIZE;
} else {
return -EBADMSG;
}
return 0;
}
/**
* mei_irq_thread_read_handler - bottom half read routine after ISR to
* handle the read processing.
*
* @cmpl_list: An instance of our list structure
* @dev: the device structure
* @slots: slots to read.
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_read_handler(struct mei_io_list *cmpl_list,
struct mei_device *dev,
s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
int ret = 0;
if (!dev->rd_msg_hdr) {
dev->rd_msg_hdr = mei_mecbrw_read(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
(*slots)--;
dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
}
mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
if (mei_hdr->reserved || !dev->rd_msg_hdr) {
dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
ret = -EBADMSG;
goto end;
}
if (mei_hdr->host_addr || mei_hdr->me_addr) {
list_for_each_entry_safe(cl_pos, cl_next,
&dev->file_list, link) {
dev_dbg(&dev->pdev->dev,
"list_for_each_entry_safe read host"
" client = %d, ME client = %d\n",
cl_pos->host_client_id,
cl_pos->me_client_id);
if (cl_pos->host_client_id == mei_hdr->host_addr &&
cl_pos->me_client_id == mei_hdr->me_addr)
break;
}
if (&cl_pos->link == &dev->file_list) {
dev_dbg(&dev->pdev->dev, "corrupted message header\n");
ret = -EBADMSG;
goto end;
}
}
if (((*slots) * sizeof(u32)) < mei_hdr->length) {
dev_dbg(&dev->pdev->dev,
"we can't read the message slots =%08x.\n",
*slots);
/* we can't read the message */
ret = -ERANGE;
goto end;
}
/* decide where to read the message to */
if (!mei_hdr->host_addr) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
mei_irq_thread_read_bus_message(dev, mei_hdr);
dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
(MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
(dev->iamthif_state == MEI_IAMTHIF_READING)) {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
mei_hdr->length);
ret = mei_irq_thread_read_amthi_message(cmpl_list,
dev, mei_hdr);
if (ret)
goto end;
} else {
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
ret = mei_irq_thread_read_client_message(cmpl_list,
dev, mei_hdr);
if (ret)
goto end;
}
/* reset the number of slots and header */
*slots = mei_count_full_read_slots(dev);
dev->rd_msg_hdr = 0;
if (*slots == -EOVERFLOW) {
/* overflow - reset */
dev_dbg(&dev->pdev->dev, "resetting due to slots overflow.\n");
/* set the event since message has been read */
ret = -ERANGE;
goto end;
}
end:
return ret;
}
/**
* mei_irq_thread_write_handler - bottom half write routine after
* ISR to handle the write processing.
*
* @cmpl_list: An instance of our list structure
* @dev: the device structure
* @slots: slots to write.
*
* returns 0 on success, <0 on failure.
*/
static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
struct mei_device *dev,
s32 *slots)
{
struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
struct mei_io_list *list;
int ret;
if (!mei_host_buffer_is_empty(dev)) {
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
dev->write_hang = -1;
*slots = mei_count_empty_write_slots(dev);
/* complete all waiting for write CB */
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
list = &dev->write_waiting_list;
if (!list->status && !list_empty(&list->mei_cb.cb_list)) {
list_for_each_entry_safe(cb_pos, cb_next,
&list->mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
if (cl) {
cl->status = 0;
list_del(&cb_pos->cb_list);
if (MEI_WRITING == cl->writing_state &&
(cb_pos->major_file_operations ==
MEI_WRITE) &&
(cl != &dev->iamthif_cl)) {
dev_dbg(&dev->pdev->dev,
"MEI WRITE COMPLETE\n");
cl->writing_state =
MEI_WRITE_COMPLETE;
list_add_tail(&cb_pos->cb_list,
&cmpl_list->mei_cb.cb_list);
}
if (cl == &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
if (dev->iamthif_flow_control_pending) {
ret =
_mei_irq_thread_iamthif_read(
dev, slots);
if (ret)
return ret;
}
}
}
}
}
if (dev->stop && !dev->wd_pending) {
dev->wd_stopped = 1;
wake_up_interruptible(&dev->wait_stop_wd);
return 0;
}
if (dev->extra_write_index) {
dev_dbg(&dev->pdev->dev, "extra_write_index =%d.\n",
dev->extra_write_index);
mei_write_message(dev,
(struct mei_msg_hdr *) &dev->ext_msg_buf[0],
(unsigned char *) &dev->ext_msg_buf[1],
(dev->extra_write_index - 1) * sizeof(u32));
*slots -= dev->extra_write_index;
dev->extra_write_index = 0;
}
if (dev->mei_state == MEI_ENABLED) {
if (dev->wd_pending &&
mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
else
if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
return -ENODEV;
dev->wd_pending = 0;
if (dev->wd_timeout) {
*slots -= (sizeof(struct mei_msg_hdr) +
MEI_START_WD_DATA_SIZE + 3) / 4;
dev->wd_due_counter = 2;
} else {
*slots -= (sizeof(struct mei_msg_hdr) +
MEI_WD_PARAMS_SIZE + 3) / 4;
dev->wd_due_counter = 0;
}
}
}
if (dev->stop)
return -ENODEV;
/* complete control write list CB */
if (!dev->ctrl_wr_list.status) {
/* complete control write list CB */
dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb_pos, cb_next,
&dev->ctrl_wr_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)
cb_pos->file_private;
if (!cl) {
list_del(&cb_pos->cb_list);
return -ENODEV;
}
switch (cb_pos->major_file_operations) {
case MEI_CLOSE:
/* send disconnect message */
ret = _mei_irq_thread_close(dev, slots,
cb_pos, cl, cmpl_list);
if (ret)
return ret;
break;
case MEI_READ:
/* send flow control message */
ret = _mei_irq_thread_read(dev, slots,
cb_pos, cl, cmpl_list);
if (ret)
return ret;
break;
case MEI_IOCTL:
/* connect message */
if (!mei_other_client_is_connecting(dev,
cl))
continue;
ret = _mei_irq_thread_ioctl(dev, slots,
cb_pos, cl, cmpl_list);
if (ret)
return ret;
break;
default:
BUG();
}
}
}
/* complete write list CB */
if (!dev->write_list.status &&
!list_empty(&dev->write_list.mei_cb.cb_list)) {
dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb_pos, cb_next,
&dev->write_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
if (cl) {
if (cl != &dev->iamthif_cl) {
if (!mei_flow_ctrl_creds(dev,
cl)) {
dev_dbg(&dev->pdev->dev,
"No flow control"
" credentials for client"
" %d, not sending.\n",
cl->host_client_id);
continue;
}
ret = _mei_irq_thread_cmpl(dev, slots,
cb_pos,
cl, cmpl_list);
if (ret)
return ret;
} else if (cl == &dev->iamthif_cl) {
/* IAMTHIF IOCTL */
dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
if (!mei_flow_ctrl_creds(dev,
cl)) {
dev_dbg(&dev->pdev->dev,
"No flow control"
" credentials for amthi"
" client %d.\n",
cl->host_client_id);
continue;
}
ret = _mei_irq_thread_cmpl_iamthif(dev,
slots,
cb_pos,
cl,
cmpl_list);
if (ret)
return ret;
}
}
}
}
return 0;
}
/**
* mei_wd_timer - timer function for watchdog and timeout handling.
*
* @work: pointer to the work_struct structure
*
* NOTE: This function runs as self-rearming delayed work.
*/
void mei_wd_timer(struct work_struct *work)
{
unsigned long timeout;
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
struct list_head *amthi_complete_list = NULL;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
struct mei_device *dev = container_of(work,
struct mei_device, wd_work.work);
mutex_lock(&dev->device_lock);
if (dev->mei_state != MEI_ENABLED) {
if (dev->mei_state == MEI_INIT_CLIENTS) {
if (dev->init_clients_timer) {
if (--dev->init_clients_timer == 0) {
dev_dbg(&dev->pdev->dev, "IMEI reset due to init clients timeout ,init clients state = %d.\n",
dev->init_clients_state);
mei_reset(dev, 1);
}
}
}
goto out;
}
/*** connect/disconnect timeouts ***/
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if (cl_pos->timer_count) {
if (--cl_pos->timer_count == 0) {
dev_dbg(&dev->pdev->dev, "HECI reset due to connect/disconnect timeout.\n");
mei_reset(dev, 1);
goto out;
}
}
}
if (dev->wd_cl.state != MEI_FILE_CONNECTED)
goto out;
/* Watchdog */
if (dev->wd_due_counter && !dev->wd_bypass) {
if (--dev->wd_due_counter == 0) {
if (dev->mei_host_buffer_is_empty &&
mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
dev->mei_host_buffer_is_empty = 0;
dev_dbg(&dev->pdev->dev, "send watchdog.\n");
if (mei_wd_send(dev))
dev_dbg(&dev->pdev->dev, "wd send failed.\n");
else
if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
goto out;
if (dev->wd_timeout)
dev->wd_due_counter = 2;
else
dev->wd_due_counter = 0;
} else
dev->wd_pending = 1;
}
}
if (dev->iamthif_stall_timer) {
if (--dev->iamthif_stall_timer == 0) {
dev_dbg(&dev->pdev->dev, "reseting because of hang to amthi.\n");
mei_reset(dev, 1);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
dev->iamthif_canceled = 0;
dev->iamthif_ioctl = 1;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
if (dev->iamthif_current_cb)
mei_free_cb_private(dev->iamthif_current_cb);
dev->iamthif_file_object = NULL;
dev->iamthif_current_cb = NULL;
run_next_iamthif_cmd(dev);
}
}
if (dev->iamthif_timer) {
timeout = dev->iamthif_timer +
msecs_to_jiffies(IAMTHIF_READ_TIMER);
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
if (time_after(jiffies, timeout)) {
/*
* User didn't read the AMTHI data on time (15sec)
* freeing AMTHI for other requests
*/
dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");
amthi_complete_list = &dev->amthi_read_complete_list.
mei_cb.cb_list;
if (!list_empty(amthi_complete_list)) {
list_for_each_entry_safe(cb_pos, cb_next,
amthi_complete_list,
cb_list) {
cl_pos = cb_pos->file_object->private_data;
/* Finding the AMTHI entry. */
if (cl_pos == &dev->iamthif_cl)
list_del(&cb_pos->cb_list);
}
}
if (dev->iamthif_current_cb)
mei_free_cb_private(dev->iamthif_current_cb);
dev->iamthif_file_object->private_data = NULL;
dev->iamthif_file_object = NULL;
dev->iamthif_current_cb = NULL;
dev->iamthif_timer = 0;
run_next_iamthif_cmd(dev);
}
}
out:
schedule_delayed_work(&dev->wd_work, 2 * HZ);
mutex_unlock(&dev->device_lock);
}
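/*
* Hypothetical initialization sketch (an assumption, since the actual
* setup lives elsewhere in the driver): mei_wd_timer() is a self-rearming
* delayed work item, so at init time it is plausibly armed as below,
* after which the schedule_delayed_work() call above keeps it firing
* every two seconds. Assumes linux/workqueue.h is visible here.
*/
static inline void mei_wd_timer_start_sketch(struct mei_device *dev)
{
INIT_DELAYED_WORK(&dev->wd_work, mei_wd_timer);
schedule_delayed_work(&dev->wd_work, 2 * HZ);
}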
/**
* mei_interrupt_thread_handler - function called after ISR to handle the interrupt
* processing.
*
* @irq: The irq number
* @dev_id: pointer to the device structure
*
* returns irqreturn_t
*
*/
irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_io_list complete_list;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
struct mei_cl *cl;
s32 slots;
int rets;
bool bus_message_received;
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
mei_initialize_list(&complete_list, dev);
dev->host_hw_state = mei_hcsr_read(dev);
dev->me_hw_state = mei_mecsr_read(dev);
/* check if ME wants a reset */
if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
dev->mei_state != MEI_RESETING &&
dev->mei_state != MEI_INITIALIZING) {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
/* check if we need to start the dev */
if ((dev->host_hw_state & H_RDY) == 0) {
if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
dev->host_hw_state |= (H_IE | H_IG | H_RDY);
mei_hcsr_set(dev);
dev->mei_state = MEI_INIT_CLIENTS;
dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
/* link is established
* start sending messages.
*/
host_start_message(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
} else {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
slots, dev->extra_write_index);
while (slots > 0 && !dev->extra_write_index) {
dev_dbg(&dev->pdev->dev, "slots =%08x extra_write_index =%08x.\n",
slots, dev->extra_write_index);
dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
if (rets)
goto end;
}
rets = mei_irq_thread_write_handler(&complete_list, dev, &slots);
end:
dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
dev->host_hw_state = mei_hcsr_read(dev);
dev->mei_host_buffer_is_empty = mei_host_buffer_is_empty(dev);
bus_message_received = false;
if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
bus_message_received = true;
}
mutex_unlock(&dev->device_lock);
if (bus_message_received) {
dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
if (complete_list.status || list_empty(&complete_list.mei_cb.cb_list))
return IRQ_HANDLED;
list_for_each_entry_safe(cb_pos, cb_next,
&complete_list.mei_cb.cb_list, cb_list) {
cl = (struct mei_cl *)cb_pos->file_private;
list_del(&cb_pos->cb_list);
if (cl) {
if (cl != &dev->iamthif_cl) {
dev_dbg(&dev->pdev->dev, "completing call back.\n");
_mei_cmpl(cl, cb_pos);
cb_pos = NULL;
} else if (cl == &dev->iamthif_cl) {
_mei_cmpl_iamthif(dev, cb_pos);
}
}
}
return IRQ_HANDLED;
}
| gpl-2.0 |
cmenard/kernel_smdk4412 | net/dccp/input.c | 2639 | 22019 | /*
* net/dccp/input.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8;
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
sk->sk_data_ready(sk, 0);
}
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
/*
* On receiving Close/CloseReq, both RD/WR shutdown are performed.
* RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
* receiving the closing segment, but there is no guarantee that such
* data will be processed at all.
*/
sk->sk_shutdown = SHUTDOWN_MASK;
sock_set_flag(sk, SOCK_DONE);
dccp_enqueue_skb(sk, skb);
}
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
int queued = 0;
switch (sk->sk_state) {
/*
* We ignore Close when received in one of the following states:
* - CLOSED (may be a late or duplicate packet)
* - PASSIVE_CLOSEREQ (the peer has sent a CloseReq earlier)
* - RESPOND (already handled by dccp_check_req)
*/
case DCCP_CLOSING:
/*
* Simultaneous-close: receiving a Close after sending one. This
* can happen if both client and server perform active-close and
* will result in an endless ping-pong of crossing and retrans-
* mitted Close packets, which only terminates when one of the
* nodes times out (min. 64 seconds). Quicker convergence can be
* achieved when one of the nodes acts as tie-breaker.
* This is ok as both ends are done with data transfer and each
* end is just waiting for the other to acknowledge termination.
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
break;
/* fall through */
case DCCP_REQUESTING:
case DCCP_ACTIVE_CLOSEREQ:
dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
dccp_done(sk);
break;
case DCCP_OPEN:
case DCCP_PARTOPEN:
/* Give waiting application a chance to read pending data */
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
/* fall through */
case DCCP_PASSIVE_CLOSE:
/*
* Retransmitted Close: we have already enqueued the first one.
*/
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
}
return queued;
}
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
int queued = 0;
/*
* Step 7: Check for unexpected packet types
* If (S.is_server and P.type == CloseReq)
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
return queued;
}
/* Step 13: process relevant Client states < CLOSEREQ */
switch (sk->sk_state) {
case DCCP_REQUESTING:
dccp_send_close(sk, 0);
dccp_set_state(sk, DCCP_CLOSING);
break;
case DCCP_OPEN:
case DCCP_PARTOPEN:
/* Give waiting application a chance to read pending data */
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
/* fall through */
case DCCP_PASSIVE_CLOSEREQ:
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
}
return queued;
}
static u16 dccp_reset_code_convert(const u8 code)
{
const u16 error_code[] = {
[DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
[DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
[DCCP_RESET_CODE_ABORTED] = ECONNRESET,
[DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED,
[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
[DCCP_RESET_CODE_TOO_BUSY] = EUSERS,
[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,
[DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG,
[DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR,
[DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC,
[DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ,
[DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP,
};
return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
sk->sk_err = err;
/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
dccp_fin(sk, skb);
if (err && !sock_flag(sk, SOCK_DEAD))
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
{
struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
if (av == NULL)
return;
if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
dccp_ackvec_input(av, skb);
}
static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_sock *dp = dccp_sk(sk);
/* Don't deliver to RX CCID when node has shut down read end. */
if (!(sk->sk_shutdown & RCV_SHUTDOWN))
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
/*
* Until the TX queue has been drained, we can not honour SHUT_WR, since
* we need received feedback as input to adjust congestion control.
*/
if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
struct dccp_sock *dp = dccp_sk(sk);
u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
/*
* Step 5: Prepare sequence numbers for Sync
* If P.type == Sync or P.type == SyncAck,
* If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
* / * P is valid, so update sequence number variables
* accordingly. After this update, P will pass the tests
* in Step 6. A SyncAck is generated if necessary in
* Step 15 * /
* Update S.GSR, S.SWL, S.SWH
* Otherwise,
* Drop packet and return
*/
if (dh->dccph_type == DCCP_PKT_SYNC ||
dh->dccph_type == DCCP_PKT_SYNCACK) {
if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
dccp_update_gsr(sk, seqno);
else
return -1;
}
/*
* Step 6: Check sequence numbers
* Let LSWL = S.SWL and LAWL = S.AWL
* If P.type == CloseReq or P.type == Close or P.type == Reset,
* LSWL := S.GSR + 1, LAWL := S.GAR
* If LSWL <= P.seqno <= S.SWH
* and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
* Update S.GSR, S.SWL, S.SWH
* If P.type != Sync,
* Update S.GAR
*/
lswl = dp->dccps_swl;
lawl = dp->dccps_awl;
if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
dh->dccph_type == DCCP_PKT_CLOSE ||
dh->dccph_type == DCCP_PKT_RESET) {
lswl = ADD48(dp->dccps_gsr, 1);
lawl = dp->dccps_gar;
}
if (between48(seqno, lswl, dp->dccps_swh) &&
(ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
between48(ackno, lawl, dp->dccps_awh))) {
dccp_update_gsr(sk, seqno);
if (dh->dccph_type != DCCP_PKT_SYNC &&
ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
after48(ackno, dp->dccps_gar))
dp->dccps_gar = ackno;
} else {
unsigned long now = jiffies;
/*
* Step 6: Check sequence numbers
* Otherwise,
* If P.type == Reset,
* Send Sync packet acknowledging S.GSR
* Otherwise,
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*
* These Syncs are rate-limited as per RFC 4340, 7.5.4: at most one
* Sync per sysctl_dccp_sync_ratelimit interval (HZ/8 jiffies by
* default, i.e. at most 8 Syncs per second).
*/
if (time_before(now, (dp->dccps_rate_last +
sysctl_dccp_sync_ratelimit)))
return -1;
DCCP_WARN("Step 6 failed for %s packet, "
"(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
"(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
"sending SYNC...\n", dccp_packet_name(dh->dccph_type),
(unsigned long long) lswl, (unsigned long long) seqno,
(unsigned long long) dp->dccps_swh,
(ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
: "exists",
(unsigned long long) lawl, (unsigned long long) ackno,
(unsigned long long) dp->dccps_awh);
dp->dccps_rate_last = now;
if (dh->dccph_type == DCCP_PKT_RESET)
seqno = dp->dccps_gsr;
dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
return -1;
}
return 0;
}
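/*
* Illustration only (a hypothetical equivalent, not the dccp.h
* implementation): every window test above works in 48-bit circular
* sequence space, so between48(seq, low, high) means
* "low <= seq <= high" modulo 2^48. A wrap-safe way to express that:
*/
static inline int between48_sketch(u64 seq, u64 low, u64 high)
{
const u64 mask = (1ULL << 48) - 1;
/* distance from low, mod 2^48, must not exceed the window width */
return ((seq - low) & mask) <= ((high - low) & mask);
}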
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
switch (dccp_hdr(skb)->dccph_type) {
case DCCP_PKT_DATAACK:
case DCCP_PKT_DATA:
/*
* FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
* - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
* - sk_receive_queue is full, use Code 2, "Receive Buffer"
*/
dccp_enqueue_skb(sk, skb);
return 0;
case DCCP_PKT_ACK:
goto discard;
case DCCP_PKT_RESET:
/*
* Step 9: Process Reset
* If P.type == Reset,
* Tear down connection
* S.state := TIMEWAIT
* Set TIMEWAIT timer
* Drop packet and return
*/
dccp_rcv_reset(sk, skb);
return 0;
case DCCP_PKT_CLOSEREQ:
if (dccp_rcv_closereq(sk, skb))
return 0;
goto discard;
case DCCP_PKT_CLOSE:
if (dccp_rcv_close(sk, skb))
return 0;
goto discard;
case DCCP_PKT_REQUEST:
/* Step 7
* or (S.is_server and P.type == Response)
* or (S.is_client and P.type == Request)
* or (S.state >= OPEN and P.type == Request
* and P.seqno >= S.OSR)
* or (S.state >= OPEN and P.type == Response
* and P.seqno >= S.OSR)
* or (S.state == RESPOND and P.type == Data),
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
if (dp->dccps_role != DCCP_ROLE_LISTEN)
goto send_sync;
goto check_seq;
case DCCP_PKT_RESPONSE:
if (dp->dccps_role != DCCP_ROLE_CLIENT)
goto send_sync;
check_seq:
if (dccp_delta_seqno(dp->dccps_osr,
DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNC);
}
break;
case DCCP_PKT_SYNC:
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNCACK);
/*
* From RFC 4340, sec. 5.7
*
* As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
* MAY have non-zero-length application data areas, whose
* contents receivers MUST ignore.
*/
goto discard;
}
DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
__kfree_skb(skb);
return 0;
}
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned len)
{
if (dccp_check_seqno(sk, skb))
goto discard;
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
__kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_rcv_established);
static int dccp_rcv_request_sent_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
const unsigned len)
{
/*
* Step 4: Prepare sequence numbers in REQUEST
* If S.state == REQUEST,
* If (P.type == Response or P.type == Reset)
* and S.AWL <= P.ackno <= S.AWH,
* / * Set sequence number variables corresponding to the
* other endpoint, so P will pass the tests in Step 6 * /
* Set S.GSR, S.ISR, S.SWL, S.SWH
* / * Response processing continues in Step 10; Reset
* processing continues in Step 9 * /
*/
if (dh->dccph_type == DCCP_PKT_RESPONSE) {
const struct inet_connection_sock *icsk = inet_csk(sk);
struct dccp_sock *dp = dccp_sk(sk);
long tstamp = dccp_timestamp();
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
"P.ackno=%llu, S.AWH=%llu\n",
(unsigned long long)dp->dccps_awl,
(unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long)dp->dccps_awh);
goto out_invalid_packet;
}
/*
* If option processing (Step 8) failed, return 1 here so that
* dccp_v4_do_rcv() sends a Reset. The Reset code depends on
* the option type and is set in dccp_parse_options().
*/
if (dccp_parse_options(sk, NULL, skb))
return 1;
/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
dp->dccps_options_received.dccpor_timestamp_echo));
/* Stop the REQUEST timer */
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
WARN_ON(sk->sk_send_head == NULL);
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
/*
* Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect
* and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH
* is done as part of activating the feature values below, since
* these settings depend on the local/remote Sequence Window
* features, which were undefined or not confirmed until now.
*/
dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
/*
* Step 10: Process REQUEST state (second part)
* If S.state == REQUEST,
* / * If we get here, P is a valid Response from the
* server (see Step 4), and we should move to
* PARTOPEN state. PARTOPEN means send an Ack,
* don't send Data packets, retransmit Acks
* periodically, and always include any Init Cookie
* from the Response * /
* S.state := PARTOPEN
* Set PARTOPEN timer
* Continue with S.state == PARTOPEN
* / * Step 12 will send the Ack completing the
* three-way handshake * /
*/
dccp_set_state(sk, DCCP_PARTOPEN);
/*
* If feature negotiation was successful, activate features now;
* an activation failure means that this host could not activate
* one or more features (e.g. insufficient memory), which would
* leave at least one feature in an undefined state.
*/
if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
goto unable_to_proceed;
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
icsk->icsk_accept_queue.rskq_defer_accept) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
*
* It may be deleted, but with this feature tcpdumps
* look so _wonderfully_ clever, that I was not able
* to stand against the temptation 8) --ANK
*/
/*
* OK, in DCCP we can do a similar trick as well; it's
* even in the draft, but there is no need for us to
* schedule an ack here, as dccp_sendmsg does this for
* us, as also stated in the draft. -acme
*/
__kfree_skb(skb);
return 0;
}
dccp_send_ack(sk);
return -1;
}
out_invalid_packet:
/* dccp_v4_do_rcv will send a reset */
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
return 1;
unable_to_proceed:
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
/*
* We mark this socket as no longer usable, so that the loop in
* dccp_sendmsg() terminates and the application gets notified.
*/
dccp_set_state(sk, DCCP_CLOSED);
sk->sk_err = ECOMM;
return 1;
}
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
const unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
int queued = 0;
switch (dh->dccph_type) {
case DCCP_PKT_RESET:
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
break;
case DCCP_PKT_DATA:
if (sk->sk_state == DCCP_RESPOND)
break;
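/* fall through: in PARTOPEN, Data is handled like (Data)Ack below */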
case DCCP_PKT_DATAACK:
case DCCP_PKT_ACK:
/*
* FIXME: we should be resetting the PARTOPEN (DELACK) timer
* here, but only if we haven't used the DELACK timer for
* something else, like sending a delayed ack for a TIMESTAMP
* echo, etc. For now we're not clearing it; sending an extra
* ACK when there is nothing else to do in DELACK is not a big
* deal after all.
*/
/* Stop the PARTOPEN timer */
if (sk->sk_state == DCCP_PARTOPEN)
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
if (likely(sample)) {
long delta = dccp_timestamp() - sample;
dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
}
dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_set_state(sk, DCCP_OPEN);
if (dh->dccph_type == DCCP_PKT_DATAACK ||
dh->dccph_type == DCCP_PKT_DATA) {
__dccp_rcv_established(sk, skb, dh, len);
queued = 1; /* packet was queued
(by __dccp_rcv_established) */
}
break;
}
return queued;
}
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct dccp_hdr *dh, unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
const int old_state = sk->sk_state;
int queued = 0;
/*
* Step 3: Process LISTEN state
*
* If S.state == LISTEN,
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
* Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
skb) < 0)
return 1;
goto discard;
}
if (dh->dccph_type == DCCP_PKT_RESET)
goto discard;
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
} else if (sk->sk_state == DCCP_CLOSED) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
}
if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
if (dccp_check_seqno(sk, skb))
goto discard;
/*
* Step 8: Process options and mark acknowledgeable
*/
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
}
/*
* Step 9: Process Reset
* If P.type == Reset,
* Tear down connection
* S.state := TIMEWAIT
* Set TIMEWAIT timer
* Drop packet and return
*/
if (dh->dccph_type == DCCP_PKT_RESET) {
dccp_rcv_reset(sk, skb);
return 0;
/*
* Step 7: Check for unexpected packet types
* If (S.is_server and P.type == Response)
* or (S.is_client and P.type == Request)
* or (S.state == RESPOND and P.type == Data),
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
dh->dccph_type == DCCP_PKT_RESPONSE) ||
(dp->dccps_role == DCCP_ROLE_CLIENT &&
dh->dccph_type == DCCP_PKT_REQUEST) ||
(sk->sk_state == DCCP_RESPOND &&
dh->dccph_type == DCCP_PKT_DATA)) {
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
if (dccp_rcv_closereq(sk, skb))
return 0;
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
if (dccp_rcv_close(sk, skb))
return 0;
goto discard;
}
switch (sk->sk_state) {
case DCCP_REQUESTING:
queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
if (queued >= 0)
return queued;
__kfree_skb(skb);
return 0;
case DCCP_RESPOND:
case DCCP_PARTOPEN:
queued = dccp_rcv_respond_partopen_state_process(sk, skb,
dh, len);
break;
}
if (dh->dccph_type == DCCP_PKT_ACK ||
dh->dccph_type == DCCP_PKT_DATAACK) {
switch (old_state) {
case DCCP_PARTOPEN:
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
break;
}
} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
goto discard;
}
if (!queued) {
discard:
__kfree_skb(skb);
}
return 0;
}
EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
/**
* dccp_sample_rtt - Validate and finalise computation of RTT sample
* @sk:    socket structure
* @delta: number of microseconds between packet and acknowledgment
*
* The routine is kept generic to work in different contexts. It should be
* called immediately when the ACK used for the RTT sample arrives.
*/
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
/* dccpor_elapsed_time is either zeroed out or set and > 0 */
delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
if (unlikely(delta <= 0)) {
DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
return DCCP_SANE_RTT_MIN;
}
if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
DCCP_WARN("RTT sample %ld too large, using max\n", delta);
return DCCP_SANE_RTT_MAX;
}
return delta;
}
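/*
 * Worked example (editor's addition): DCCP timestamp options use units
 * of 10 microseconds, which is why this routine and its callers scale
 * by 10. If the echoed timestamp is 4200 units old and the peer reports
 * 1500 units of local holding time, the caller passes
 * delta = 10 * 4200 = 42000 usec and the routine returns
 * 42000 - 10 * 1500 = 27000 usec, i.e. an RTT sample of 27 ms.
 */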
| gpl-2.0 |
zarboz/Beastmode_LTE_2.0 | net/dccp/input.c | 2639 | 22019 | /*
* net/dccp/input.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8;
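/*
 * Editor's note: the limit is a minimum spacing expressed in jiffies.
 * The default of HZ / 8 corresponds to 125 ms at any HZ, i.e. at most
 * 8 Syncs per second in reply to sequence-invalid packets.
 */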
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
sk->sk_data_ready(sk, 0);
}
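/*
 * Editor's note: dccph_doff is the DCCP data offset in 32-bit words
 * (RFC 4340, 5.1), so "dccph_doff * 4" above skips the header plus
 * options in bytes and leaves only application data in the queued skb.
 */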
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
/*
* On receiving Close/CloseReq, both RD/WR shutdown are performed.
* RFC 4340, 8.3 says that we MAY send further Data/DataAcks after
* receiving the closing segment, but there is no guarantee that such
* data will be processed at all.
*/
sk->sk_shutdown = SHUTDOWN_MASK;
sock_set_flag(sk, SOCK_DONE);
dccp_enqueue_skb(sk, skb);
}
static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
int queued = 0;
switch (sk->sk_state) {
/*
* We ignore Close when received in one of the following states:
* - CLOSED (may be a late or duplicate packet)
* - PASSIVE_CLOSEREQ (the peer has sent a CloseReq earlier)
* - RESPOND (already handled by dccp_check_req)
*/
case DCCP_CLOSING:
/*
* Simultaneous-close: receiving a Close after sending one. This
* can happen if both client and server perform active-close and
* will result in an endless ping-pong of crossing and retrans-
* mitted Close packets, which only terminates when one of the
* nodes times out (min. 64 seconds). Quicker convergence can be
* achieved when one of the nodes acts as tie-breaker.
* This is ok as both ends are done with data transfer and each
* end is just waiting for the other to acknowledge termination.
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
break;
/* fall through */
case DCCP_REQUESTING:
case DCCP_ACTIVE_CLOSEREQ:
dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
dccp_done(sk);
break;
case DCCP_OPEN:
case DCCP_PARTOPEN:
/* Give waiting application a chance to read pending data */
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
/* fall through */
case DCCP_PASSIVE_CLOSE:
/*
* Retransmitted Close: we have already enqueued the first one.
*/
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
}
return queued;
}
static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
int queued = 0;
/*
* Step 7: Check for unexpected packet types
* If (S.is_server and P.type == CloseReq)
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
return queued;
}
/* Step 13: process relevant Client states < CLOSEREQ */
switch (sk->sk_state) {
case DCCP_REQUESTING:
dccp_send_close(sk, 0);
dccp_set_state(sk, DCCP_CLOSING);
break;
case DCCP_OPEN:
case DCCP_PARTOPEN:
/* Give waiting application a chance to read pending data */
queued = 1;
dccp_fin(sk, skb);
dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
/* fall through */
case DCCP_PASSIVE_CLOSEREQ:
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
}
return queued;
}
static u16 dccp_reset_code_convert(const u8 code)
{
const u16 error_code[] = {
[DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */
[DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */
[DCCP_RESET_CODE_ABORTED] = ECONNRESET,
[DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED,
[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
[DCCP_RESET_CODE_TOO_BUSY] = EUSERS,
[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,
[DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG,
[DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR,
[DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC,
[DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ,
[DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP,
};
return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}
static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);
sk->sk_err = err;
/* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */
dccp_fin(sk, skb);
if (err && !sock_flag(sk, SOCK_DEAD))
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}
static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
{
struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
if (av == NULL)
return;
if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
dccp_ackvec_input(av, skb);
}
static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_sock *dp = dccp_sk(sk);
/* Don't deliver to RX CCID when node has shut down read end. */
if (!(sk->sk_shutdown & RCV_SHUTDOWN))
ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
/*
* Until the TX queue has been drained, we cannot honour SHUT_WR, since
* we need received feedback as input to adjust congestion control.
*/
if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
}
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
struct dccp_sock *dp = dccp_sk(sk);
u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
/*
* Step 5: Prepare sequence numbers for Sync
* If P.type == Sync or P.type == SyncAck,
* If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
* / * P is valid, so update sequence number variables
* accordingly. After this update, P will pass the tests
* in Step 6. A SyncAck is generated if necessary in
* Step 15 * /
* Update S.GSR, S.SWL, S.SWH
* Otherwise,
* Drop packet and return
*/
if (dh->dccph_type == DCCP_PKT_SYNC ||
dh->dccph_type == DCCP_PKT_SYNCACK) {
if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
dccp_update_gsr(sk, seqno);
else
return -1;
}
/*
* Step 6: Check sequence numbers
* Let LSWL = S.SWL and LAWL = S.AWL
* If P.type == CloseReq or P.type == Close or P.type == Reset,
* LSWL := S.GSR + 1, LAWL := S.GAR
* If LSWL <= P.seqno <= S.SWH
* and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
* Update S.GSR, S.SWL, S.SWH
* If P.type != Sync,
* Update S.GAR
*/
lswl = dp->dccps_swl;
lawl = dp->dccps_awl;
if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
dh->dccph_type == DCCP_PKT_CLOSE ||
dh->dccph_type == DCCP_PKT_RESET) {
lswl = ADD48(dp->dccps_gsr, 1);
lawl = dp->dccps_gar;
}
if (between48(seqno, lswl, dp->dccps_swh) &&
(ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
between48(ackno, lawl, dp->dccps_awh))) {
dccp_update_gsr(sk, seqno);
if (dh->dccph_type != DCCP_PKT_SYNC &&
ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
after48(ackno, dp->dccps_gar))
dp->dccps_gar = ackno;
} else {
unsigned long now = jiffies;
/*
* Step 6: Check sequence numbers
* Otherwise,
* If P.type == Reset,
* Send Sync packet acknowledging S.GSR
* Otherwise,
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*
* These Syncs are rate-limited as per RFC 4340, 7.5.4:
* at most one Sync every sysctl_dccp_sync_ratelimit jiffies.
*/
if (time_before(now, (dp->dccps_rate_last +
sysctl_dccp_sync_ratelimit)))
return -1;
DCCP_WARN("Step 6 failed for %s packet, "
"(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
"(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
"sending SYNC...\n", dccp_packet_name(dh->dccph_type),
(unsigned long long) lswl, (unsigned long long) seqno,
(unsigned long long) dp->dccps_swh,
(ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
: "exists",
(unsigned long long) lawl, (unsigned long long) ackno,
(unsigned long long) dp->dccps_awh);
dp->dccps_rate_last = now;
if (dh->dccph_type == DCCP_PKT_RESET)
seqno = dp->dccps_gsr;
dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
return -1;
}
return 0;
}
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
switch (dccp_hdr(skb)->dccph_type) {
case DCCP_PKT_DATAACK:
case DCCP_PKT_DATA:
/*
* FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when
* - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening"
* - sk_receive_queue is full, use Code 2, "Receive Buffer"
*/
dccp_enqueue_skb(sk, skb);
return 0;
case DCCP_PKT_ACK:
goto discard;
case DCCP_PKT_RESET:
/*
* Step 9: Process Reset
* If P.type == Reset,
* Tear down connection
* S.state := TIMEWAIT
* Set TIMEWAIT timer
* Drop packet and return
*/
dccp_rcv_reset(sk, skb);
return 0;
case DCCP_PKT_CLOSEREQ:
if (dccp_rcv_closereq(sk, skb))
return 0;
goto discard;
case DCCP_PKT_CLOSE:
if (dccp_rcv_close(sk, skb))
return 0;
goto discard;
case DCCP_PKT_REQUEST:
/* Step 7
* or (S.is_server and P.type == Response)
* or (S.is_client and P.type == Request)
* or (S.state >= OPEN and P.type == Request
* and P.seqno >= S.OSR)
* or (S.state >= OPEN and P.type == Response
* and P.seqno >= S.OSR)
* or (S.state == RESPOND and P.type == Data),
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
if (dp->dccps_role != DCCP_ROLE_LISTEN)
goto send_sync;
goto check_seq;
case DCCP_PKT_RESPONSE:
if (dp->dccps_role != DCCP_ROLE_CLIENT)
goto send_sync;
check_seq:
if (dccp_delta_seqno(dp->dccps_osr,
DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNC);
}
break;
case DCCP_PKT_SYNC:
dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
DCCP_PKT_SYNCACK);
/*
* From RFC 4340, sec. 5.7
*
* As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
* MAY have non-zero-length application data areas, whose
* contents receivers MUST ignore.
*/
goto discard;
}
DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
__kfree_skb(skb);
return 0;
}
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned len)
{
if (dccp_check_seqno(sk, skb))
goto discard;
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
return __dccp_rcv_established(sk, skb, dh, len);
discard:
__kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_rcv_established);
static int dccp_rcv_request_sent_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
const unsigned len)
{
/*
* Step 4: Prepare sequence numbers in REQUEST
* If S.state == REQUEST,
* If (P.type == Response or P.type == Reset)
* and S.AWL <= P.ackno <= S.AWH,
* / * Set sequence number variables corresponding to the
* other endpoint, so P will pass the tests in Step 6 * /
* Set S.GSR, S.ISR, S.SWL, S.SWH
* / * Response processing continues in Step 10; Reset
* processing continues in Step 9 * /
*/
if (dh->dccph_type == DCCP_PKT_RESPONSE) {
const struct inet_connection_sock *icsk = inet_csk(sk);
struct dccp_sock *dp = dccp_sk(sk);
long tstamp = dccp_timestamp();
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
"P.ackno=%llu, S.AWH=%llu\n",
(unsigned long long)dp->dccps_awl,
(unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long)dp->dccps_awh);
goto out_invalid_packet;
}
/*
* If option processing (Step 8) failed, return 1 here so that
* dccp_v4_do_rcv() sends a Reset. The Reset code depends on
* the option type and is set in dccp_parse_options().
*/
if (dccp_parse_options(sk, NULL, skb))
return 1;
/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
dp->dccps_options_received.dccpor_timestamp_echo));
/* Stop the REQUEST timer */
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
WARN_ON(sk->sk_send_head == NULL);
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
/*
* Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect
* and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH
* is done as part of activating the feature values below, since
* these settings depend on the local/remote Sequence Window
* features, which were undefined or not confirmed until now.
*/
dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
/*
* Step 10: Process REQUEST state (second part)
* If S.state == REQUEST,
* / * If we get here, P is a valid Response from the
* server (see Step 4), and we should move to
* PARTOPEN state. PARTOPEN means send an Ack,
* don't send Data packets, retransmit Acks
* periodically, and always include any Init Cookie
* from the Response * /
* S.state := PARTOPEN
* Set PARTOPEN timer
* Continue with S.state == PARTOPEN
* / * Step 12 will send the Ack completing the
* three-way handshake * /
*/
dccp_set_state(sk, DCCP_PARTOPEN);
/*
* If feature negotiation was successful, activate features now;
* an activation failure means that this host could not activate
* one or more features (e.g. insufficient memory), which would
* leave at least one feature in an undefined state.
*/
if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
goto unable_to_proceed;
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
icsk->icsk_accept_queue.rskq_defer_accept) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
*
* It may be deleted, but with this feature tcpdumps
* look so _wonderfully_ clever, that I was not able
* to stand against the temptation 8) --ANK
*/
/*
* OK, in DCCP we can do a similar trick as well; it's
* even in the draft, but there is no need for us to
* schedule an ack here, as dccp_sendmsg does this for
* us, as also stated in the draft. -acme
*/
__kfree_skb(skb);
return 0;
}
dccp_send_ack(sk);
return -1;
}
out_invalid_packet:
/* dccp_v4_do_rcv will send a reset */
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
return 1;
unable_to_proceed:
DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED;
/*
* We mark this socket as no longer usable, so that the loop in
* dccp_sendmsg() terminates and the application gets notified.
*/
dccp_set_state(sk, DCCP_CLOSED);
sk->sk_err = ECOMM;
return 1;
}
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
struct sk_buff *skb,
const struct dccp_hdr *dh,
const unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
int queued = 0;
switch (dh->dccph_type) {
case DCCP_PKT_RESET:
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
break;
case DCCP_PKT_DATA:
if (sk->sk_state == DCCP_RESPOND)
break;
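/* fall through: in PARTOPEN, Data is handled like (Data)Ack below */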
case DCCP_PKT_DATAACK:
case DCCP_PKT_ACK:
/*
* FIXME: we should be resetting the PARTOPEN (DELACK) timer
* here, but only if we haven't used the DELACK timer for
* something else, like sending a delayed ack for a TIMESTAMP
* echo, etc. For now we're not clearing it; sending an extra
* ACK when there is nothing else to do in DELACK is not a big
* deal after all.
*/
/* Stop the PARTOPEN timer */
if (sk->sk_state == DCCP_PARTOPEN)
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
/* Obtain usec RTT sample from SYN exchange (used by TFRC). */
if (likely(sample)) {
long delta = dccp_timestamp() - sample;
dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
}
dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_set_state(sk, DCCP_OPEN);
if (dh->dccph_type == DCCP_PKT_DATAACK ||
dh->dccph_type == DCCP_PKT_DATA) {
__dccp_rcv_established(sk, skb, dh, len);
queued = 1; /* packet was queued
(by __dccp_rcv_established) */
}
break;
}
return queued;
}
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct dccp_hdr *dh, unsigned len)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
const int old_state = sk->sk_state;
int queued = 0;
/*
* Step 3: Process LISTEN state
*
* If S.state == LISTEN,
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
* Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
skb) < 0)
return 1;
goto discard;
}
if (dh->dccph_type == DCCP_PKT_RESET)
goto discard;
/* Caller (dccp_v4_do_rcv) will send Reset */
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
} else if (sk->sk_state == DCCP_CLOSED) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
return 1;
}
if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
if (dccp_check_seqno(sk, skb))
goto discard;
/*
* Step 8: Process options and mark acknowledgeable
*/
if (dccp_parse_options(sk, NULL, skb))
return 1;
dccp_handle_ackvec_processing(sk, skb);
dccp_deliver_input_to_ccids(sk, skb);
}
/*
* Step 9: Process Reset
* If P.type == Reset,
* Tear down connection
* S.state := TIMEWAIT
* Set TIMEWAIT timer
* Drop packet and return
*/
if (dh->dccph_type == DCCP_PKT_RESET) {
dccp_rcv_reset(sk, skb);
return 0;
/*
* Step 7: Check for unexpected packet types
* If (S.is_server and P.type == Response)
* or (S.is_client and P.type == Request)
* or (S.state == RESPOND and P.type == Data),
* Send Sync packet acknowledging P.seqno
* Drop packet and return
*/
} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
dh->dccph_type == DCCP_PKT_RESPONSE) ||
(dp->dccps_role == DCCP_ROLE_CLIENT &&
dh->dccph_type == DCCP_PKT_REQUEST) ||
(sk->sk_state == DCCP_RESPOND &&
dh->dccph_type == DCCP_PKT_DATA)) {
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
if (dccp_rcv_closereq(sk, skb))
return 0;
goto discard;
} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
if (dccp_rcv_close(sk, skb))
return 0;
goto discard;
}
switch (sk->sk_state) {
case DCCP_REQUESTING:
queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
if (queued >= 0)
return queued;
__kfree_skb(skb);
return 0;
case DCCP_RESPOND:
case DCCP_PARTOPEN:
queued = dccp_rcv_respond_partopen_state_process(sk, skb,
dh, len);
break;
}
if (dh->dccph_type == DCCP_PKT_ACK ||
dh->dccph_type == DCCP_PKT_DATAACK) {
switch (old_state) {
case DCCP_PARTOPEN:
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
break;
}
} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
goto discard;
}
if (!queued) {
discard:
__kfree_skb(skb);
}
return 0;
}
EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
/**
* dccp_sample_rtt - Validate and finalise computation of RTT sample
* @sk:    socket structure
* @delta: number of microseconds between packet and acknowledgment
*
* The routine is kept generic to work in different contexts. It should be
* called immediately when the ACK used for the RTT sample arrives.
*/
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
/* dccpor_elapsed_time is either zeroed out or set and > 0 */
delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
if (unlikely(delta <= 0)) {
DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
return DCCP_SANE_RTT_MIN;
}
if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
DCCP_WARN("RTT sample %ld too large, using max\n", delta);
return DCCP_SANE_RTT_MAX;
}
return delta;
}
| gpl-2.0 |
bsmitty83/B-Team-Sense | drivers/tty/serial/bfin_5xx.c | 2639 | 41193 | /*
* Blackfin On-Chip Serial Driver
*
* Copyright 2006-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Licensed under the GPL-2 or later.
*/
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#define DRIVER_NAME "bfin-uart"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/kgdb.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <asm/portmux.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
#include <asm/bfin_serial.h>
#ifdef CONFIG_SERIAL_BFIN_MODULE
# undef CONFIG_EARLY_PRINTK
#endif
/* UART name and device definitions */
#define BFIN_SERIAL_DEV_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
#define BFIN_SERIAL_MINOR 64
static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
# ifndef CONFIG_SERIAL_BFIN_PIO
# error KGDB only supports UART in PIO mode.
# endif
static int kgdboc_port_line;
static int kgdboc_break_enabled;
#endif
/*
* Setup for console. The argument comes from menuconfig.
*/
#define DMA_RX_XCOUNT 512
#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
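/*
 * Editor's note: with 4 KiB pages this describes an 8-row DMA ring
 * (DMA_RX_YCOUNT = 4096 / 512) that the flush timer drains every
 * HZ / 50 jiffies, i.e. roughly every 20 ms.
 */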
#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
#else
static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
#endif
static void bfin_serial_reset_irda(struct uart_port *port);
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
if (uart->cts_pin < 0)
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
/* The CTS pin is active-low. */
if (UART_GET_CTS(uart))
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
else
return TIOCM_DSR | TIOCM_CAR;
}
static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
if (uart->rts_pin < 0)
return;
/* The RTS pin is active-low. */
if (mctrl & TIOCM_RTS)
UART_ENABLE_RTS(uart);
else
UART_DISABLE_RTS(uart);
}
/*
* Handle any change of modem status signal.
*/
static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
unsigned int status;
status = bfin_serial_get_mctrl(&uart->port);
uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
uart->scts = 1;
UART_CLEAR_SCTS(uart);
UART_CLEAR_IER(uart, EDSSI);
#endif
return IRQ_HANDLED;
}
#else
static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
{
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
#endif
/*
* interrupts are disabled on entry
*/
static void bfin_serial_stop_tx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
#ifdef CONFIG_SERIAL_BFIN_DMA
struct circ_buf *xmit = &uart->port.state->xmit;
#endif
while (!(UART_GET_LSR(uart) & TEMT))
cpu_relax();
#ifdef CONFIG_SERIAL_BFIN_DMA
disable_dma(uart->tx_dma_channel);
xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx += uart->tx_count;
uart->tx_count = 0;
uart->tx_done = 1;
#else
#ifdef CONFIG_BF54x
/* Clear TFI bit */
UART_PUT_LSR(uart, TFI);
#endif
UART_CLEAR_IER(uart, ETBEI);
#endif
}
/*
* port is locked and interrupts are disabled
*/
static void bfin_serial_start_tx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
struct tty_struct *tty = uart->port.state->port.tty;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
#endif
/*
* To avoid losing the RX interrupt, we reset the IR function
* before sending data.
*/
if (tty->termios->c_line == N_IRDA)
bfin_serial_reset_irda(port);
#ifdef CONFIG_SERIAL_BFIN_DMA
if (uart->tx_done)
bfin_serial_dma_tx_chars(uart);
#else
UART_SET_IER(uart, ETBEI);
bfin_serial_tx_chars(uart);
#endif
}
/*
* Interrupts are enabled
*/
static void bfin_serial_stop_rx(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
UART_CLEAR_IER(uart, ERBFI);
}
/*
* Set the modem control timer to fire immediately.
*/
static void bfin_serial_enable_ms(struct uart_port *port)
{
}
#if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO)
# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
#else
# define UART_GET_ANOMALY_THRESHOLD(uart) 0
# define UART_SET_ANOMALY_THRESHOLD(uart, v)
#endif
#ifdef CONFIG_SERIAL_BFIN_PIO
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
struct tty_struct *tty = NULL;
unsigned int status, ch, flg;
static struct timeval anomaly_start = { .tv_sec = 0 };
status = UART_GET_LSR(uart);
UART_CLEAR_LSR(uart);
ch = UART_GET_CHAR(uart);
uart->port.icount.rx++;
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
if (kgdb_connected && kgdboc_port_line == uart->port.line
&& kgdboc_break_enabled)
if (ch == 0x3) {/* Ctrl + C */
kgdb_breakpoint();
return;
}
if (!uart->port.state || !uart->port.state->port.tty)
return;
#endif
tty = uart->port.state->port.tty;
if (ANOMALY_05000363) {
/* The BF533 (and BF561) family of processors has a nice anomaly
* where they continuously generate characters for a "single" break.
* We have to basically ignore this flood until the "next" valid
* character comes across. Due to the nature of the flood, it is
* not possible to reliably catch bytes that are sent too quickly
* after this break. So application code talking to the Blackfin
* which sends a break signal must allow at least 1.5 character
* times after the end of the break for things to stabilize. This
* timeout was picked as it must absolutely be larger than 1
* character time +/- some percent. So 1.5 sounds good. All other
* Blackfin families operate properly. Woo.
*/
if (anomaly_start.tv_sec) {
struct timeval curr;
suseconds_t usecs;
if ((~ch & (~ch + 1)) & 0xff)
goto known_good_char;
do_gettimeofday(&curr);
if (curr.tv_sec - anomaly_start.tv_sec > 1)
goto known_good_char;
usecs = 0;
if (curr.tv_sec != anomaly_start.tv_sec)
usecs += USEC_PER_SEC;
usecs += curr.tv_usec - anomaly_start.tv_usec;
if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
goto known_good_char;
if (ch)
anomaly_start.tv_sec = 0;
else
anomaly_start = curr;
return;
known_good_char:
status &= ~BI;
anomaly_start.tv_sec = 0;
}
}
if (status & BI) {
if (ANOMALY_05000363)
if (bfin_revid() < 5)
do_gettimeofday(&anomaly_start);
uart->port.icount.brk++;
if (uart_handle_break(&uart->port))
goto ignore_char;
status &= ~(PE | FE);
}
if (status & PE)
uart->port.icount.parity++;
if (status & OE)
uart->port.icount.overrun++;
if (status & FE)
uart->port.icount.frame++;
status &= uart->port.read_status_mask;
if (status & BI)
flg = TTY_BREAK;
else if (status & PE)
flg = TTY_PARITY;
else if (status & FE)
flg = TTY_FRAME;
else
flg = TTY_NORMAL;
if (uart_handle_sysrq_char(&uart->port, ch))
goto ignore_char;
uart_insert_char(&uart->port, status, OE, ch, flg);
ignore_char:
tty_flip_buffer_push(tty);
}
static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
struct circ_buf *xmit = &uart->port.state->xmit;
if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
/* Clear TFI bit */
UART_PUT_LSR(uart, TFI);
#endif
/* Anomaly notes:
* 05000215 - we always clear ETBEI within the last UART TX
* interrupt to end a string. It is always set
* when starting a new tx.
*/
UART_CLEAR_IER(uart, ETBEI);
return;
}
if (uart->port.x_char) {
UART_PUT_CHAR(uart, uart->port.x_char);
uart->port.icount.tx++;
uart->port.x_char = 0;
}
while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx++;
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uart->port);
}
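/*
 * Editor's note: the "& (UART_XMIT_SIZE - 1)" wrap above is the usual
 * power-of-two ring-buffer trick; it is equivalent to "% UART_XMIT_SIZE"
 * without a division, and is correct only because UART_XMIT_SIZE is a
 * power of two.
 */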
static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
while (UART_GET_LSR(uart) & DR)
bfin_serial_rx_chars(uart);
return IRQ_HANDLED;
}
static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
#endif
spin_lock(&uart->port.lock);
if (UART_GET_LSR(uart) & THRE)
bfin_serial_tx_chars(uart);
spin_unlock(&uart->port.lock);
return IRQ_HANDLED;
}
#endif
#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
struct circ_buf *xmit = &uart->port.state->xmit;
uart->tx_done = 0;
if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
uart->tx_count = 0;
uart->tx_done = 1;
return;
}
if (uart->port.x_char) {
UART_PUT_CHAR(uart, uart->port.x_char);
uart->port.icount.tx++;
uart->port.x_char = 0;
}
uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
uart->tx_count = UART_XMIT_SIZE - xmit->tail;
blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
set_dma_config(uart->tx_dma_channel,
set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
INTR_ON_BUF,
DIMENSION_LINEAR,
DATA_SIZE_8,
DMA_SYNC_RESTART));
set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
set_dma_x_modify(uart->tx_dma_channel, 1);
SSYNC();
enable_dma(uart->tx_dma_channel);
UART_SET_IER(uart, ETBEI);
}
static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
struct tty_struct *tty = uart->port.state->port.tty;
int i, flg, status;
status = UART_GET_LSR(uart);
UART_CLEAR_LSR(uart);
uart->port.icount.rx +=
CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
UART_XMIT_SIZE);
if (status & BI) {
uart->port.icount.brk++;
if (uart_handle_break(&uart->port))
goto dma_ignore_char;
status &= ~(PE | FE);
}
if (status & PE)
uart->port.icount.parity++;
if (status & OE)
uart->port.icount.overrun++;
if (status & FE)
uart->port.icount.frame++;
status &= uart->port.read_status_mask;
if (status & BI)
flg = TTY_BREAK;
else if (status & PE)
flg = TTY_PARITY;
else if (status & FE)
flg = TTY_FRAME;
else
flg = TTY_NORMAL;
for (i = uart->rx_dma_buf.tail; ; i++) {
if (i >= UART_XMIT_SIZE)
i = 0;
if (i == uart->rx_dma_buf.head)
break;
if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
uart_insert_char(&uart->port, status, OE,
uart->rx_dma_buf.buf[i], flg);
}
dma_ignore_char:
tty_flip_buffer_push(tty);
}
void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
dma_disable_irq_nosync(uart->rx_dma_channel);
spin_lock_bh(&uart->rx_lock);
/* 2D DMA RX buffer ring is used. Because curr_y_count and
* curr_x_count can't be read as an atomic operation,
* curr_y_count should be read before curr_x_count. When
* curr_x_count is read, curr_y_count may already indicate the
* next buffer line, while the position calculated here still
* refers to the old line. Such a stale position may be smaller
* than the current buffer tail, causing garbage to be received
* if it is not rejected.
*/
uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
uart->rx_dma_nrows = 0;
x_pos = DMA_RX_XCOUNT - x_pos;
if (x_pos == DMA_RX_XCOUNT)
x_pos = 0;
pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
/* Ignore the received data if the new position is on the same line
* as the current buffer tail and is smaller.
*/
if (pos > uart->rx_dma_buf.tail ||
uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
uart->rx_dma_buf.head = pos;
bfin_serial_dma_rx_chars(uart);
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
spin_unlock_bh(&uart->rx_lock);
dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
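/*
 * Worked example (editor's addition) of the position arithmetic above:
 * if the controller reports curr_y_count = 6 and curr_x_count = 100,
 * then rx_dma_nrows = DMA_RX_YCOUNT - 6 = 2 completed rows,
 * x_pos = DMA_RX_XCOUNT - 100 = 412 bytes into the current row, and
 * the new head candidate is pos = 2 * 512 + 412 = 1436 bytes into the
 * ring buffer.
 */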
static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
struct circ_buf *xmit = &uart->port.state->xmit;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
#endif
spin_lock(&uart->port.lock);
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
clear_dma_irqstat(uart->tx_dma_channel);
/* Anomaly notes:
* 05000215 - we always clear ETBEI within the last UART TX
* interrupt to end a string. It is always set
* when starting a new tx.
*/
UART_CLEAR_IER(uart, ETBEI);
xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
uart->port.icount.tx += uart->tx_count;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uart->port);
bfin_serial_dma_tx_chars(uart);
}
spin_unlock(&uart->port.lock);
return IRQ_HANDLED;
}
static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
unsigned short irqstat;
int x_pos, pos;
spin_lock(&uart->rx_lock);
irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
clear_dma_irqstat(uart->rx_dma_channel);
uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
uart->rx_dma_nrows = 0;
pos = uart->rx_dma_nrows * DMA_RX_XCOUNT;
if (pos > uart->rx_dma_buf.tail ||
uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
uart->rx_dma_buf.head = pos;
bfin_serial_dma_rx_chars(uart);
uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
}
spin_unlock(&uart->rx_lock);
return IRQ_HANDLED;
}
#endif
/*
* Return TIOCSER_TEMT when transmitter is not busy.
*/
static unsigned int bfin_serial_tx_empty(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short lsr;
lsr = UART_GET_LSR(uart);
if (lsr & TEMT)
return TIOCSER_TEMT;
else
return 0;
}
static void bfin_serial_break_ctl(struct uart_port *port, int break_state)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
u16 lcr = UART_GET_LCR(uart);
if (break_state)
lcr |= SB;
else
lcr &= ~SB;
UART_PUT_LCR(uart, lcr);
SSYNC();
}
static int bfin_serial_startup(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
#ifdef CONFIG_SERIAL_BFIN_DMA
dma_addr_t dma_handle;
if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
return -EBUSY;
}
if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
free_dma(uart->rx_dma_channel);
return -EBUSY;
}
set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);
uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
uart->rx_dma_buf.head = 0;
uart->rx_dma_buf.tail = 0;
uart->rx_dma_nrows = 0;
set_dma_config(uart->rx_dma_channel,
set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
INTR_ON_ROW, DIMENSION_2D,
DATA_SIZE_8,
DMA_SYNC_RESTART));
set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
set_dma_x_modify(uart->rx_dma_channel, 1);
set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
set_dma_y_modify(uart->rx_dma_channel, 1);
set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
enable_dma(uart->rx_dma_channel);
uart->rx_dma_timer.data = (unsigned long)(uart);
uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
add_timer(&(uart->rx_dma_timer));
#else
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
kgdboc_break_enabled = 0;
else {
# endif
if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
"BFIN_UART_RX", uart)) {
printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
return -EBUSY;
}
if (request_irq
(uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
"BFIN_UART_TX", uart)) {
printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
free_irq(uart->port.irq, uart);
return -EBUSY;
}
# ifdef CONFIG_BF54x
{
/*
* UART2 and UART3 on BF548 share interrupt PINs and DMA
* controllers with SPORT2 and SPORT3. UART rx and tx
* interrupts are generated in PIO mode only when their
* peripheral mapping registers are configured properly, which
* means the corresponding DMA channels must be requested in
* PIO mode as well.
*/
unsigned uart_dma_ch_rx, uart_dma_ch_tx;
switch (uart->port.irq) {
case IRQ_UART3_RX:
uart_dma_ch_rx = CH_UART3_RX;
uart_dma_ch_tx = CH_UART3_TX;
break;
case IRQ_UART2_RX:
uart_dma_ch_rx = CH_UART2_RX;
uart_dma_ch_tx = CH_UART2_TX;
break;
default:
uart_dma_ch_rx = uart_dma_ch_tx = 0;
break;
}
if (uart_dma_ch_rx &&
request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
printk(KERN_NOTICE "Failed to attach UART RX DMA channel\n");
free_irq(uart->port.irq, uart);
free_irq(uart->port.irq + 1, uart);
return -EBUSY;
}
if (uart_dma_ch_tx &&
request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
printk(KERN_NOTICE "Failed to attach UART TX DMA channel\n");
free_dma(uart_dma_ch_rx);
free_irq(uart->port.irq, uart);
free_irq(uart->port.irq + 1, uart);
return -EBUSY;
}
}
# endif
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
}
# endif
#endif
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0) {
if (request_irq(gpio_to_irq(uart->cts_pin),
bfin_serial_mctrl_cts_int,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
uart->cts_pin = -1;
pr_info("Unable to attach BlackFin UART CTS interrupt, disabling it.\n");
}
}
if (uart->rts_pin >= 0) {
gpio_direction_output(uart->rts_pin, 0);
}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
bfin_serial_mctrl_cts_int,
IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
uart->cts_pin = -1;
pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
}
/* The CTS and RTS pins are active-low. */
UART_PUT_MCR(uart, ACTS);
UART_SET_IER(uart, EDSSI);
#endif
UART_SET_IER(uart, ERBFI);
return 0;
}
static void bfin_serial_shutdown(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
#ifdef CONFIG_SERIAL_BFIN_DMA
disable_dma(uart->tx_dma_channel);
free_dma(uart->tx_dma_channel);
disable_dma(uart->rx_dma_channel);
free_dma(uart->rx_dma_channel);
del_timer(&(uart->rx_dma_timer));
dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
#else
#ifdef CONFIG_BF54x
switch (uart->port.irq) {
case IRQ_UART3_RX:
free_dma(CH_UART3_RX);
free_dma(CH_UART3_TX);
break;
case IRQ_UART2_RX:
free_dma(CH_UART2_RX);
free_dma(CH_UART2_TX);
break;
default:
break;
}
#endif
free_irq(uart->port.irq, uart);
free_irq(uart->port.irq+1, uart);
#endif
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0)
free_irq(uart->status_irq, uart);
#endif
}
static void
bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned long flags;
unsigned int baud, quot;
unsigned short val, ier, lcr = 0;
switch (termios->c_cflag & CSIZE) {
case CS8:
lcr = WLS(8);
break;
case CS7:
lcr = WLS(7);
break;
case CS6:
lcr = WLS(6);
break;
case CS5:
lcr = WLS(5);
break;
default:
printk(KERN_ERR "%s: word length not supported\n",
__func__);
}
/* Anomaly notes:
* 05000231 - the STOP bit is always set to 1 regardless of what the user sets.
*/
if (termios->c_cflag & CSTOPB) {
if (ANOMALY_05000231)
printk(KERN_WARNING "STOP bit settings other than 1 are not "
"supported due to anomaly 05000231.\n");
else
lcr |= STB;
}
if (termios->c_cflag & PARENB)
lcr |= PEN;
if (!(termios->c_cflag & PARODD))
lcr |= EPS;
if (termios->c_cflag & CMSPAR)
lcr |= STP;
spin_lock_irqsave(&uart->port.lock, flags);
port->read_status_mask = OE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (FE | PE);
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= BI;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= FE | PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= BI;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= OE;
}
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
/* If discipline is not IRDA, apply ANOMALY_05000230 */
if (termios->c_line != N_IRDA)
quot -= ANOMALY_05000230;
UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
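/*
 * Editor's note: USEC_PER_SEC / baud * 15 is ~1.5 character times for
 * a 10-bit frame (start + 8 data + stop bits), matching the 1.5
 * character-time guard described in the ANOMALY_05000363 comment earlier.
 */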
/* Disable UART */
ier = UART_GET_IER(uart);
UART_DISABLE_INTS(uart);
/* Set DLAB in LCR to Access DLL and DLH */
UART_SET_DLAB(uart);
UART_PUT_DLL(uart, quot & 0xFF);
UART_PUT_DLH(uart, (quot >> 8) & 0xFF);
SSYNC();
/* Clear DLAB in LCR to Access THR RBR IER */
UART_CLEAR_DLAB(uart);
UART_PUT_LCR(uart, lcr);
/* Enable UART */
UART_ENABLE_INTS(uart, ier);
val = UART_GET_GCTL(uart);
val |= UCEN;
UART_PUT_GCTL(uart, val);
/* Port speed changed, update the per-port timeout. */
uart_update_timeout(port, termios->c_cflag, baud);
spin_unlock_irqrestore(&uart->port.lock, flags);
}
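/*
 * Worked example (editor's addition; the clock figure is assumed purely
 * for illustration): with a 133 MHz system clock and 57600 baud,
 * uart_get_divisor() yields quot = 133000000 / (16 * 57600) ~= 144,
 * which the code above programs as DLL = 144 & 0xFF = 0x90 and DLH = 0
 * (before the ANOMALY_05000230 adjustment, if applicable).
 */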
static const char *bfin_serial_type(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
return uart->port.type == PORT_BFIN ? "BFIN-UART" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void bfin_serial_release_port(struct uart_port *port)
{
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int bfin_serial_request_port(struct uart_port *port)
{
return 0;
}
/*
* Configure/autoconfigure the port.
*/
static void bfin_serial_config_port(struct uart_port *port, int flags)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
if (flags & UART_CONFIG_TYPE &&
bfin_serial_request_port(&uart->port) == 0)
uart->port.type = PORT_BFIN;
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
* The only change we allow are to the flags and type, and
* even then only between PORT_BFIN and PORT_UNKNOWN
*/
static int
bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
{
return 0;
}
/*
* Enable the IrDA function if tty->ldisc.num is N_IRDA;
* in other cases, disable the IrDA function.
*/
static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
switch (ld) {
case N_IRDA:
val = UART_GET_GCTL(uart);
val |= (IREN | RPOLC);
UART_PUT_GCTL(uart, val);
break;
default:
val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
UART_PUT_GCTL(uart, val);
}
}
static void bfin_serial_reset_irda(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned short val;
val = UART_GET_GCTL(uart);
val &= ~(IREN | RPOLC);
UART_PUT_GCTL(uart, val);
SSYNC();
val |= (IREN | RPOLC);
UART_PUT_GCTL(uart, val);
SSYNC();
}
#ifdef CONFIG_CONSOLE_POLL
/* Anomaly notes:
* 05000099 - Because we only use THRE in poll_put and DR in poll_get,
* losing other bits of UART_LSR is not a problem here.
*/
static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
while (!(UART_GET_LSR(uart) & THRE))
cpu_relax();
UART_CLEAR_DLAB(uart);
UART_PUT_CHAR(uart, (unsigned char)chr);
}
static int bfin_serial_poll_get_char(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
unsigned char chr;
while (!(UART_GET_LSR(uart) & DR))
cpu_relax();
UART_CLEAR_DLAB(uart);
chr = UART_GET_CHAR(uart);
return chr;
}
#endif
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
static void bfin_kgdboc_port_shutdown(struct uart_port *port)
{
if (kgdboc_break_enabled) {
kgdboc_break_enabled = 0;
bfin_serial_shutdown(port);
}
}
static int bfin_kgdboc_port_startup(struct uart_port *port)
{
kgdboc_port_line = port->line;
kgdboc_break_enabled = !bfin_serial_startup(port);
return 0;
}
#endif
static struct uart_ops bfin_serial_pops = {
.tx_empty = bfin_serial_tx_empty,
.set_mctrl = bfin_serial_set_mctrl,
.get_mctrl = bfin_serial_get_mctrl,
.stop_tx = bfin_serial_stop_tx,
.start_tx = bfin_serial_start_tx,
.stop_rx = bfin_serial_stop_rx,
.enable_ms = bfin_serial_enable_ms,
.break_ctl = bfin_serial_break_ctl,
.startup = bfin_serial_startup,
.shutdown = bfin_serial_shutdown,
.set_termios = bfin_serial_set_termios,
.set_ldisc = bfin_serial_set_ldisc,
.type = bfin_serial_type,
.release_port = bfin_serial_release_port,
.request_port = bfin_serial_request_port,
.config_port = bfin_serial_config_port,
.verify_port = bfin_serial_verify_port,
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
.kgdboc_port_startup = bfin_kgdboc_port_startup,
.kgdboc_port_shutdown = bfin_kgdboc_port_shutdown,
#endif
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = bfin_serial_poll_put_char,
.poll_get_char = bfin_serial_poll_get_char,
#endif
};
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
/*
* If the port was already initialised (eg, by a boot loader),
* try to determine the current setup.
*/
static void __init
bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
int *parity, int *bits)
{
unsigned short status;
status = UART_GET_IER(uart) & (ERBFI | ETBEI);
if (status == (ERBFI | ETBEI)) {
/* ok, the port was enabled */
u16 lcr, dlh, dll;
lcr = UART_GET_LCR(uart);
*parity = 'n';
if (lcr & PEN) {
if (lcr & EPS)
*parity = 'e';
else
*parity = 'o';
}
switch (lcr & 0x03) {
case 0: *bits = 5; break;
case 1: *bits = 6; break;
case 2: *bits = 7; break;
case 3: *bits = 8; break;
}
/* Set DLAB in LCR to Access DLL and DLH */
UART_SET_DLAB(uart);
dll = UART_GET_DLL(uart);
dlh = UART_GET_DLH(uart);
/* Clear DLAB in LCR to Access THR RBR IER */
UART_CLEAR_DLAB(uart);
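/*
 * Recover the rate from the divisor latch; e.g. (illustrative values
 * only) with get_sclk() == 100 MHz and DLH:DLL == 651 this yields
 * 100000000 / (16 * 651) ~= 9600 baud.
 */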
*baud = get_sclk() / (16 * (dll | (dlh << 8)));
}
pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
}
static struct uart_driver bfin_serial_reg;
static void bfin_serial_console_putchar(struct uart_port *port, int ch)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
while (!(UART_GET_LSR(uart) & THRE))
barrier();
UART_PUT_CHAR(uart, ch);
}
#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
defined (CONFIG_EARLY_PRINTK) */
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
#define CLASS_BFIN_CONSOLE "bfin-console"
/*
* Interrupts are disabled on entering
*/
static void
bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
{
struct bfin_serial_port *uart = bfin_serial_ports[co->index];
unsigned long flags;
spin_lock_irqsave(&uart->port.lock, flags);
uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
spin_unlock_irqrestore(&uart->port.lock, flags);
}
static int __init
bfin_serial_console_setup(struct console *co, char *options)
{
struct bfin_serial_port *uart;
int baud = 57600;
int bits = 8;
int parity = 'n';
# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
int flow = 'r';
# else
int flow = 'n';
# endif
/*
* Check whether an invalid or unavailable uart number has been
* specified; we do not fall back to another port, we simply
* refuse the console setup.
*/
if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
return -ENODEV;
uart = bfin_serial_ports[co->index];
if (!uart)
return -ENODEV;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
bfin_serial_console_get_options(uart, &baud, &parity, &bits);
return uart_set_options(&uart->port, co, baud, parity, bits, flow);
}
static struct console bfin_serial_console = {
.name = BFIN_SERIAL_DEV_NAME,
.write = bfin_serial_console_write,
.device = uart_console_device,
.setup = bfin_serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bfin_serial_reg,
};
#define BFIN_SERIAL_CONSOLE &bfin_serial_console
#else
#define BFIN_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
#ifdef CONFIG_EARLY_PRINTK
static struct bfin_serial_port bfin_earlyprintk_port;
#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
/*
* Interrupts are disabled on entering
*/
static void
bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
{
unsigned long flags;
if (bfin_earlyprintk_port.port.line != co->index)
return;
spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
uart_console_write(&bfin_earlyprintk_port.port, s, count,
bfin_serial_console_putchar);
spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
}
/*
* This should have a .setup or .early_setup in it, but then things get called
* without the command line options, and the baud rate gets messed up - so
* don't let the common infrastructure play with things. (See the calls to
* setup & earlysetup in ./kernel/printk.c:register_console().)
*/
static struct console bfin_early_serial_console __initdata = {
.name = "early_BFuart",
.write = bfin_earlyprintk_console_write,
.device = uart_console_device,
.flags = CON_PRINTBUFFER,
.index = -1,
.data = &bfin_serial_reg,
};
#endif
static struct uart_driver bfin_serial_reg = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = BFIN_SERIAL_DEV_NAME,
.major = BFIN_SERIAL_MAJOR,
.minor = BFIN_SERIAL_MINOR,
.nr = BFIN_UART_NR_PORTS,
.cons = BFIN_SERIAL_CONSOLE,
};
static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
struct bfin_serial_port *uart = platform_get_drvdata(pdev);
return uart_suspend_port(&bfin_serial_reg, &uart->port);
}
static int bfin_serial_resume(struct platform_device *pdev)
{
struct bfin_serial_port *uart = platform_get_drvdata(pdev);
return uart_resume_port(&bfin_serial_reg, &uart->port);
}
static int bfin_serial_probe(struct platform_device *pdev)
{
struct resource *res;
struct bfin_serial_port *uart = NULL;
int ret = 0;
if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
return -ENOENT;
}
if (bfin_serial_ports[pdev->id] == NULL) {
uart = kzalloc(sizeof(*uart), GFP_KERNEL);
if (!uart) {
dev_err(&pdev->dev,
"fail to malloc bfin_serial_port\n");
return -ENOMEM;
}
bfin_serial_ports[pdev->id] = uart;
#ifdef CONFIG_EARLY_PRINTK
if (!(bfin_earlyprintk_port.port.membase
&& bfin_earlyprintk_port.port.line == pdev->id)) {
/*
* If the peripheral pins of the current port were already
* allocated in the earlyprintk probe stage, don't request them again.
*/
#endif
ret = peripheral_request_list(
(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
goto out_error_free_mem;
}
#ifdef CONFIG_EARLY_PRINTK
}
#endif
spin_lock_init(&uart->port.lock);
uart->port.uartclk = get_sclk();
uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
uart->port.ops = &bfin_serial_pops;
uart->port.line = pdev->id;
uart->port.iotype = UPIO_MEM;
uart->port.flags = UPF_BOOT_AUTOCONF;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
ret = -ENOENT;
goto out_error_free_peripherals;
}
uart->port.membase = ioremap(res->start,
resource_size(res));
if (!uart->port.membase) {
dev_err(&pdev->dev, "Cannot map uart IO\n");
ret = -ENXIO;
goto out_error_free_peripherals;
}
uart->port.mapbase = res->start;
uart->port.irq = platform_get_irq(pdev, 0);
if (uart->port.irq < 0) {
dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
ret = -ENOENT;
goto out_error_unmap;
}
uart->status_irq = platform_get_irq(pdev, 1);
if (uart->status_irq < 0) {
dev_err(&pdev->dev, "No uart status IRQ specified\n");
ret = -ENOENT;
goto out_error_unmap;
}
#ifdef CONFIG_SERIAL_BFIN_DMA
spin_lock_init(&uart->rx_lock);
uart->tx_done = 1;
uart->tx_count = 0;
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
ret = -ENOENT;
goto out_error_unmap;
}
uart->tx_dma_channel = res->start;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (res == NULL) {
dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
ret = -ENOENT;
goto out_error_unmap;
}
uart->rx_dma_channel = res->start;
init_timer(&(uart->rx_dma_timer));
#endif
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
uart->cts_pin = -1;
else
uart->cts_pin = res->start;
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
uart->rts_pin = -1;
else
uart->rts_pin = res->start;
# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
if (uart->rts_pin >= 0)
gpio_request(uart->rts_pin, DRIVER_NAME);
# endif
#endif
}
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
if (!is_early_platform_device(pdev)) {
#endif
uart = bfin_serial_ports[pdev->id];
uart->port.dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, uart);
ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
}
#endif
if (!ret)
return 0;
if (uart) {
out_error_unmap:
iounmap(uart->port.membase);
out_error_free_peripherals:
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
out_error_free_mem:
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
}
return ret;
}
static int __devexit bfin_serial_remove(struct platform_device *pdev)
{
struct bfin_serial_port *uart = platform_get_drvdata(pdev);
dev_set_drvdata(&pdev->dev, NULL);
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->rts_pin >= 0)
gpio_free(uart->rts_pin);
#endif
iounmap(uart->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
}
return 0;
}
static struct platform_driver bfin_serial_driver = {
.probe = bfin_serial_probe,
.remove = __devexit_p(bfin_serial_remove),
.suspend = bfin_serial_suspend,
.resume = bfin_serial_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
static __initdata struct early_platform_driver early_bfin_serial_driver = {
.class_str = CLASS_BFIN_CONSOLE,
.pdrv = &bfin_serial_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
};
static int __init bfin_serial_rs_console_init(void)
{
early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
register_console(&bfin_serial_console);
return 0;
}
console_initcall(bfin_serial_rs_console_init);
#endif
#ifdef CONFIG_EARLY_PRINTK
/*
* Memory can't be allocated dynamically during the earlyprintk init stage,
* so do an individual probe for earlyprintk with a static uart port variable.
*/
static int bfin_earlyprintk_probe(struct platform_device *pdev)
{
struct resource *res;
int ret;
if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
return -ENOENT;
}
ret = peripheral_request_list(
(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
return ret;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
ret = -ENOENT;
goto out_error_free_peripherals;
}
bfin_earlyprintk_port.port.membase = ioremap(res->start,
resource_size(res));
if (!bfin_earlyprintk_port.port.membase) {
dev_err(&pdev->dev, "Cannot map uart IO\n");
ret = -ENXIO;
goto out_error_free_peripherals;
}
bfin_earlyprintk_port.port.mapbase = res->start;
bfin_earlyprintk_port.port.line = pdev->id;
bfin_earlyprintk_port.port.uartclk = get_sclk();
bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
spin_lock_init(&bfin_earlyprintk_port.port.lock);
return 0;
out_error_free_peripherals:
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
return ret;
}
static struct platform_driver bfin_earlyprintk_driver = {
.probe = bfin_earlyprintk_probe,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
.class_str = CLASS_BFIN_EARLYPRINTK,
.pdrv = &bfin_earlyprintk_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
};
struct console __init *bfin_earlyserial_init(unsigned int port,
unsigned int cflag)
{
struct ktermios t;
char port_name[20];
if (port >= BFIN_UART_NR_PORTS)
return NULL;
/*
* Only probe resource of the given port in earlyprintk boot arg.
* The expected port id should be indicated in port name string.
*/
snprintf(port_name, sizeof(port_name), DRIVER_NAME ".%d", port);
early_platform_driver_register(&early_bfin_earlyprintk_driver,
port_name);
early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
if (!bfin_earlyprintk_port.port.membase)
return NULL;
#ifdef CONFIG_SERIAL_BFIN_CONSOLE
/*
* If we are using early serial, don't let the normal console rewind
* log buffer, since that causes things to be printed multiple times
*/
bfin_serial_console.flags &= ~CON_PRINTBUFFER;
#endif
bfin_early_serial_console.index = port;
t.c_cflag = cflag;
t.c_iflag = 0;
t.c_oflag = 0;
t.c_lflag = ICANON;
t.c_line = port;
bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
return &bfin_early_serial_console;
}
#endif /* CONFIG_EARLY_PRINTK */
static int __init bfin_serial_init(void)
{
int ret;
pr_info("Blackfin serial driver\n");
ret = uart_register_driver(&bfin_serial_reg);
if (ret) {
pr_err("failed to register %s:%d\n",
bfin_serial_reg.driver_name, ret);
return ret;
}
ret = platform_driver_register(&bfin_serial_driver);
if (ret) {
pr_err("fail to register bfin uart\n");
uart_unregister_driver(&bfin_serial_reg);
}
return ret;
}
static void __exit bfin_serial_exit(void)
{
platform_driver_unregister(&bfin_serial_driver);
uart_unregister_driver(&bfin_serial_reg);
}
module_init(bfin_serial_init);
module_exit(bfin_serial_exit);
MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
MODULE_DESCRIPTION("Blackfin generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
MODULE_ALIAS("platform:bfin-uart");
| gpl-2.0 |
vo1dz777/android_kernel_samsung_manta | drivers/pinctrl/pinconf-generic.c | 3151 | 3559 | /*
* Core driver for the generic pin config portions of the pin control subsystem
*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
*
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) "generic pinconfig core: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include "core.h"
#include "pinconf.h"
#ifdef CONFIG_DEBUG_FS
struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
};
#define PCONFDUMP(a, b, c) { .param = a, .display = b, .format = c }
static struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
};
void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
int i;
if (!ops->is_generic)
return;
for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
unsigned long config;
int ret;
/* We want to check out this parameter */
config = pinconf_to_config_packed(conf_items[i].param, 0);
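/*
 * Assumption based on pinconf-generic.h of the same era: the packed
 * word carries the param in its low 16 bits and the argument in the
 * high 16 bits, so packing with argument 0 asks the driver to fill
 * in the current value on the get call below.
 */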
ret = pin_config_get_for_pin(pctldev, pin, &config);
/* These are legal errors */
if (ret == -EINVAL || ret == -ENOTSUPP)
continue;
if (ret) {
seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
continue;
}
/* Space between multiple configs */
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
if (conf_items[i].format &&
pinconf_to_config_argument(config) != 0)
seq_printf(s, " (%u %s)",
pinconf_to_config_argument(config),
conf_items[i].format);
}
}
void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
struct seq_file *s, const char *gname)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
int i;
if (!ops->is_generic)
return;
for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
unsigned long config;
int ret;
/* We want to check out this parameter */
config = pinconf_to_config_packed(conf_items[i].param, 0);
ret = pin_config_group_get(dev_name(pctldev->dev), gname,
&config);
/* These are legal errors */
if (ret == -EINVAL || ret == -ENOTSUPP)
continue;
if (ret) {
seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
continue;
}
/* Space between multiple configs */
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
if (conf_items[i].format &&
pinconf_to_config_argument(config) != 0)
seq_printf(s, " (%u %s)",
pinconf_to_config_argument(config),
conf_items[i].format);
}
}
#endif
| gpl-2.0 |
HTCKernels/One-SV-international-k2u | drivers/tty/serial/ioc4_serial.c | 4943 | 82850 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* This file contains a module version of the ioc4 serial driver. This
* includes all the support functions needed (support functions, etc.)
* and the serial driver itself.
*/
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/circ_buf.h>
#include <linux/serial_reg.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ioc4.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
/*
* interesting things about the ioc4
*/
#define IOC4_NUM_SERIAL_PORTS 4 /* max ports per card */
#define IOC4_NUM_CARDS 8 /* max cards per partition */
#define GET_SIO_IR(_n) (_n == 0) ? (IOC4_SIO_IR_S0) : \
(_n == 1) ? (IOC4_SIO_IR_S1) : \
(_n == 2) ? (IOC4_SIO_IR_S2) : \
(IOC4_SIO_IR_S3)
#define GET_OTHER_IR(_n) (_n == 0) ? (IOC4_OTHER_IR_S0_MEMERR) : \
(_n == 1) ? (IOC4_OTHER_IR_S1_MEMERR) : \
(_n == 2) ? (IOC4_OTHER_IR_S2_MEMERR) : \
(IOC4_OTHER_IR_S3_MEMERR)
/*
* All IOC4 registers are 32 bits wide.
*/
/*
* PCI Memory Space Map
*/
#define IOC4_PCI_ERR_ADDR_L 0x000 /* Low Error Address */
#define IOC4_PCI_ERR_ADDR_VLD (0x1 << 0)
#define IOC4_PCI_ERR_ADDR_MST_ID_MSK (0xf << 1)
#define IOC4_PCI_ERR_ADDR_MST_NUM_MSK (0xe << 1)
#define IOC4_PCI_ERR_ADDR_MST_TYP_MSK (0x1 << 1)
#define IOC4_PCI_ERR_ADDR_MUL_ERR (0x1 << 5)
#define IOC4_PCI_ERR_ADDR_ADDR_MSK (0x3ffffff << 6)
/* Interrupt types */
#define IOC4_SIO_INTR_TYPE 0
#define IOC4_OTHER_INTR_TYPE 1
#define IOC4_NUM_INTR_TYPES 2
/* Bitmasks for IOC4_SIO_IR, IOC4_SIO_IEC, and IOC4_SIO_IES */
#define IOC4_SIO_IR_S0_TX_MT 0x00000001 /* Serial port 0 TX empty */
#define IOC4_SIO_IR_S0_RX_FULL 0x00000002 /* Port 0 RX buf full */
#define IOC4_SIO_IR_S0_RX_HIGH 0x00000004 /* Port 0 RX hiwat */
#define IOC4_SIO_IR_S0_RX_TIMER 0x00000008 /* Port 0 RX timeout */
#define IOC4_SIO_IR_S0_DELTA_DCD 0x00000010 /* Port 0 delta DCD */
#define IOC4_SIO_IR_S0_DELTA_CTS 0x00000020 /* Port 0 delta CTS */
#define IOC4_SIO_IR_S0_INT 0x00000040 /* Port 0 pass-thru intr */
#define IOC4_SIO_IR_S0_TX_EXPLICIT 0x00000080 /* Port 0 explicit TX thru */
#define IOC4_SIO_IR_S1_TX_MT 0x00000100 /* Serial port 1 */
#define IOC4_SIO_IR_S1_RX_FULL 0x00000200 /* */
#define IOC4_SIO_IR_S1_RX_HIGH 0x00000400 /* */
#define IOC4_SIO_IR_S1_RX_TIMER 0x00000800 /* */
#define IOC4_SIO_IR_S1_DELTA_DCD 0x00001000 /* */
#define IOC4_SIO_IR_S1_DELTA_CTS 0x00002000 /* */
#define IOC4_SIO_IR_S1_INT 0x00004000 /* */
#define IOC4_SIO_IR_S1_TX_EXPLICIT 0x00008000 /* */
#define IOC4_SIO_IR_S2_TX_MT 0x00010000 /* Serial port 2 */
#define IOC4_SIO_IR_S2_RX_FULL 0x00020000 /* */
#define IOC4_SIO_IR_S2_RX_HIGH 0x00040000 /* */
#define IOC4_SIO_IR_S2_RX_TIMER 0x00080000 /* */
#define IOC4_SIO_IR_S2_DELTA_DCD 0x00100000 /* */
#define IOC4_SIO_IR_S2_DELTA_CTS 0x00200000 /* */
#define IOC4_SIO_IR_S2_INT 0x00400000 /* */
#define IOC4_SIO_IR_S2_TX_EXPLICIT 0x00800000 /* */
#define IOC4_SIO_IR_S3_TX_MT 0x01000000 /* Serial port 3 */
#define IOC4_SIO_IR_S3_RX_FULL 0x02000000 /* */
#define IOC4_SIO_IR_S3_RX_HIGH 0x04000000 /* */
#define IOC4_SIO_IR_S3_RX_TIMER 0x08000000 /* */
#define IOC4_SIO_IR_S3_DELTA_DCD 0x10000000 /* */
#define IOC4_SIO_IR_S3_DELTA_CTS 0x20000000 /* */
#define IOC4_SIO_IR_S3_INT 0x40000000 /* */
#define IOC4_SIO_IR_S3_TX_EXPLICIT 0x80000000 /* */
/* Per device interrupt masks */
#define IOC4_SIO_IR_S0 (IOC4_SIO_IR_S0_TX_MT | \
IOC4_SIO_IR_S0_RX_FULL | \
IOC4_SIO_IR_S0_RX_HIGH | \
IOC4_SIO_IR_S0_RX_TIMER | \
IOC4_SIO_IR_S0_DELTA_DCD | \
IOC4_SIO_IR_S0_DELTA_CTS | \
IOC4_SIO_IR_S0_INT | \
IOC4_SIO_IR_S0_TX_EXPLICIT)
#define IOC4_SIO_IR_S1 (IOC4_SIO_IR_S1_TX_MT | \
IOC4_SIO_IR_S1_RX_FULL | \
IOC4_SIO_IR_S1_RX_HIGH | \
IOC4_SIO_IR_S1_RX_TIMER | \
IOC4_SIO_IR_S1_DELTA_DCD | \
IOC4_SIO_IR_S1_DELTA_CTS | \
IOC4_SIO_IR_S1_INT | \
IOC4_SIO_IR_S1_TX_EXPLICIT)
#define IOC4_SIO_IR_S2 (IOC4_SIO_IR_S2_TX_MT | \
IOC4_SIO_IR_S2_RX_FULL | \
IOC4_SIO_IR_S2_RX_HIGH | \
IOC4_SIO_IR_S2_RX_TIMER | \
IOC4_SIO_IR_S2_DELTA_DCD | \
IOC4_SIO_IR_S2_DELTA_CTS | \
IOC4_SIO_IR_S2_INT | \
IOC4_SIO_IR_S2_TX_EXPLICIT)
#define IOC4_SIO_IR_S3 (IOC4_SIO_IR_S3_TX_MT | \
IOC4_SIO_IR_S3_RX_FULL | \
IOC4_SIO_IR_S3_RX_HIGH | \
IOC4_SIO_IR_S3_RX_TIMER | \
IOC4_SIO_IR_S3_DELTA_DCD | \
IOC4_SIO_IR_S3_DELTA_CTS | \
IOC4_SIO_IR_S3_INT | \
IOC4_SIO_IR_S3_TX_EXPLICIT)
/* Bitmasks for IOC4_OTHER_IR, IOC4_OTHER_IEC, and IOC4_OTHER_IES */
#define IOC4_OTHER_IR_ATA_INT 0x00000001 /* ATAPI intr pass-thru */
#define IOC4_OTHER_IR_ATA_MEMERR 0x00000002 /* ATAPI DMA PCI error */
#define IOC4_OTHER_IR_S0_MEMERR 0x00000004 /* Port 0 PCI error */
#define IOC4_OTHER_IR_S1_MEMERR 0x00000008 /* Port 1 PCI error */
#define IOC4_OTHER_IR_S2_MEMERR 0x00000010 /* Port 2 PCI error */
#define IOC4_OTHER_IR_S3_MEMERR 0x00000020 /* Port 3 PCI error */
#define IOC4_OTHER_IR_KBD_INT 0x00000040 /* Keyboard/mouse */
#define IOC4_OTHER_IR_RESERVED 0x007fff80 /* Reserved */
#define IOC4_OTHER_IR_RT_INT 0x00800000 /* INT_OUT section output */
#define IOC4_OTHER_IR_GEN_INT 0xff000000 /* Generic pins */
#define IOC4_OTHER_IR_SER_MEMERR (IOC4_OTHER_IR_S0_MEMERR | IOC4_OTHER_IR_S1_MEMERR | \
IOC4_OTHER_IR_S2_MEMERR | IOC4_OTHER_IR_S3_MEMERR)
/* Bitmasks for IOC4_SIO_CR */
#define IOC4_SIO_CR_CMD_PULSE_SHIFT 0 /* byte bus strobe shift */
#define IOC4_SIO_CR_ARB_DIAG_TX0 0x00000000
#define IOC4_SIO_CR_ARB_DIAG_RX0 0x00000010
#define IOC4_SIO_CR_ARB_DIAG_TX1 0x00000020
#define IOC4_SIO_CR_ARB_DIAG_RX1 0x00000030
#define IOC4_SIO_CR_ARB_DIAG_TX2 0x00000040
#define IOC4_SIO_CR_ARB_DIAG_RX2 0x00000050
#define IOC4_SIO_CR_ARB_DIAG_TX3 0x00000060
#define IOC4_SIO_CR_ARB_DIAG_RX3 0x00000070
#define IOC4_SIO_CR_SIO_DIAG_IDLE 0x00000080 /* 0 -> active request among
serial ports (ro) */
/* Defs for some of the generic I/O pins */
#define IOC4_GPCR_UART0_MODESEL 0x10 /* Pin is output to port 0
mode sel */
#define IOC4_GPCR_UART1_MODESEL 0x20 /* Pin is output to port 1
mode sel */
#define IOC4_GPCR_UART2_MODESEL 0x40 /* Pin is output to port 2
mode sel */
#define IOC4_GPCR_UART3_MODESEL 0x80 /* Pin is output to port 3
mode sel */
#define IOC4_GPPR_UART0_MODESEL_PIN 4 /* GIO pin controlling
uart 0 mode select */
#define IOC4_GPPR_UART1_MODESEL_PIN 5 /* GIO pin controlling
uart 1 mode select */
#define IOC4_GPPR_UART2_MODESEL_PIN 6 /* GIO pin controlling
uart 2 mode select */
#define IOC4_GPPR_UART3_MODESEL_PIN 7 /* GIO pin controlling
uart 3 mode select */
/* Bitmasks for serial RX status byte */
#define IOC4_RXSB_OVERRUN 0x01 /* Char(s) lost */
#define IOC4_RXSB_PAR_ERR 0x02 /* Parity error */
#define IOC4_RXSB_FRAME_ERR 0x04 /* Framing error */
#define IOC4_RXSB_BREAK 0x08 /* Break character */
#define IOC4_RXSB_CTS 0x10 /* State of CTS */
#define IOC4_RXSB_DCD 0x20 /* State of DCD */
#define IOC4_RXSB_MODEM_VALID 0x40 /* DCD, CTS, and OVERRUN are valid */
#define IOC4_RXSB_DATA_VALID 0x80 /* Data byte, FRAME_ERR PAR_ERR
* & BREAK valid */
/* Bitmasks for serial TX control byte */
#define IOC4_TXCB_INT_WHEN_DONE 0x20 /* Interrupt after this byte is sent */
#define IOC4_TXCB_INVALID 0x00 /* Byte is invalid */
#define IOC4_TXCB_VALID 0x40 /* Byte is valid */
#define IOC4_TXCB_MCR 0x80 /* Data<7:0> to modem control reg */
#define IOC4_TXCB_DELAY 0xc0 /* Delay data<7:0> mSec */
/* Bitmasks for IOC4_SBBR_L */
#define IOC4_SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
/* Bitmasks for IOC4_SSCR_<3:0> */
#define IOC4_SSCR_RX_THRESHOLD 0x000001ff /* Hiwater mark */
#define IOC4_SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
#define IOC4_SSCR_HFC_EN 0x00020000 /* Hardware flow control enabled */
#define IOC4_SSCR_RX_RING_DCD 0x00040000 /* Post RX record on delta-DCD */
#define IOC4_SSCR_RX_RING_CTS 0x00080000 /* Post RX record on delta-CTS */
#define IOC4_SSCR_DIAG 0x00200000 /* Bypass clock divider for sim */
#define IOC4_SSCR_RX_DRAIN 0x08000000 /* Drain RX buffer to memory */
#define IOC4_SSCR_DMA_EN 0x10000000 /* Enable ring buffer DMA */
#define IOC4_SSCR_DMA_PAUSE 0x20000000 /* Pause DMA */
#define IOC4_SSCR_PAUSE_STATE 0x40000000 /* Sets when PAUSE takes effect */
#define IOC4_SSCR_RESET 0x80000000 /* Reset DMA channels */
/* All producer/consumer pointers are the same bitfield */
#define IOC4_PROD_CONS_PTR_4K 0x00000ff8 /* For 4K buffers */
#define IOC4_PROD_CONS_PTR_1K 0x000003f8 /* For 1K buffers */
#define IOC4_PROD_CONS_PTR_OFF 3
/* Bitmasks for IOC4_SRCIR_<3:0> */
#define IOC4_SRCIR_ARM 0x80000000 /* Arm RX timer */
/* Bitmasks for IOC4_SHADOW_<3:0> */
#define IOC4_SHADOW_DR 0x00000001 /* Data ready */
#define IOC4_SHADOW_OE 0x00000002 /* Overrun error */
#define IOC4_SHADOW_PE 0x00000004 /* Parity error */
#define IOC4_SHADOW_FE 0x00000008 /* Framing error */
#define IOC4_SHADOW_BI 0x00000010 /* Break interrupt */
#define IOC4_SHADOW_THRE 0x00000020 /* Xmit holding register empty */
#define IOC4_SHADOW_TEMT 0x00000040 /* Xmit shift register empty */
#define IOC4_SHADOW_RFCE 0x00000080 /* Char in RX fifo has an error */
#define IOC4_SHADOW_DCTS 0x00010000 /* Delta clear to send */
#define IOC4_SHADOW_DDCD 0x00080000 /* Delta data carrier detect */
#define IOC4_SHADOW_CTS 0x00100000 /* Clear to send */
#define IOC4_SHADOW_DCD 0x00800000 /* Data carrier detect */
#define IOC4_SHADOW_DTR 0x01000000 /* Data terminal ready */
#define IOC4_SHADOW_RTS 0x02000000 /* Request to send */
#define IOC4_SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
#define IOC4_SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
#define IOC4_SHADOW_LOOP 0x10000000 /* Loopback enabled */
/* Bitmasks for IOC4_SRTR_<3:0> */
#define IOC4_SRTR_CNT 0x00000fff /* Reload value for RX timer */
#define IOC4_SRTR_CNT_VAL 0x0fff0000 /* Current value of RX timer */
#define IOC4_SRTR_CNT_VAL_SHIFT 16
#define IOC4_SRTR_HZ 16000 /* SRTR clock frequency */
/* Serial port register map used for DMA and PIO serial I/O */
struct ioc4_serialregs {
uint32_t sscr;
uint32_t stpir;
uint32_t stcir;
uint32_t srpir;
uint32_t srcir;
uint32_t srtr;
uint32_t shadow;
};
/* IOC4 UART register map */
struct ioc4_uartregs {
char i4u_lcr;
union {
char iir; /* read only */
char fcr; /* write only */
} u3;
union {
char ier; /* DLAB == 0 */
char dlm; /* DLAB == 1 */
} u2;
union {
char rbr; /* read only, DLAB == 0 */
char thr; /* write only, DLAB == 0 */
char dll; /* DLAB == 1 */
} u1;
char i4u_scr;
char i4u_msr;
char i4u_lsr;
char i4u_mcr;
};
/* short names */
#define i4u_dll u1.dll
#define i4u_ier u2.ier
#define i4u_dlm u2.dlm
#define i4u_fcr u3.fcr
/* Serial port registers used for DMA serial I/O */
struct ioc4_serial {
uint32_t sbbr01_l;
uint32_t sbbr01_h;
uint32_t sbbr23_l;
uint32_t sbbr23_h;
struct ioc4_serialregs port_0;
struct ioc4_serialregs port_1;
struct ioc4_serialregs port_2;
struct ioc4_serialregs port_3;
struct ioc4_uartregs uart_0;
struct ioc4_uartregs uart_1;
struct ioc4_uartregs uart_2;
struct ioc4_uartregs uart_3;
};
/* UART clock speed */
#define IOC4_SER_XIN_CLK_66 66666667
#define IOC4_SER_XIN_CLK_33 33333333
#define IOC4_W_IES 0
#define IOC4_W_IEC 1
typedef void ioc4_intr_func_f(void *, uint32_t);
typedef ioc4_intr_func_f *ioc4_intr_func_t;
static unsigned int Num_of_ioc4_cards;
/* defining this will get you LOTS of great debug info */
//#define DEBUG_INTERRUPTS
#define DPRINT_CONFIG(_x...) ;
//#define DPRINT_CONFIG(_x...) printk _x
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
/* number of characters we want to transmit to the lower level at a time */
#define IOC4_MAX_CHARS 256
#define IOC4_FIFO_CHARS 255
/* Device name we're using */
#define DEVICE_NAME_RS232 "ttyIOC"
#define DEVICE_NAME_RS422 "ttyAIOC"
#define DEVICE_MAJOR 204
#define DEVICE_MINOR_RS232 50
#define DEVICE_MINOR_RS422 84
/* register offsets */
#define IOC4_SERIAL_OFFSET 0x300
/* flags for next_char_state */
#define NCS_BREAK 0x1
#define NCS_PARITY 0x2
#define NCS_FRAMING 0x4
#define NCS_OVERRUN 0x8
/* cause we need SOME parameters ... */
#define MIN_BAUD_SUPPORTED 1200
#define MAX_BAUD_SUPPORTED 115200
/* protocol types supported */
#define PROTO_RS232 3
#define PROTO_RS422 7
/* Notification types */
#define N_DATA_READY 0x01
#define N_OUTPUT_LOWAT 0x02
#define N_BREAK 0x04
#define N_PARITY_ERROR 0x08
#define N_FRAMING_ERROR 0x10
#define N_OVERRUN_ERROR 0x20
#define N_DDCD 0x40
#define N_DCTS 0x80
#define N_ALL_INPUT (N_DATA_READY | N_BREAK | \
N_PARITY_ERROR | N_FRAMING_ERROR | \
N_OVERRUN_ERROR | N_DDCD | N_DCTS)
#define N_ALL_OUTPUT N_OUTPUT_LOWAT
#define N_ALL_ERRORS (N_PARITY_ERROR | N_FRAMING_ERROR | N_OVERRUN_ERROR)
#define N_ALL (N_DATA_READY | N_OUTPUT_LOWAT | N_BREAK | \
N_PARITY_ERROR | N_FRAMING_ERROR | \
N_OVERRUN_ERROR | N_DDCD | N_DCTS)
#define SER_DIVISOR(_x, clk) (((clk) + (_x) * 8) / ((_x) * 16))
#define DIVISOR_TO_BAUD(div, clk) ((clk) / 16 / (div))
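/* Worked example (illustrative only): at 115200 baud on the 66 MHz
 * clock, SER_DIVISOR(115200, 66666667) = 67588267 / 1843200 = 36 and
 * DIVISOR_TO_BAUD(36, 66666667) = 115740, which is inside the 1%
 * tolerance that set_baud() below accepts.
 */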
/* Some masks */
#define LCR_MASK_BITS_CHAR (UART_LCR_WLEN5 | UART_LCR_WLEN6 \
| UART_LCR_WLEN7 | UART_LCR_WLEN8)
#define LCR_MASK_STOP_BITS (UART_LCR_STOP)
#define PENDING(_p) (readl(&(_p)->ip_mem->sio_ir.raw) & _p->ip_ienb)
#define READ_SIO_IR(_p) readl(&(_p)->ip_mem->sio_ir.raw)
/* Default to 4k buffers */
#ifdef IOC4_1K_BUFFERS
#define RING_BUF_SIZE 1024
#define IOC4_BUF_SIZE_BIT 0
#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_1K
#else
#define RING_BUF_SIZE 4096
#define IOC4_BUF_SIZE_BIT IOC4_SBBR_L_SIZE
#define PROD_CONS_MASK IOC4_PROD_CONS_PTR_4K
#endif
#define TOTAL_RING_BUF_SIZE (RING_BUF_SIZE * 4)
/*
* This is the entry saved by the driver - one per card
*/
#define UART_PORT_MIN 0
#define UART_PORT_RS232 UART_PORT_MIN
#define UART_PORT_RS422 1
#define UART_PORT_COUNT 2 /* one for each mode */
struct ioc4_control {
int ic_irq;
struct {
/* uart ports are allocated here - 1 for rs232, 1 for rs422 */
struct uart_port icp_uart_port[UART_PORT_COUNT];
/* Handy reference material */
struct ioc4_port *icp_port;
} ic_port[IOC4_NUM_SERIAL_PORTS];
struct ioc4_soft *ic_soft;
};
/*
* per-IOC4 data structure
*/
#define MAX_IOC4_INTR_ENTS (8 * sizeof(uint32_t))
struct ioc4_soft {
struct ioc4_misc_regs __iomem *is_ioc4_misc_addr;
struct ioc4_serial __iomem *is_ioc4_serial_addr;
/* Each interrupt type has an entry in the array */
struct ioc4_intr_type {
/*
* Each in-use entry in this array contains at least
* one nonzero bit in sd_bits; no two entries in this
* array have overlapping sd_bits values.
*/
struct ioc4_intr_info {
uint32_t sd_bits;
ioc4_intr_func_f *sd_intr;
void *sd_info;
} is_intr_info[MAX_IOC4_INTR_ENTS];
/* Number of entries active in the above array */
atomic_t is_num_intrs;
} is_intr_type[IOC4_NUM_INTR_TYPES];
/* is_ir_lock must be held while
* modifying sio_ie values, so
* we can be sure that sio_ie is
* not changing when we read it
* along with sio_ir.
*/
spinlock_t is_ir_lock; /* SIO_IE[SC] mod lock */
};
/* Local port info for each IOC4 serial ports */
struct ioc4_port {
struct uart_port *ip_port; /* current active port ptr */
/* Ptrs for all ports */
struct uart_port *ip_all_ports[UART_PORT_COUNT];
/* Back ptrs for this port */
struct ioc4_control *ip_control;
struct pci_dev *ip_pdev;
struct ioc4_soft *ip_ioc4_soft;
/* pci mem addresses */
struct ioc4_misc_regs __iomem *ip_mem;
struct ioc4_serial __iomem *ip_serial;
struct ioc4_serialregs __iomem *ip_serial_regs;
struct ioc4_uartregs __iomem *ip_uart_regs;
/* Ring buffer page for this port */
dma_addr_t ip_dma_ringbuf;
/* vaddr of ring buffer */
struct ring_buffer *ip_cpu_ringbuf;
/* Rings for this port */
struct ring *ip_inring;
struct ring *ip_outring;
/* Hook to port specific values */
struct hooks *ip_hooks;
spinlock_t ip_lock;
/* Various rx/tx parameters */
int ip_baud;
int ip_tx_lowat;
int ip_rx_timeout;
/* Copy of notification bits */
int ip_notify;
/* Shadow copies of various registers so we don't need to PIO
* read them constantly
*/
uint32_t ip_ienb; /* Enabled interrupts */
uint32_t ip_sscr;
uint32_t ip_tx_prod;
uint32_t ip_rx_cons;
int ip_pci_bus_speed;
unsigned char ip_flags;
};
/* tx low water mark. We need to notify the driver whenever tx is getting
* close to empty so it can refill the tx buffer and keep things going.
* Let's assume that if we interrupt 1 ms before the tx goes idle, we'll
* have no trouble getting in more chars in time (I certainly hope so).
*/
#define TX_LOWAT_LATENCY 1000
#define TX_LOWAT_HZ (1000000 / TX_LOWAT_LATENCY)
#define TX_LOWAT_CHARS(baud) (baud / 10 / TX_LOWAT_HZ)
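/* e.g. at 115200 baud: TX_LOWAT_CHARS = 115200 / 10 / 1000 = 11 chars,
 * i.e. roughly 1 ms of transmit time remaining when lowat fires
 * (illustrative figures).
 */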
/* Flags per port */
#define INPUT_HIGH 0x01
#define DCD_ON 0x02
#define LOWAT_WRITTEN 0x04
#define READ_ABORTED 0x08
#define PORT_ACTIVE 0x10
#define PORT_INACTIVE 0 /* This is the value when "off" */
/* Since each port has different register offsets and bitmasks
* for everything, we'll store those that we need in tables so we
* don't have to be constantly checking the port we are dealing with.
*/
struct hooks {
uint32_t intr_delta_dcd;
uint32_t intr_delta_cts;
uint32_t intr_tx_mt;
uint32_t intr_rx_timer;
uint32_t intr_rx_high;
uint32_t intr_tx_explicit;
uint32_t intr_dma_error;
uint32_t intr_clear;
uint32_t intr_all;
int rs422_select_pin;
};
static struct hooks hooks_array[IOC4_NUM_SERIAL_PORTS] = {
/* Values for port 0 */
{
IOC4_SIO_IR_S0_DELTA_DCD, IOC4_SIO_IR_S0_DELTA_CTS,
IOC4_SIO_IR_S0_TX_MT, IOC4_SIO_IR_S0_RX_TIMER,
IOC4_SIO_IR_S0_RX_HIGH, IOC4_SIO_IR_S0_TX_EXPLICIT,
IOC4_OTHER_IR_S0_MEMERR,
(IOC4_SIO_IR_S0_TX_MT | IOC4_SIO_IR_S0_RX_FULL |
IOC4_SIO_IR_S0_RX_HIGH | IOC4_SIO_IR_S0_RX_TIMER |
IOC4_SIO_IR_S0_DELTA_DCD | IOC4_SIO_IR_S0_DELTA_CTS |
IOC4_SIO_IR_S0_INT | IOC4_SIO_IR_S0_TX_EXPLICIT),
IOC4_SIO_IR_S0, IOC4_GPPR_UART0_MODESEL_PIN,
},
/* Values for port 1 */
{
IOC4_SIO_IR_S1_DELTA_DCD, IOC4_SIO_IR_S1_DELTA_CTS,
IOC4_SIO_IR_S1_TX_MT, IOC4_SIO_IR_S1_RX_TIMER,
IOC4_SIO_IR_S1_RX_HIGH, IOC4_SIO_IR_S1_TX_EXPLICIT,
IOC4_OTHER_IR_S1_MEMERR,
(IOC4_SIO_IR_S1_TX_MT | IOC4_SIO_IR_S1_RX_FULL |
IOC4_SIO_IR_S1_RX_HIGH | IOC4_SIO_IR_S1_RX_TIMER |
IOC4_SIO_IR_S1_DELTA_DCD | IOC4_SIO_IR_S1_DELTA_CTS |
IOC4_SIO_IR_S1_INT | IOC4_SIO_IR_S1_TX_EXPLICIT),
IOC4_SIO_IR_S1, IOC4_GPPR_UART1_MODESEL_PIN,
},
/* Values for port 2 */
{
IOC4_SIO_IR_S2_DELTA_DCD, IOC4_SIO_IR_S2_DELTA_CTS,
IOC4_SIO_IR_S2_TX_MT, IOC4_SIO_IR_S2_RX_TIMER,
IOC4_SIO_IR_S2_RX_HIGH, IOC4_SIO_IR_S2_TX_EXPLICIT,
IOC4_OTHER_IR_S2_MEMERR,
(IOC4_SIO_IR_S2_TX_MT | IOC4_SIO_IR_S2_RX_FULL |
IOC4_SIO_IR_S2_RX_HIGH | IOC4_SIO_IR_S2_RX_TIMER |
IOC4_SIO_IR_S2_DELTA_DCD | IOC4_SIO_IR_S2_DELTA_CTS |
IOC4_SIO_IR_S2_INT | IOC4_SIO_IR_S2_TX_EXPLICIT),
IOC4_SIO_IR_S2, IOC4_GPPR_UART2_MODESEL_PIN,
},
/* Values for port 3 */
{
IOC4_SIO_IR_S3_DELTA_DCD, IOC4_SIO_IR_S3_DELTA_CTS,
IOC4_SIO_IR_S3_TX_MT, IOC4_SIO_IR_S3_RX_TIMER,
IOC4_SIO_IR_S3_RX_HIGH, IOC4_SIO_IR_S3_TX_EXPLICIT,
IOC4_OTHER_IR_S3_MEMERR,
(IOC4_SIO_IR_S3_TX_MT | IOC4_SIO_IR_S3_RX_FULL |
IOC4_SIO_IR_S3_RX_HIGH | IOC4_SIO_IR_S3_RX_TIMER |
IOC4_SIO_IR_S3_DELTA_DCD | IOC4_SIO_IR_S3_DELTA_CTS |
IOC4_SIO_IR_S3_INT | IOC4_SIO_IR_S3_TX_EXPLICIT),
IOC4_SIO_IR_S3, IOC4_GPPR_UART3_MODESEL_PIN,
}
};
/* A ring buffer entry */
struct ring_entry {
union {
struct {
uint32_t alldata;
uint32_t allsc;
} all;
struct {
char data[4]; /* data bytes */
char sc[4]; /* status/control */
} s;
} u;
};
/* Test the valid bits in any of the 4 sc chars using "allsc" member */
#define RING_ANY_VALID \
((uint32_t)(IOC4_RXSB_MODEM_VALID | IOC4_RXSB_DATA_VALID) * 0x01010101)
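/* (IOC4_RXSB_MODEM_VALID | IOC4_RXSB_DATA_VALID) == 0xc0; multiplying
 * by 0x01010101 replicates that mask into each of the four sc bytes,
 * so a single 32-bit test covers the valid bits of all four entries.
 */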
#define ring_sc u.s.sc
#define ring_data u.s.data
#define ring_allsc u.all.allsc
/* Number of entries per ring buffer. */
#define ENTRIES_PER_RING (RING_BUF_SIZE / (int) sizeof(struct ring_entry))
/* An individual ring */
struct ring {
struct ring_entry entries[ENTRIES_PER_RING];
};
/* The whole enchilada */
struct ring_buffer {
struct ring TX_0_OR_2;
struct ring RX_0_OR_2;
struct ring TX_1_OR_3;
struct ring RX_1_OR_3;
};
/* Get a ring from a port struct */
#define RING(_p, _wh) &(((struct ring_buffer *)((_p)->ip_cpu_ringbuf))->_wh)
/* Infinite loop detection.
*/
#define MAXITER 10000000
/* Prototypes */
static void receive_chars(struct uart_port *);
static void handle_intr(void *arg, uint32_t sio_ir);
/*
* port_is_active - determines if this port is currently active
* @port: ptr to soft struct for this port
* @uart_port: uart port to test for
*/
static inline int port_is_active(struct ioc4_port *port,
struct uart_port *uart_port)
{
if (port) {
if ((port->ip_flags & PORT_ACTIVE)
&& (port->ip_port == uart_port))
return 1;
}
return 0;
}
/**
* write_ireg - write the interrupt regs
* @ioc4_soft: ptr to soft struct for this port
* @val: value to write
* @which: which register
* @type: which ireg set
*/
static inline void
write_ireg(struct ioc4_soft *ioc4_soft, uint32_t val, int which, int type)
{
struct ioc4_misc_regs __iomem *mem = ioc4_soft->is_ioc4_misc_addr;
unsigned long flags;
spin_lock_irqsave(&ioc4_soft->is_ir_lock, flags);
switch (type) {
case IOC4_SIO_INTR_TYPE:
switch (which) {
case IOC4_W_IES:
writel(val, &mem->sio_ies.raw);
break;
case IOC4_W_IEC:
writel(val, &mem->sio_iec.raw);
break;
}
break;
case IOC4_OTHER_INTR_TYPE:
switch (which) {
case IOC4_W_IES:
writel(val, &mem->other_ies.raw);
break;
case IOC4_W_IEC:
writel(val, &mem->other_iec.raw);
break;
}
break;
default:
break;
}
spin_unlock_irqrestore(&ioc4_soft->is_ir_lock, flags);
}
/**
* set_baud - Baud rate setting code
* @port: port to set
* @baud: baud rate to use
*/
static int set_baud(struct ioc4_port *port, int baud)
{
int actual_baud;
int diff;
int lcr;
unsigned short divisor;
struct ioc4_uartregs __iomem *uart;
divisor = SER_DIVISOR(baud, port->ip_pci_bus_speed);
if (!divisor)
return 1;
actual_baud = DIVISOR_TO_BAUD(divisor, port->ip_pci_bus_speed);
diff = actual_baud - baud;
if (diff < 0)
diff = -diff;
/* If we're within 1%, we've found a match */
if (diff * 100 > actual_baud)
return 1;
uart = port->ip_uart_regs;
lcr = readb(&uart->i4u_lcr);
writeb(lcr | UART_LCR_DLAB, &uart->i4u_lcr);
writeb((unsigned char)divisor, &uart->i4u_dll);
writeb((unsigned char)(divisor >> 8), &uart->i4u_dlm);
writeb(lcr, &uart->i4u_lcr);
return 0;
}
/**
* get_ioc4_port - given a uart port, return the control structure
* @the_port: uart port
* @set: set this port as current
*/
static struct ioc4_port *get_ioc4_port(struct uart_port *the_port, int set)
{
struct ioc4_driver_data *idd = dev_get_drvdata(the_port->dev);
struct ioc4_control *control = idd->idd_serial_data;
struct ioc4_port *port;
int port_num, port_type;
if (control) {
for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS;
port_num++) {
port = control->ic_port[port_num].icp_port;
if (!port)
continue;
for (port_type = UART_PORT_MIN;
port_type < UART_PORT_COUNT;
port_type++) {
if (the_port == port->ip_all_ports
[port_type]) {
/* set local copy */
if (set) {
port->ip_port = the_port;
}
return port;
}
}
}
}
return NULL;
}
/* The IOC4 hardware provides no atomic way to determine if interrupts
* are pending since two reads are required to do so. The handler must
* read the SIO_IR and the SIO_IES, and take the logical and of the
* two. When this value is zero, all interrupts have been serviced and
* the handler may return.
*
* This has the unfortunate "hole" that, if some other CPU or
* some other thread or some higher level interrupt manages to
* modify SIO_IE between our reads of SIO_IR and SIO_IE, we may
* think we have observed SIO_IR&SIO_IE==0 when in fact this
* condition never really occurred.
*
* To solve this, we use a simple spinlock that must be held
* whenever modifying SIO_IE; holding this lock while observing
* both SIO_IR and SIO_IE guarantees that we do not falsely
* conclude that no enabled interrupts are pending.
*/
static inline uint32_t
pending_intrs(struct ioc4_soft *soft, int type)
{
struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
unsigned long flag;
uint32_t intrs = 0;
BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
|| (type == IOC4_OTHER_INTR_TYPE)));
spin_lock_irqsave(&soft->is_ir_lock, flag);
switch (type) {
case IOC4_SIO_INTR_TYPE:
intrs = readl(&mem->sio_ir.raw) & readl(&mem->sio_ies.raw);
break;
case IOC4_OTHER_INTR_TYPE:
intrs = readl(&mem->other_ir.raw) & readl(&mem->other_ies.raw);
/* Don't process any ATA interrupts */
intrs &= ~(IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
break;
default:
break;
}
spin_unlock_irqrestore(&soft->is_ir_lock, flag);
return intrs;
}
/**
* port_init - Initialize the sio and ioc4 hardware for a given port
* called per port from attach...
* @port: port to initialize
*/
static inline int port_init(struct ioc4_port *port)
{
uint32_t sio_cr;
struct hooks *hooks = port->ip_hooks;
struct ioc4_uartregs __iomem *uart;
/* Idle the IOC4 serial interface */
writel(IOC4_SSCR_RESET, &port->ip_serial_regs->sscr);
/* Wait until any pending bus activity for this port has ceased */
do {
sio_cr = readl(&port->ip_mem->sio_cr.raw);
} while (!(sio_cr & IOC4_SIO_CR_SIO_DIAG_IDLE));
/* Finish reset sequence */
writel(0, &port->ip_serial_regs->sscr);
/* Once RESET is done, reload cached tx_prod and rx_cons values
* and set rings to empty by making prod == cons
*/
port->ip_tx_prod = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
writel(port->ip_tx_prod, &port->ip_serial_regs->stpir);
port->ip_rx_cons = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
/* Disable interrupts for this 16550 */
uart = port->ip_uart_regs;
writeb(0, &uart->i4u_lcr);
writeb(0, &uart->i4u_ier);
/* Set the default baud */
set_baud(port, port->ip_baud);
/* Set line control to 8 bits no parity */
writeb(UART_LCR_WLEN8 | 0, &uart->i4u_lcr);
/* UART_LCR_STOP == 1 stop */
/* Enable the FIFOs */
writeb(UART_FCR_ENABLE_FIFO, &uart->i4u_fcr);
/* then reset 16550 FIFOs */
writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
&uart->i4u_fcr);
/* Clear modem control register */
writeb(0, &uart->i4u_mcr);
/* Clear deltas in modem status register */
readb(&uart->i4u_msr);
/* Only do this once per port pair */
if (port->ip_hooks == &hooks_array[0]
|| port->ip_hooks == &hooks_array[2]) {
unsigned long ring_pci_addr;
uint32_t __iomem *sbbr_l;
uint32_t __iomem *sbbr_h;
if (port->ip_hooks == &hooks_array[0]) {
sbbr_l = &port->ip_serial->sbbr01_l;
sbbr_h = &port->ip_serial->sbbr01_h;
} else {
sbbr_l = &port->ip_serial->sbbr23_l;
sbbr_h = &port->ip_serial->sbbr23_h;
}
ring_pci_addr = (unsigned long)port->ip_dma_ringbuf;
DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n",
__func__, ring_pci_addr));
writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h);
writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l);
}
/* Set the receive timeout value to 10 msec */
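/* (IOC4_SRTR_HZ / 100 == 160 ticks of the 16 kHz SRTR clock) */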
writel(IOC4_SRTR_HZ / 100, &port->ip_serial_regs->srtr);
/* Set rx threshold, enable DMA */
/* Set high water mark at 3/4 of full ring */
port->ip_sscr = (ENTRIES_PER_RING * 3 / 4);
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
/* Disable and clear all serial related interrupt bits */
write_ireg(port->ip_ioc4_soft, hooks->intr_clear,
IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
port->ip_ienb &= ~hooks->intr_clear;
writel(hooks->intr_clear, &port->ip_mem->sio_ir.raw);
return 0;
}
/**
* handle_dma_error_intr - service any pending DMA error interrupts for the
* given port - 2nd level called via sd_intr
* @arg: handler arg
* @other_ir: pending bits from the OTHER_IR register
*/
static void handle_dma_error_intr(void *arg, uint32_t other_ir)
{
struct ioc4_port *port = (struct ioc4_port *)arg;
struct hooks *hooks = port->ip_hooks;
unsigned long flags;
spin_lock_irqsave(&port->ip_lock, flags);
/* ACK the interrupt */
writel(hooks->intr_dma_error, &port->ip_mem->other_ir.raw);
if (readl(&port->ip_mem->pci_err_addr_l.raw) & IOC4_PCI_ERR_ADDR_VLD) {
printk(KERN_ERR
"PCI error address is 0x%llx, "
"master is serial port %c %s\n",
(((uint64_t)readl(&port->ip_mem->pci_err_addr_h)
<< 32)
| readl(&port->ip_mem->pci_err_addr_l.raw))
& IOC4_PCI_ERR_ADDR_ADDR_MSK, '1' +
((char)(readl(&port->ip_mem->pci_err_addr_l.raw) &
IOC4_PCI_ERR_ADDR_MST_NUM_MSK) >> 1),
(readl(&port->ip_mem->pci_err_addr_l.raw)
& IOC4_PCI_ERR_ADDR_MST_TYP_MSK)
? "RX" : "TX");
if (readl(&port->ip_mem->pci_err_addr_l.raw)
& IOC4_PCI_ERR_ADDR_MUL_ERR) {
printk(KERN_ERR
"Multiple errors occurred\n");
}
}
spin_unlock_irqrestore(&port->ip_lock, flags);
/* Re-enable DMA error interrupts */
write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error, IOC4_W_IES,
IOC4_OTHER_INTR_TYPE);
}
/**
* intr_connect - interrupt connect function
* @soft: soft struct for this card
* @type: interrupt type
* @intrbits: bit pattern to set
* @intr: handler function
* @info: handler arg
*/
static void
intr_connect(struct ioc4_soft *soft, int type,
uint32_t intrbits, ioc4_intr_func_f * intr, void *info)
{
int i;
struct ioc4_intr_info *intr_ptr;
BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
|| (type == IOC4_OTHER_INTR_TYPE)));
i = atomic_inc_return(&soft->is_intr_type[type].is_num_intrs) - 1;
BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
/* Save off the lower level interrupt handler */
intr_ptr = &soft->is_intr_type[type].is_intr_info[i];
intr_ptr->sd_bits = intrbits;
intr_ptr->sd_intr = intr;
intr_ptr->sd_info = info;
}
/**
* ioc4_intr - Top level IOC4 interrupt handler.
* @irq: irq value
* @arg: handler arg
*/
static irqreturn_t ioc4_intr(int irq, void *arg)
{
struct ioc4_soft *soft;
uint32_t this_ir, this_mir;
int xx, num_intrs = 0;
int intr_type;
int handled = 0;
struct ioc4_intr_info *intr_info;
soft = arg;
for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
num_intrs = (int)atomic_read(
&soft->is_intr_type[intr_type].is_num_intrs);
this_mir = this_ir = pending_intrs(soft, intr_type);
/* Farm out the interrupt to the various drivers depending on
* which interrupt bits are set.
*/
for (xx = 0; xx < num_intrs; xx++) {
intr_info = &soft->is_intr_type[intr_type].is_intr_info[xx];
if ((this_mir = this_ir & intr_info->sd_bits)) {
/* Disable owned interrupts, call handler */
handled++;
write_ireg(soft, intr_info->sd_bits, IOC4_W_IEC,
intr_type);
intr_info->sd_intr(intr_info->sd_info, this_mir);
this_ir &= ~this_mir;
}
}
}
#ifdef DEBUG_INTERRUPTS
{
struct ioc4_misc_regs __iomem *mem = soft->is_ioc4_misc_addr;
unsigned long flag;
spin_lock_irqsave(&soft->is_ir_lock, flag);
printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x "
"other_ir 0x%x other_ies 0x%x mask 0x%x\n",
__func__, __LINE__,
(void *)mem, readl(&mem->sio_ir.raw),
readl(&mem->sio_ies.raw),
readl(&mem->other_ir.raw),
readl(&mem->other_ies.raw),
IOC4_OTHER_IR_ATA_INT | IOC4_OTHER_IR_ATA_MEMERR);
spin_unlock_irqrestore(&soft->is_ir_lock, flag);
}
#endif
return handled ? IRQ_HANDLED : IRQ_NONE;
}
/**
* ioc4_attach_local - Device initialization.
* Called at *_attach() time for each
* IOC4 with serial ports in the system.
* @idd: Master module data for this IOC4
*/
static inline int ioc4_attach_local(struct ioc4_driver_data *idd)
{
struct ioc4_port *port;
struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS];
int port_number;
uint16_t ioc4_revid_min = 62;
uint16_t ioc4_revid;
struct pci_dev *pdev = idd->idd_pdev;
struct ioc4_control* control = idd->idd_serial_data;
struct ioc4_soft *soft = control->ic_soft;
void __iomem *ioc4_misc = idd->idd_misc_regs;
void __iomem *ioc4_serial = soft->is_ioc4_serial_addr;
/* IOC4 firmware must be at least rev 62 */
pci_read_config_word(pdev, PCI_COMMAND_SPECIAL, &ioc4_revid);
printk(KERN_INFO "IOC4 firmware revision %d\n", ioc4_revid);
if (ioc4_revid < ioc4_revid_min) {
printk(KERN_WARNING
"IOC4 serial not supported on firmware rev %d, "
"please upgrade to rev %d or higher\n",
ioc4_revid, ioc4_revid_min);
return -EPERM;
}
BUG_ON(ioc4_misc == NULL);
BUG_ON(ioc4_serial == NULL);
/* Create port structures for each port */
for (port_number = 0; port_number < IOC4_NUM_SERIAL_PORTS;
port_number++) {
port = kzalloc(sizeof(struct ioc4_port), GFP_KERNEL);
if (!port) {
printk(KERN_WARNING
"IOC4 serial memory not available for port\n");
return -ENOMEM;
}
spin_lock_init(&port->ip_lock);
/* we need to remember the previous ones, to point back to
* them farther down - setting up the ring buffers.
*/
ports[port_number] = port;
/* Allocate buffers and jumpstart the hardware. */
control->ic_port[port_number].icp_port = port;
port->ip_ioc4_soft = soft;
port->ip_pdev = pdev;
port->ip_ienb = 0;
/* Use baud rate calculations based on detected PCI
* bus speed. Simply test whether the PCI clock is
* running closer to 66MHz or 33MHz.
*/
if (idd->count_period / IOC4_EXTINT_COUNT_DIVISOR < 20) {
port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_66;
} else {
port->ip_pci_bus_speed = IOC4_SER_XIN_CLK_33;
}
port->ip_baud = 9600;
port->ip_control = control;
port->ip_mem = ioc4_misc;
port->ip_serial = ioc4_serial;
/* point to the right hook */
port->ip_hooks = &hooks_array[port_number];
/* Get direct hooks to the serial regs and uart regs
* for this port
*/
switch (port_number) {
case 0:
port->ip_serial_regs = &(port->ip_serial->port_0);
port->ip_uart_regs = &(port->ip_serial->uart_0);
break;
case 1:
port->ip_serial_regs = &(port->ip_serial->port_1);
port->ip_uart_regs = &(port->ip_serial->uart_1);
break;
case 2:
port->ip_serial_regs = &(port->ip_serial->port_2);
port->ip_uart_regs = &(port->ip_serial->uart_2);
break;
default:
case 3:
port->ip_serial_regs = &(port->ip_serial->port_3);
port->ip_uart_regs = &(port->ip_serial->uart_3);
break;
}
/* ring buffers are 1 to a pair of ports */
if (port_number && (port_number & 1)) {
/* odd use the evens buffer */
port->ip_dma_ringbuf =
ports[port_number - 1]->ip_dma_ringbuf;
port->ip_cpu_ringbuf =
ports[port_number - 1]->ip_cpu_ringbuf;
port->ip_inring = RING(port, RX_1_OR_3);
port->ip_outring = RING(port, TX_1_OR_3);
} else {
if (port->ip_dma_ringbuf == 0) {
port->ip_cpu_ringbuf = pci_alloc_consistent
(pdev, TOTAL_RING_BUF_SIZE,
&port->ip_dma_ringbuf);
}
BUG_ON(!((((int64_t)port->ip_dma_ringbuf) &
(TOTAL_RING_BUF_SIZE - 1)) == 0));
DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p "
"ip_dma_ringbuf 0x%p\n",
__func__,
(void *)port->ip_cpu_ringbuf,
(void *)port->ip_dma_ringbuf));
port->ip_inring = RING(port, RX_0_OR_2);
port->ip_outring = RING(port, TX_0_OR_2);
}
DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p",
__func__,
port_number, (void *)port, (void *)control));
DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
(void *)port->ip_serial_regs,
(void *)port->ip_uart_regs));
/* Initialize the hardware for IOC4 */
port_init(port);
DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p "
"outring 0x%p\n",
__func__,
port_number, (void *)port,
(void *)port->ip_inring,
(void *)port->ip_outring));
/* Attach interrupt handlers */
intr_connect(soft, IOC4_SIO_INTR_TYPE,
GET_SIO_IR(port_number),
handle_intr, port);
intr_connect(soft, IOC4_OTHER_INTR_TYPE,
GET_OTHER_IR(port_number),
handle_dma_error_intr, port);
}
return 0;
}
/**
* enable_intrs - enable interrupts
* @port: port to enable
* @mask: mask to use
*/
static void enable_intrs(struct ioc4_port *port, uint32_t mask)
{
struct hooks *hooks = port->ip_hooks;
if ((port->ip_ienb & mask) != mask) {
write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IES,
IOC4_SIO_INTR_TYPE);
port->ip_ienb |= mask;
}
if (port->ip_ienb)
write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
IOC4_W_IES, IOC4_OTHER_INTR_TYPE);
}
/**
* local_open - local open a port
* @port: port to open
*/
static inline int local_open(struct ioc4_port *port)
{
int spiniter = 0;
port->ip_flags = PORT_ACTIVE;
/* Pause the DMA interface if necessary */
if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
&port->ip_serial_regs->sscr);
while ((readl(&port->ip_serial_regs->sscr)
& IOC4_SSCR_PAUSE_STATE) == 0) {
spiniter++;
if (spiniter > MAXITER) {
port->ip_flags = PORT_INACTIVE;
return -1;
}
}
}
/* Reset the input fifo. If the uart received chars while the port
* was closed and DMA is not enabled, the uart may have a bunch of
* chars hanging around in its rx fifo which will not be discarded
* by rclr in the upper layer. We must get rid of them here.
*/
writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
&port->ip_uart_regs->i4u_fcr);
writeb(UART_LCR_WLEN8, &port->ip_uart_regs->i4u_lcr);
/* UART_LCR_STOP == 1 stop */
/* Re-enable DMA, set default threshold to intr whenever there is
* data available.
*/
port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
port->ip_sscr |= 1; /* default threshold */
/* Plug in the new sscr. This implicitly clears the DMA_PAUSE
* flag if it was set above
*/
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
port->ip_tx_lowat = 1;
return 0;
}
/**
* set_rx_timeout - Set rx timeout and threshold values.
* @port: port to use
* @timeout: timeout value in ticks
*/
static inline int set_rx_timeout(struct ioc4_port *port, int timeout)
{
int threshold;
port->ip_rx_timeout = timeout;
/* Timeout is in ticks. Let's figure out how many chars we
* can receive at the current baud rate in that interval
* and set the rx threshold to that amount. There are 4 chars
* per ring entry, so we'll divide the number of chars that will
* arrive in timeout by 4.
* So .... timeout * baud / 10 / HZ / 4, with HZ = 100.
*/
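/* e.g. (illustrative) timeout == 2 ticks at 9600 baud:
 * 2 * 9600 / 4000 = 4 ring entries, i.e. a 16-char threshold.
 */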
threshold = timeout * port->ip_baud / 4000;
if (threshold == 0)
threshold = 1; /* otherwise we'll intr all the time! */
if ((unsigned)threshold > (unsigned)IOC4_SSCR_RX_THRESHOLD)
return 1;
port->ip_sscr &= ~IOC4_SSCR_RX_THRESHOLD;
port->ip_sscr |= threshold;
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
/* Now set the rx timeout to the given value
* again timeout * IOC4_SRTR_HZ / HZ
*/
timeout = timeout * IOC4_SRTR_HZ / 100;
if (timeout > IOC4_SRTR_CNT)
timeout = IOC4_SRTR_CNT;
writel(timeout, &port->ip_serial_regs->srtr);
return 0;
}
/**
* config_port - config the hardware
* @port: port to config
* @baud: baud rate for the port
* @byte_size: data size
* @stop_bits: number of stop bits
* @parenb: parity enable ?
* @parodd: odd parity ?
*/
static inline int
config_port(struct ioc4_port *port,
int baud, int byte_size, int stop_bits, int parenb, int parodd)
{
char lcr, sizebits;
int spiniter = 0;
DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n",
__func__, baud, byte_size, stop_bits, parenb, parodd));
if (set_baud(port, baud))
return 1;
switch (byte_size) {
case 5:
sizebits = UART_LCR_WLEN5;
break;
case 6:
sizebits = UART_LCR_WLEN6;
break;
case 7:
sizebits = UART_LCR_WLEN7;
break;
case 8:
sizebits = UART_LCR_WLEN8;
break;
default:
return 1;
}
/* Pause the DMA interface if necessary */
if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
&port->ip_serial_regs->sscr);
while ((readl(&port->ip_serial_regs->sscr)
& IOC4_SSCR_PAUSE_STATE) == 0) {
spiniter++;
if (spiniter > MAXITER)
return -1;
}
}
/* Clear relevant fields in lcr */
lcr = readb(&port->ip_uart_regs->i4u_lcr);
lcr &= ~(LCR_MASK_BITS_CHAR | UART_LCR_EPAR |
UART_LCR_PARITY | LCR_MASK_STOP_BITS);
/* Set byte size in lcr */
lcr |= sizebits;
/* Set parity */
if (parenb) {
lcr |= UART_LCR_PARITY;
if (!parodd)
lcr |= UART_LCR_EPAR;
}
/* Set stop bits */
if (stop_bits)
lcr |= UART_LCR_STOP /* 2 stop bits */ ;
writeb(lcr, &port->ip_uart_regs->i4u_lcr);
/* Re-enable the DMA interface if necessary */
if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
}
port->ip_baud = baud;
/* When we get within this number of ring entries of filling the
* entire ring on tx, place an EXPLICIT intr to generate a lowat
* notification when output has drained.
*/
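/* e.g. (illustrative) at 115200 baud this is (11 + 3) / 4 = 3 ring
 * entries; at 9600 baud the expression yields 0 and is clamped to 1
 * just below.
 */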
port->ip_tx_lowat = (TX_LOWAT_CHARS(baud) + 3) / 4;
if (port->ip_tx_lowat == 0)
port->ip_tx_lowat = 1;
set_rx_timeout(port, 2);
return 0;
}
/**
* do_write - Write bytes to the port. Returns the number of bytes
* actually written. Called from transmit_chars
* @port: port to use
* @buf: the stuff to write
* @len: how many bytes in 'buf'
*/
static inline int do_write(struct ioc4_port *port, char *buf, int len)
{
int prod_ptr, cons_ptr, total = 0;
struct ring *outring;
struct ring_entry *entry;
struct hooks *hooks = port->ip_hooks;
BUG_ON(len < 0);
prod_ptr = port->ip_tx_prod;
cons_ptr = readl(&port->ip_serial_regs->stcir) & PROD_CONS_MASK;
outring = port->ip_outring;
/* Maintain a 1-entry red-zone. The ring buffer is full when
* (cons - prod) % ring_size is 1. Rather than do this subtraction
* in the body of the loop, I'll do it now.
*/
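/* Illustrative: with prod == cons the ring is empty; reserving one
* unused entry is what lets "full" be told apart from "empty". After
* the one-entry adjustment below, the fill loop can simply stop when
* prod_ptr reaches the adjusted cons_ptr.
*/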
cons_ptr = (cons_ptr - (int)sizeof(struct ring_entry)) & PROD_CONS_MASK;
/* Stuff the bytes into the output */
while ((prod_ptr != cons_ptr) && (len > 0)) {
int xx;
/* Get 4 bytes (one ring entry) at a time */
entry = (struct ring_entry *)((caddr_t) outring + prod_ptr);
/* Invalidate all entries */
entry->ring_allsc = 0;
/* Copy in some bytes */
for (xx = 0; (xx < 4) && (len > 0); xx++) {
entry->ring_data[xx] = *buf++;
entry->ring_sc[xx] = IOC4_TXCB_VALID;
len--;
total++;
}
/* If we are within some small threshold of filling up the
* entire ring buffer, we must place an EXPLICIT intr here
* to generate a lowat interrupt in case we subsequently
* really do fill up the ring and the caller goes to sleep.
* No need to place more than one though.
*/
if (!(port->ip_flags & LOWAT_WRITTEN) &&
((cons_ptr - prod_ptr) & PROD_CONS_MASK)
<= port->ip_tx_lowat
* (int)sizeof(struct ring_entry)) {
port->ip_flags |= LOWAT_WRITTEN;
entry->ring_sc[0] |= IOC4_TXCB_INT_WHEN_DONE;
}
/* Go on to next entry */
prod_ptr += sizeof(struct ring_entry);
prod_ptr &= PROD_CONS_MASK;
}
/* If we sent something, start DMA if necessary */
if (total > 0 && !(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
port->ip_sscr |= IOC4_SSCR_DMA_EN;
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
}
/* Store the new producer pointer. If tx is disabled, we stuff the
* data into the ring buffer, but we don't actually start tx.
*/
if (!uart_tx_stopped(port->ip_port)) {
writel(prod_ptr, &port->ip_serial_regs->stpir);
/* If we are now transmitting, enable tx_mt interrupt so we
* can disable DMA if necessary when the tx finishes.
*/
if (total > 0)
enable_intrs(port, hooks->intr_tx_mt);
}
port->ip_tx_prod = prod_ptr;
return total;
}
/**
* disable_intrs - disable interrupts
* @port: port to disable interrupts on
* @mask: mask to use
*/
static void disable_intrs(struct ioc4_port *port, uint32_t mask)
{
struct hooks *hooks = port->ip_hooks;
if (port->ip_ienb & mask) {
write_ireg(port->ip_ioc4_soft, mask, IOC4_W_IEC,
IOC4_SIO_INTR_TYPE);
port->ip_ienb &= ~mask;
}
if (!port->ip_ienb)
write_ireg(port->ip_ioc4_soft, hooks->intr_dma_error,
IOC4_W_IEC, IOC4_OTHER_INTR_TYPE);
}
/**
* set_notification - Modify event notification
* @port: port to use
* @mask: events mask
* @set_on: set ?
*/
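/* Usage sketch (hypothetical): set_notification(port, N_DATA_READY | N_DDCD, 1)
* enables rx-data and delta-DCD events, which also forces DMA on in the
* check below.
*/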
static int set_notification(struct ioc4_port *port, int mask, int set_on)
{
struct hooks *hooks = port->ip_hooks;
uint32_t intrbits, sscrbits;
BUG_ON(!mask);
intrbits = sscrbits = 0;
if (mask & N_DATA_READY)
intrbits |= (hooks->intr_rx_timer | hooks->intr_rx_high);
if (mask & N_OUTPUT_LOWAT)
intrbits |= hooks->intr_tx_explicit;
if (mask & N_DDCD) {
intrbits |= hooks->intr_delta_dcd;
sscrbits |= IOC4_SSCR_RX_RING_DCD;
}
if (mask & N_DCTS)
intrbits |= hooks->intr_delta_cts;
if (set_on) {
enable_intrs(port, intrbits);
port->ip_notify |= mask;
port->ip_sscr |= sscrbits;
} else {
disable_intrs(port, intrbits);
port->ip_notify &= ~mask;
port->ip_sscr &= ~sscrbits;
}
/* We require DMA if either DATA_READY or DDCD notification is
* currently requested. If neither of these is requested and
* there is currently no tx in progress, DMA may be disabled.
*/
if (port->ip_notify & (N_DATA_READY | N_DDCD))
port->ip_sscr |= IOC4_SSCR_DMA_EN;
else if (!(port->ip_ienb & hooks->intr_tx_mt))
port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
return 0;
}
/**
* set_mcr - set the master control reg
* @the_port: port to use
* @mask1: mcr mask
* @mask2: shadow mask
*/
static inline int set_mcr(struct uart_port *the_port,
int mask1, int mask2)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
uint32_t shadow;
int spiniter = 0;
char mcr;
if (!port)
return -1;
/* Pause the DMA interface if necessary */
if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
writel(port->ip_sscr | IOC4_SSCR_DMA_PAUSE,
&port->ip_serial_regs->sscr);
while ((readl(&port->ip_serial_regs->sscr)
& IOC4_SSCR_PAUSE_STATE) == 0) {
spiniter++;
if (spiniter > MAXITER)
return -1;
}
}
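/* Judging by the 0xff000000 mask below, the hardware mirrors the UART
* MCR in the top byte of the shadow register (an inference from this
* code, not a documented guarantee here).
*/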
shadow = readl(&port->ip_serial_regs->shadow);
mcr = (shadow & 0xff000000) >> 24;
/* Set new value */
mcr |= mask1;
shadow |= mask2;
writeb(mcr, &port->ip_uart_regs->i4u_mcr);
writel(shadow, &port->ip_serial_regs->shadow);
/* Re-enable the DMA interface if necessary */
if (port->ip_sscr & IOC4_SSCR_DMA_EN) {
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
}
return 0;
}
/**
* ioc4_set_proto - set the protocol for the port
* @port: port to use
* @proto: protocol to use
*/
static int ioc4_set_proto(struct ioc4_port *port, int proto)
{
struct hooks *hooks = port->ip_hooks;
switch (proto) {
case PROTO_RS232:
/* Clear the appropriate GIO pin */
writel(0, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
break;
case PROTO_RS422:
/* Set the appropriate GIO pin */
writel(1, (&port->ip_mem->gppr[hooks->rs422_select_pin].raw));
break;
default:
return 1;
}
return 0;
}
/**
* transmit_chars - upper level write, called with ip_lock
* @the_port: port to write
*/
static void transmit_chars(struct uart_port *the_port)
{
int xmit_count, tail, head;
int result;
char *start;
struct tty_struct *tty;
struct ioc4_port *port = get_ioc4_port(the_port, 0);
struct uart_state *state;
if (!the_port)
return;
if (!port)
return;
state = the_port->state;
tty = state->port.tty;
if (uart_circ_empty(&state->xmit) || uart_tx_stopped(the_port)) {
/* Nothing to do or hw stopped */
set_notification(port, N_ALL_OUTPUT, 0);
return;
}
head = state->xmit.head;
tail = state->xmit.tail;
start = (char *)&state->xmit.buf[tail];
/* write out all the data or until the end of the buffer */
xmit_count = (head < tail) ? (UART_XMIT_SIZE - tail) : (head - tail);
if (xmit_count > 0) {
result = do_write(port, start, xmit_count);
if (result > 0) {
/* bookkeeping */
xmit_count -= result;
the_port->icount.tx += result;
/* advance the pointers */
tail += result;
tail &= UART_XMIT_SIZE - 1;
state->xmit.tail = tail;
start = (char *)&state->xmit.buf[tail];
}
}
if (uart_circ_chars_pending(&state->xmit) < WAKEUP_CHARS)
uart_write_wakeup(the_port);
if (uart_circ_empty(&state->xmit)) {
set_notification(port, N_OUTPUT_LOWAT, 0);
} else {
set_notification(port, N_OUTPUT_LOWAT, 1);
}
}
/**
* ioc4_change_speed - change the speed of the port
* @the_port: port to change
* @new_termios: new termios settings
* @old_termios: old termios settings
*/
static void
ioc4_change_speed(struct uart_port *the_port,
struct ktermios *new_termios, struct ktermios *old_termios)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
int baud, bits;
unsigned cflag, iflag;
int new_parity = 0, new_parity_enable = 0, new_stop = 0, new_data = 8;
struct uart_state *state = the_port->state;
cflag = new_termios->c_cflag;
iflag = new_termios->c_iflag;
switch (cflag & CSIZE) {
case CS5:
new_data = 5;
bits = 7;
break;
case CS6:
new_data = 6;
bits = 8;
break;
case CS7:
new_data = 7;
bits = 9;
break;
case CS8:
new_data = 8;
bits = 10;
break;
default:
/* we always need a default */
new_data = 5;
bits = 7;
break;
}
if (cflag & CSTOPB) {
bits++;
new_stop = 1;
}
if (cflag & PARENB) {
bits++;
new_parity_enable = 1;
if (cflag & PARODD)
new_parity = 1;
}
baud = uart_get_baud_rate(the_port, new_termios, old_termios,
MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud));
/* default is 9600 */
if (!baud)
baud = 9600;
if (!the_port->fifosize)
the_port->fifosize = IOC4_FIFO_CHARS;
the_port->timeout = ((the_port->fifosize * HZ * bits) / (baud / 10));
the_port->timeout += HZ / 50; /* Add .02 seconds of slop */
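/* Illustrative arithmetic (assuming a 128-char FIFO and 10 bits/char):
* at 9600 baud, timeout = 128 * HZ * 10 / 960 ~= 1.33 * HZ, i.e. about
* 1.3 s to drain a full FIFO, plus the 20 ms of slop added above.
*/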
the_port->ignore_status_mask = N_ALL_INPUT;
state->port.tty->low_latency = 1;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~(N_PARITY_ERROR
| N_FRAMING_ERROR);
if (iflag & IGNBRK) {
the_port->ignore_status_mask &= ~N_BREAK;
if (iflag & IGNPAR)
the_port->ignore_status_mask &= ~N_OVERRUN_ERROR;
}
if (!(cflag & CREAD)) {
/* ignore everything */
the_port->ignore_status_mask &= ~N_DATA_READY;
}
if (cflag & CRTSCTS) {
port->ip_sscr |= IOC4_SSCR_HFC_EN;
}
else {
port->ip_sscr &= ~IOC4_SSCR_HFC_EN;
}
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
/* Set the configuration and proper notification call */
DPRINT_CONFIG(("%s : port 0x%p cflag 0%o "
"config_port(baud %d data %d stop %d p enable %d parity %d),"
" notification 0x%x\n",
__func__, (void *)port, cflag, baud, new_data, new_stop,
new_parity_enable, new_parity, the_port->ignore_status_mask));
if ((config_port(port, baud, /* baud */
new_data, /* byte size */
new_stop, /* stop bits */
new_parity_enable, /* set parity */
new_parity)) >= 0) { /* parity 1==odd */
set_notification(port, the_port->ignore_status_mask, 1);
}
}
/**
* ic4_startup_local - Start up the serial port - returns >= 0 if no errors
* @the_port: Port to operate on
*/
static inline int ic4_startup_local(struct uart_port *the_port)
{
struct ioc4_port *port;
struct uart_state *state;
if (!the_port)
return -1;
port = get_ioc4_port(the_port, 0);
if (!port)
return -1;
state = the_port->state;
local_open(port);
/* set the protocol - mapbase has the port type */
ioc4_set_proto(port, the_port->mapbase);
/* set the speed of the serial port */
ioc4_change_speed(the_port, state->port.tty->termios,
(struct ktermios *)0);
return 0;
}
/*
* ioc4_cb_output_lowat - called when the output low water mark is hit
* @the_port: port to output
*/
static void ioc4_cb_output_lowat(struct uart_port *the_port)
{
unsigned long pflags;
/* ip_lock is set on the call here */
if (the_port) {
spin_lock_irqsave(&the_port->lock, pflags);
transmit_chars(the_port);
spin_unlock_irqrestore(&the_port->lock, pflags);
}
}
/**
* handle_intr - service any interrupts for the given port - 2nd level
* called via sd_intr
* @arg: handler arg
* @sio_ir: ioc4regs
*/
static void handle_intr(void *arg, uint32_t sio_ir)
{
struct ioc4_port *port = (struct ioc4_port *)arg;
struct hooks *hooks = port->ip_hooks;
unsigned int rx_high_rd_aborted = 0;
unsigned long flags;
struct uart_port *the_port;
int loop_counter;
/* Possible race condition here: The tx_mt interrupt bit may be
* cleared without the intervention of the interrupt handler,
* e.g. by a write. If the top level interrupt handler reads a
* tx_mt, then some other processor does a write, starting up
* output, then we come in here, see the tx_mt and stop DMA, the
* output started by the other processor will hang. Thus we can
* only rely on tx_mt being legitimate if it is read while the
* port lock is held. The passed-in interrupt mask was read by
* the top level handler without the port lock held, so its tx_mt
* bit must be ignored: clear it for now and reload it later once
* we have the port lock.
*/
sio_ir &= ~(hooks->intr_tx_mt);
spin_lock_irqsave(&port->ip_lock, flags);
loop_counter = MAXITER; /* to avoid hangs */
do {
uint32_t shadow;
if (loop_counter-- <= 0) {
printk(KERN_WARNING "IOC4 serial: "
"possible hang condition/"
"port stuck on interrupt.\n");
break;
}
/* Handle a DCD change */
if (sio_ir & hooks->intr_delta_dcd) {
/* ACK the interrupt */
writel(hooks->intr_delta_dcd,
&port->ip_mem->sio_ir.raw);
shadow = readl(&port->ip_serial_regs->shadow);
if ((port->ip_notify & N_DDCD)
&& (shadow & IOC4_SHADOW_DCD)
&& (port->ip_port)) {
the_port = port->ip_port;
the_port->icount.dcd = 1;
wake_up_interruptible
(&the_port->state->port.delta_msr_wait);
} else if ((port->ip_notify & N_DDCD)
&& !(shadow & IOC4_SHADOW_DCD)) {
/* Flag delta DCD/no DCD */
port->ip_flags |= DCD_ON;
}
}
/* Handle a CTS change */
if (sio_ir & hooks->intr_delta_cts) {
/* ACK the interrupt */
writel(hooks->intr_delta_cts,
&port->ip_mem->sio_ir.raw);
shadow = readl(&port->ip_serial_regs->shadow);
if ((port->ip_notify & N_DCTS)
&& (port->ip_port)) {
the_port = port->ip_port;
the_port->icount.cts =
(shadow & IOC4_SHADOW_CTS) ? 1 : 0;
wake_up_interruptible
(&the_port->state->port.delta_msr_wait);
}
}
/* rx timeout interrupt. Must be some data available. Put this
* before the check for rx_high since servicing this condition
* may cause that condition to clear.
*/
if (sio_ir & hooks->intr_rx_timer) {
/* ACK the interrupt */
writel(hooks->intr_rx_timer,
&port->ip_mem->sio_ir.raw);
if ((port->ip_notify & N_DATA_READY)
&& (port->ip_port)) {
/* ip_lock is set on call here */
receive_chars(port->ip_port);
}
}
/* rx high interrupt. Must be after rx_timer. */
else if (sio_ir & hooks->intr_rx_high) {
/* Data available, notify upper layer */
if ((port->ip_notify & N_DATA_READY)
&& port->ip_port) {
/* ip_lock is set on call here */
receive_chars(port->ip_port);
}
/* We can't ACK this interrupt. If receive_chars didn't
* cause the condition to clear, we'll have to disable
* the interrupt until the data is drained.
* If the read was aborted, don't disable the interrupt
* as this may cause us to hang indefinitely. An
* aborted read generally means that this interrupt
* hasn't been delivered to the cpu yet anyway, even
* though we see it as asserted when we read the sio_ir.
*/
if ((sio_ir = PENDING(port)) & hooks->intr_rx_high) {
if ((port->ip_flags & READ_ABORTED) == 0) {
port->ip_ienb &= ~hooks->intr_rx_high;
port->ip_flags |= INPUT_HIGH;
} else {
rx_high_rd_aborted++;
}
}
}
/* We got a low water interrupt: notify upper layer to
* send more data. Must come before tx_mt since servicing
* this condition may cause that condition to clear.
*/
if (sio_ir & hooks->intr_tx_explicit) {
port->ip_flags &= ~LOWAT_WRITTEN;
/* ACK the interrupt */
writel(hooks->intr_tx_explicit,
&port->ip_mem->sio_ir.raw);
if (port->ip_notify & N_OUTPUT_LOWAT)
ioc4_cb_output_lowat(port->ip_port);
}
/* Handle tx_mt. Must come after tx_explicit. */
else if (sio_ir & hooks->intr_tx_mt) {
/* If we are expecting a lowat notification
* and we get to this point it probably means that for
* some reason the tx_explicit didn't work as expected
* (that can legitimately happen if the output buffer is
* filled up in just the right way).
* So send the notification now.
*/
if (port->ip_notify & N_OUTPUT_LOWAT) {
ioc4_cb_output_lowat(port->ip_port);
/* We need to reload the sio_ir since the lowat
* call may have caused another write to occur,
* clearing the tx_mt condition.
*/
sio_ir = PENDING(port);
}
/* If the tx_mt condition still persists even after the
* lowat call, we've got some work to do.
*/
if (sio_ir & hooks->intr_tx_mt) {
/* If we are not currently expecting DMA input,
* and the transmitter has just gone idle,
* there is no longer any reason for DMA, so
* disable it.
*/
if (!(port->ip_notify
& (N_DATA_READY | N_DDCD))) {
BUG_ON(!(port->ip_sscr
& IOC4_SSCR_DMA_EN));
port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
writel(port->ip_sscr,
&port->ip_serial_regs->sscr);
}
/* Prevent infinite tx_mt interrupt */
port->ip_ienb &= ~hooks->intr_tx_mt;
}
}
sio_ir = PENDING(port);
/* if the read was aborted and only hooks->intr_rx_high,
* clear hooks->intr_rx_high, so we do not loop forever.
*/
if (rx_high_rd_aborted && (sio_ir == hooks->intr_rx_high)) {
sio_ir &= ~hooks->intr_rx_high;
}
} while (sio_ir & hooks->intr_all);
spin_unlock_irqrestore(&port->ip_lock, flags);
/* Re-enable interrupts before returning from interrupt handler.
* Getting interrupted here is okay. It'll just v() our semaphore, and
* we'll come through the loop again.
*/
write_ireg(port->ip_ioc4_soft, port->ip_ienb, IOC4_W_IES,
IOC4_SIO_INTR_TYPE);
}
/*
* ioc4_cb_post_ncs - called for some basic errors
* @the_port: port to use
* @ncs: event
*/
static void ioc4_cb_post_ncs(struct uart_port *the_port, int ncs)
{
struct uart_icount *icount;
icount = &the_port->icount;
if (ncs & NCS_BREAK)
icount->brk++;
if (ncs & NCS_FRAMING)
icount->frame++;
if (ncs & NCS_OVERRUN)
icount->overrun++;
if (ncs & NCS_PARITY)
icount->parity++;
}
/**
* do_read - Read in bytes from the port. Return the number of bytes
* actually read.
* @the_port: port to use
* @buf: place to put the stuff we read
* @len: how big 'buf' is
*/
static inline int do_read(struct uart_port *the_port, unsigned char *buf,
int len)
{
int prod_ptr, cons_ptr, total;
struct ioc4_port *port = get_ioc4_port(the_port, 0);
struct ring *inring;
struct ring_entry *entry;
struct hooks *hooks;
int byte_num;
char *sc;
int loop_counter;
BUG_ON(len < 0);
BUG_ON(!port);
hooks = port->ip_hooks;
/* There is a nasty timing issue in the IOC4. When the rx_timer
* expires or the rx_high condition arises, we take an interrupt.
* At some point while servicing the interrupt, we read bytes from
* the ring buffer and re-arm the rx_timer. However the rx_timer is
* not started until the first byte is received *after* it is armed,
* and any bytes pending in the rx construction buffers are not drained
* to memory until either there are 4 bytes available or the rx_timer
* expires. This leads to a potential situation where data is left
* in the construction buffers forever - 1 to 3 bytes were received
* after the interrupt was generated but before the rx_timer was
* re-armed. At that point as long as no subsequent bytes are received
* the timer will never be started and the bytes will remain in the
* construction buffer forever. The solution is to execute a DRAIN
* command after rearming the timer. This way any bytes received before
* the DRAIN will be drained to memory, and any bytes received after
* the DRAIN will start the TIMER and be drained when it expires.
* Luckily, this only needs to be done when the DMA buffer is empty
* since there is no requirement that this function return all
* available data as long as it returns some.
*/
/* Re-arm the timer */
writel(port->ip_rx_cons | IOC4_SRCIR_ARM, &port->ip_serial_regs->srcir);
prod_ptr = readl(&port->ip_serial_regs->srpir) & PROD_CONS_MASK;
cons_ptr = port->ip_rx_cons;
if (prod_ptr == cons_ptr) {
int reset_dma = 0;
/* Input buffer appears empty, do a flush. */
/* DMA must be enabled for this to work. */
if (!(port->ip_sscr & IOC4_SSCR_DMA_EN)) {
port->ip_sscr |= IOC4_SSCR_DMA_EN;
reset_dma = 1;
}
/* Potential race condition: we must reload the srpir after
* issuing the drain command, otherwise we could think the rx
* buffer is empty, then take a very long interrupt, and when
* we come back it's full and we wait forever for the drain to
* complete.
*/
writel(port->ip_sscr | IOC4_SSCR_RX_DRAIN,
&port->ip_serial_regs->sscr);
prod_ptr = readl(&port->ip_serial_regs->srpir)
& PROD_CONS_MASK;
/* We must not wait for the DRAIN to complete unless there are
* at least 8 bytes (2 ring entries) available to receive the
* data otherwise the DRAIN will never complete and we'll
* deadlock here.
* In fact, to make things easier, I'll just ignore the flush if
* there is any data at all now available.
*/
if (prod_ptr == cons_ptr) {
loop_counter = 0;
while (readl(&port->ip_serial_regs->sscr) &
IOC4_SSCR_RX_DRAIN) {
loop_counter++;
if (loop_counter > MAXITER)
return -1;
}
/* SIGH. We have to reload the prod_ptr *again* since
* the drain may have caused it to change
*/
prod_ptr = readl(&port->ip_serial_regs->srpir)
& PROD_CONS_MASK;
}
if (reset_dma) {
port->ip_sscr &= ~IOC4_SSCR_DMA_EN;
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
}
}
inring = port->ip_inring;
port->ip_flags &= ~READ_ABORTED;
total = 0;
loop_counter = 0xfffff; /* to avoid hangs */
/* Grab bytes from the hardware */
while ((prod_ptr != cons_ptr) && (len > 0)) {
entry = (struct ring_entry *)((caddr_t)inring + cons_ptr);
if (loop_counter-- <= 0) {
printk(KERN_WARNING "IOC4 serial: "
"possible hang condition/"
"port stuck on read.\n");
break;
}
/* According to the producer pointer, this ring entry
* must contain some data. But if the PIO happened faster
* than the DMA, the data may not be available yet; bail
* out and pick it up on a later pass.
*/
if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
/* Indicate the read is aborted so we don't disable
* the interrupt thinking that the consumer is
* congested.
*/
port->ip_flags |= READ_ABORTED;
len = 0;
break;
}
/* Load the bytes/status out of the ring entry */
for (byte_num = 0; byte_num < 4 && len > 0; byte_num++) {
sc = &(entry->ring_sc[byte_num]);
/* Check for change in modem state or overrun */
if ((*sc & IOC4_RXSB_MODEM_VALID)
&& (port->ip_notify & N_DDCD)) {
/* Notify upper layer if DCD dropped */
if ((port->ip_flags & DCD_ON)
&& !(*sc & IOC4_RXSB_DCD)) {
/* If we have already copied some data,
* return it. We'll pick up the carrier
* drop on the next pass. That way we
* don't throw away the data that has
* already been copied back to
* the caller's buffer.
*/
if (total > 0) {
len = 0;
break;
}
port->ip_flags &= ~DCD_ON;
/* Turn off this notification so the
* carrier drop protocol won't see it
* again when it does a read.
*/
*sc &= ~IOC4_RXSB_MODEM_VALID;
/* To keep things consistent, we need
* to update the consumer pointer so
* the next reader won't come in and
* try to read the same ring entries
* again. This must be done here before
* the dcd change.
*/
if ((entry->ring_allsc & RING_ANY_VALID)
== 0) {
cons_ptr += (int)sizeof
(struct ring_entry);
cons_ptr &= PROD_CONS_MASK;
}
writel(cons_ptr,
&port->ip_serial_regs->srcir);
port->ip_rx_cons = cons_ptr;
/* Notify upper layer of carrier drop */
if ((port->ip_notify & N_DDCD)
&& port->ip_port) {
the_port->icount.dcd = 0;
wake_up_interruptible
(&the_port->state->
port.delta_msr_wait);
}
/* If we had any data to return, we
* would have returned it above.
*/
return 0;
}
}
if (*sc & IOC4_RXSB_MODEM_VALID) {
/* Notify that an input overrun occurred */
if ((*sc & IOC4_RXSB_OVERRUN)
&& (port->ip_notify & N_OVERRUN_ERROR)) {
ioc4_cb_post_ncs(the_port, NCS_OVERRUN);
}
/* Don't look at this byte again */
*sc &= ~IOC4_RXSB_MODEM_VALID;
}
/* Check for valid data or RX errors */
if ((*sc & IOC4_RXSB_DATA_VALID) &&
((*sc & (IOC4_RXSB_PAR_ERR
| IOC4_RXSB_FRAME_ERR
| IOC4_RXSB_BREAK))
&& (port->ip_notify & (N_PARITY_ERROR
| N_FRAMING_ERROR
| N_BREAK)))) {
/* There is an error condition on the next byte.
* If we have already transferred some bytes,
* we'll stop here. Otherwise if this is the
* first byte to be read, we'll just transfer
* it alone after notifying the
* upper layer of its status.
*/
if (total > 0) {
len = 0;
break;
} else {
if ((*sc & IOC4_RXSB_PAR_ERR) &&
(port->ip_notify & N_PARITY_ERROR)) {
ioc4_cb_post_ncs(the_port,
NCS_PARITY);
}
if ((*sc & IOC4_RXSB_FRAME_ERR) &&
(port->ip_notify & N_FRAMING_ERROR)){
ioc4_cb_post_ncs(the_port,
NCS_FRAMING);
}
if ((*sc & IOC4_RXSB_BREAK)
&& (port->ip_notify & N_BREAK)) {
ioc4_cb_post_ncs
(the_port,
NCS_BREAK);
}
len = 1;
}
}
if (*sc & IOC4_RXSB_DATA_VALID) {
*sc &= ~IOC4_RXSB_DATA_VALID;
*buf = entry->ring_data[byte_num];
buf++;
len--;
total++;
}
}
/* If we used up this entry entirely, go on to the next one,
* otherwise we must have run out of buffer space, so
* leave the consumer pointer here for the next read in case
* there are still unread bytes in this entry.
*/
if ((entry->ring_allsc & RING_ANY_VALID) == 0) {
cons_ptr += (int)sizeof(struct ring_entry);
cons_ptr &= PROD_CONS_MASK;
}
}
/* Update consumer pointer and re-arm rx timer interrupt */
writel(cons_ptr, &port->ip_serial_regs->srcir);
port->ip_rx_cons = cons_ptr;
/* If we have now dipped below the rx high water mark and we have
* rx_high interrupt turned off, we can now turn it back on again.
*/
if ((port->ip_flags & INPUT_HIGH) && (((prod_ptr - cons_ptr)
& PROD_CONS_MASK) < ((port->ip_sscr &
IOC4_SSCR_RX_THRESHOLD)
<< IOC4_PROD_CONS_PTR_OFF))) {
port->ip_flags &= ~INPUT_HIGH;
enable_intrs(port, hooks->intr_rx_high);
}
return total;
}
/**
* receive_chars - upper level read. Called with ip_lock.
* @the_port: port to read from
*/
static void receive_chars(struct uart_port *the_port)
{
struct tty_struct *tty;
unsigned char ch[IOC4_MAX_CHARS];
int read_count, request_count = IOC4_MAX_CHARS;
struct uart_icount *icount;
struct uart_state *state = the_port->state;
unsigned long pflags;
/* Make sure all the pointers are "good" ones */
if (!state)
return;
if (!state->port.tty)
return;
spin_lock_irqsave(&the_port->lock, pflags);
tty = state->port.tty;
request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS);
if (request_count > 0) {
icount = &the_port->icount;
read_count = do_read(the_port, ch, request_count);
if (read_count > 0) {
tty_insert_flip_string(tty, ch, read_count);
icount->rx += read_count;
}
}
spin_unlock_irqrestore(&the_port->lock, pflags);
tty_flip_buffer_push(tty);
}
/**
* ic4_type - What type of console are we?
* @port: Port to operate with (we ignore since we only have one port)
*
*/
static const char *ic4_type(struct uart_port *the_port)
{
if (the_port->mapbase == PROTO_RS232)
return "SGI IOC4 Serial [rs232]";
else
return "SGI IOC4 Serial [rs422]";
}
/**
* ic4_tx_empty - Is the transmitter empty?
* @port: Port to operate on
*
*/
static unsigned int ic4_tx_empty(struct uart_port *the_port)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
unsigned int ret = 0;
if (port_is_active(port, the_port)) {
if (readl(&port->ip_serial_regs->shadow) & IOC4_SHADOW_TEMT)
ret = TIOCSER_TEMT;
}
return ret;
}
/**
* ic4_stop_tx - stop the transmitter
* @port: Port to operate on
*
*/
static void ic4_stop_tx(struct uart_port *the_port)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
if (port_is_active(port, the_port))
set_notification(port, N_OUTPUT_LOWAT, 0);
}
/**
* null_void_function - do nothing; placeholder for unused uart_ops hooks
* @port: Port to operate on
*
*/
static void null_void_function(struct uart_port *the_port)
{
}
/**
* ic4_shutdown - shut down the port - free irq and disable
* @port: Port to shut down
*
*/
static void ic4_shutdown(struct uart_port *the_port)
{
unsigned long port_flags;
struct ioc4_port *port;
struct uart_state *state;
port = get_ioc4_port(the_port, 0);
if (!port)
return;
state = the_port->state;
port->ip_port = NULL;
wake_up_interruptible(&state->port.delta_msr_wait);
if (state->port.tty)
set_bit(TTY_IO_ERROR, &state->port.tty->flags);
spin_lock_irqsave(&the_port->lock, port_flags);
set_notification(port, N_ALL, 0);
port->ip_flags = PORT_INACTIVE;
spin_unlock_irqrestore(&the_port->lock, port_flags);
}
/**
* ic4_set_mctrl - set control lines (dtr, rts, etc)
* @port: Port to operate on
* @mctrl: Lines to set/unset
*
*/
static void ic4_set_mctrl(struct uart_port *the_port, unsigned int mctrl)
{
unsigned char mcr = 0;
struct ioc4_port *port;
port = get_ioc4_port(the_port, 0);
if (!port_is_active(port, the_port))
return;
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
mcr |= UART_MCR_DTR;
if (mctrl & TIOCM_OUT1)
mcr |= UART_MCR_OUT1;
if (mctrl & TIOCM_OUT2)
mcr |= UART_MCR_OUT2;
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
set_mcr(the_port, mcr, IOC4_SHADOW_DTR);
}
/**
* ic4_get_mctrl - get control line info
* @port: port to operate on
*
*/
static unsigned int ic4_get_mctrl(struct uart_port *the_port)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
uint32_t shadow;
unsigned int ret = 0;
if (!port_is_active(port, the_port))
return 0;
shadow = readl(&port->ip_serial_regs->shadow);
if (shadow & IOC4_SHADOW_DCD)
ret |= TIOCM_CAR;
if (shadow & IOC4_SHADOW_DR)
ret |= TIOCM_DSR;
if (shadow & IOC4_SHADOW_CTS)
ret |= TIOCM_CTS;
return ret;
}
/**
* ic4_start_tx - Start transmitter, flush any output
* @port: Port to operate on
*
*/
static void ic4_start_tx(struct uart_port *the_port)
{
struct ioc4_port *port = get_ioc4_port(the_port, 0);
if (port_is_active(port, the_port)) {
set_notification(port, N_OUTPUT_LOWAT, 1);
enable_intrs(port, port->ip_hooks->intr_tx_mt);
}
}
/**
* ic4_break_ctl - handle breaks
* @port: Port to operate on
* @break_state: Break state
*
*/
static void ic4_break_ctl(struct uart_port *the_port, int break_state)
{
}
/**
* ic4_startup - Start up the serial port
* @port: Port to operate on
*
*/
static int ic4_startup(struct uart_port *the_port)
{
int retval;
struct ioc4_port *port;
struct ioc4_control *control;
struct uart_state *state;
unsigned long port_flags;
if (!the_port)
return -ENODEV;
port = get_ioc4_port(the_port, 1);
if (!port)
return -ENODEV;
state = the_port->state;
control = port->ip_control;
if (!control) {
port->ip_port = NULL;
return -ENODEV;
}
/* Start up the serial port */
spin_lock_irqsave(&the_port->lock, port_flags);
retval = ic4_startup_local(the_port);
spin_unlock_irqrestore(&the_port->lock, port_flags);
return retval;
}
/**
* ic4_set_termios - set termios stuff
* @port: port to operate on
* @termios: New settings
* @old_termios: old settings
*
*/
static void
ic4_set_termios(struct uart_port *the_port,
struct ktermios *termios, struct ktermios *old_termios)
{
unsigned long port_flags;
spin_lock_irqsave(&the_port->lock, port_flags);
ioc4_change_speed(the_port, termios, old_termios);
spin_unlock_irqrestore(&the_port->lock, port_flags);
}
/**
* ic4_request_port - allocate resources for port - no op....
* @port: port to operate on
*
*/
static int ic4_request_port(struct uart_port *port)
{
return 0;
}
/* Associate the uart functions above - given to serial core */
static struct uart_ops ioc4_ops = {
.tx_empty = ic4_tx_empty,
.set_mctrl = ic4_set_mctrl,
.get_mctrl = ic4_get_mctrl,
.stop_tx = ic4_stop_tx,
.start_tx = ic4_start_tx,
.stop_rx = null_void_function,
.enable_ms = null_void_function,
.break_ctl = ic4_break_ctl,
.startup = ic4_startup,
.shutdown = ic4_shutdown,
.set_termios = ic4_set_termios,
.type = ic4_type,
.release_port = null_void_function,
.request_port = ic4_request_port,
};
/*
* Boot-time initialization code
*/
static struct uart_driver ioc4_uart_rs232 = {
.owner = THIS_MODULE,
.driver_name = "ioc4_serial_rs232",
.dev_name = DEVICE_NAME_RS232,
.major = DEVICE_MAJOR,
.minor = DEVICE_MINOR_RS232,
.nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
};
static struct uart_driver ioc4_uart_rs422 = {
.owner = THIS_MODULE,
.driver_name = "ioc4_serial_rs422",
.dev_name = DEVICE_NAME_RS422,
.major = DEVICE_MAJOR,
.minor = DEVICE_MINOR_RS422,
.nr = IOC4_NUM_CARDS * IOC4_NUM_SERIAL_PORTS,
};
/**
* ioc4_serial_remove_one - detach function
*
* @idd: IOC4 master module data for this IOC4
*/
static int ioc4_serial_remove_one(struct ioc4_driver_data *idd)
{
int port_num, port_type;
struct ioc4_control *control;
struct uart_port *the_port;
struct ioc4_port *port;
struct ioc4_soft *soft;
/* If serial driver did not attach, don't try to detach */
control = idd->idd_serial_data;
if (!control)
return 0;
for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
for (port_type = UART_PORT_MIN;
port_type < UART_PORT_COUNT;
port_type++) {
the_port = &control->ic_port[port_num].icp_uart_port
[port_type];
if (the_port) {
switch (port_type) {
case UART_PORT_RS422:
uart_remove_one_port(&ioc4_uart_rs422,
the_port);
break;
default:
case UART_PORT_RS232:
uart_remove_one_port(&ioc4_uart_rs232,
the_port);
break;
}
}
}
port = control->ic_port[port_num].icp_port;
/* we allocate in pairs */
if (!(port_num & 1) && port) {
pci_free_consistent(port->ip_pdev,
TOTAL_RING_BUF_SIZE,
port->ip_cpu_ringbuf,
port->ip_dma_ringbuf);
kfree(port);
}
}
soft = control->ic_soft;
if (soft) {
free_irq(control->ic_irq, soft);
if (soft->is_ioc4_serial_addr) {
iounmap(soft->is_ioc4_serial_addr);
release_mem_region((unsigned long)
soft->is_ioc4_serial_addr,
sizeof(struct ioc4_serial));
}
kfree(soft);
}
kfree(control);
idd->idd_serial_data = NULL;
return 0;
}
/**
* ioc4_serial_core_attach - register with serial core
* This is done during pci probing
* @pdev: handle for this card
* @port_type: PROTO_RS232 or PROTO_RS422
*/
static inline int
ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
{
struct ioc4_port *port;
struct uart_port *the_port;
struct ioc4_driver_data *idd = pci_get_drvdata(pdev);
struct ioc4_control *control = idd->idd_serial_data;
int port_num;
int port_type_idx;
struct uart_driver *u_driver;
DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n",
__func__, pdev, (void *)control));
if (!control)
return -ENODEV;
port_type_idx = (port_type == PROTO_RS232) ? UART_PORT_RS232
: UART_PORT_RS422;
u_driver = (port_type == PROTO_RS232) ? &ioc4_uart_rs232
: &ioc4_uart_rs422;
/* once around for each port on this card */
for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
the_port = &control->ic_port[port_num].icp_uart_port
[port_type_idx];
port = control->ic_port[port_num].icp_port;
port->ip_all_ports[port_type_idx] = the_port;
DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n",
__func__, (void *)the_port,
(void *)port,
port_type == PROTO_RS232 ? "rs232" : "rs422"));
/* membase, iobase and mapbase just need to be non-0 */
the_port->membase = (unsigned char __iomem *)1;
the_port->iobase = (pdev->bus->number << 16) | port_num;
the_port->line = (Num_of_ioc4_cards << 2) | port_num;
the_port->mapbase = port_type;
the_port->type = PORT_16550A;
the_port->fifosize = IOC4_FIFO_CHARS;
the_port->ops = &ioc4_ops;
the_port->irq = control->ic_irq;
the_port->dev = &pdev->dev;
spin_lock_init(&the_port->lock);
if (uart_add_one_port(u_driver, the_port) < 0) {
printk(KERN_WARNING
"%s: unable to add port %d bus %d\n",
__func__, the_port->line, pdev->bus->number);
} else {
DPRINT_CONFIG(
("IOC4 serial port %d irq = %d, bus %d\n",
the_port->line, the_port->irq, pdev->bus->number));
}
}
return 0;
}
/**
* ioc4_serial_attach_one - register attach function
* called per card found from IOC4 master module.
* @idd: Master module data for this IOC4
*/
int
ioc4_serial_attach_one(struct ioc4_driver_data *idd)
{
unsigned long tmp_addr1;
struct ioc4_serial __iomem *serial;
struct ioc4_soft *soft;
struct ioc4_control *control;
int ret = 0;
DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev,
idd->idd_pci_id));
/* PCI-RT does not bring out serial connections.
* Do not attach to this particular IOC4.
*/
if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
return 0;
/* request serial registers */
tmp_addr1 = idd->idd_bar0 + IOC4_SERIAL_OFFSET;
if (!request_mem_region(tmp_addr1, sizeof(struct ioc4_serial),
"sioc4_uart")) {
printk(KERN_WARNING
"ioc4 (%p): unable to get request region for "
"uart space\n", (void *)idd->idd_pdev);
ret = -ENODEV;
goto out1;
}
serial = ioremap(tmp_addr1, sizeof(struct ioc4_serial));
if (!serial) {
printk(KERN_WARNING
"ioc4 (%p) : unable to remap ioc4 serial register\n",
(void *)idd->idd_pdev);
ret = -ENODEV;
goto out2;
}
DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n",
__func__, (void *)idd->idd_misc_regs,
(void *)serial));
/* Get memory for the new card */
control = kzalloc(sizeof(struct ioc4_control), GFP_KERNEL);
if (!control) {
printk(KERN_WARNING "ioc4_attach_one"
": unable to get memory for the IOC4\n");
ret = -ENOMEM;
goto out2;
}
idd->idd_serial_data = control;
/* Allocate the soft structure */
soft = kzalloc(sizeof(struct ioc4_soft), GFP_KERNEL);
if (!soft) {
printk(KERN_WARNING
"ioc4 (%p): unable to get memory for the soft struct\n",
(void *)idd->idd_pdev);
ret = -ENOMEM;
goto out3;
}
spin_lock_init(&soft->is_ir_lock);
soft->is_ioc4_misc_addr = idd->idd_misc_regs;
soft->is_ioc4_serial_addr = serial;
/* Init the IOC4 */
writel(0xf << IOC4_SIO_CR_CMD_PULSE_SHIFT,
&idd->idd_misc_regs->sio_cr.raw);
/* Enable serial port mode select generic PIO pins as outputs */
writel(IOC4_GPCR_UART0_MODESEL | IOC4_GPCR_UART1_MODESEL
| IOC4_GPCR_UART2_MODESEL | IOC4_GPCR_UART3_MODESEL,
&idd->idd_misc_regs->gpcr_s.raw);
/* Clear and disable all serial interrupts */
write_ireg(soft, ~0, IOC4_W_IEC, IOC4_SIO_INTR_TYPE);
writel(~0, &idd->idd_misc_regs->sio_ir.raw);
write_ireg(soft, IOC4_OTHER_IR_SER_MEMERR, IOC4_W_IEC,
IOC4_OTHER_INTR_TYPE);
writel(IOC4_OTHER_IR_SER_MEMERR, &idd->idd_misc_regs->other_ir.raw);
control->ic_soft = soft;
/* Hook up interrupt handler */
if (!request_irq(idd->idd_pdev->irq, ioc4_intr, IRQF_SHARED,
"sgi-ioc4serial", soft)) {
control->ic_irq = idd->idd_pdev->irq;
} else {
printk(KERN_WARNING
"%s : request_irq fails for IRQ 0x%x\n ",
__func__, idd->idd_pdev->irq);
}
ret = ioc4_attach_local(idd);
if (ret)
goto out4;
/* register port with the serial core - 1 rs232, 1 rs422 */
if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS232)))
goto out4;
if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS422)))
goto out5;
Num_of_ioc4_cards++;
return ret;
/* error exits that give back resources */
out5:
ioc4_serial_remove_one(idd);
out4:
kfree(soft);
out3:
kfree(control);
out2:
if (serial)
iounmap(serial);
release_mem_region(tmp_addr1, sizeof(struct ioc4_serial));
out1:
return ret;
}
static struct ioc4_submodule ioc4_serial_submodule = {
.is_name = "IOC4_serial",
.is_owner = THIS_MODULE,
.is_probe = ioc4_serial_attach_one,
.is_remove = ioc4_serial_remove_one,
};
/**
* ioc4_serial_init - module init
*/
static int __init ioc4_serial_init(void)
{
int ret;
/* register with serial core */
if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) {
printk(KERN_WARNING
"%s: Couldn't register rs232 IOC4 serial driver\n",
__func__);
goto out;
}
if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
printk(KERN_WARNING
"%s: Couldn't register rs422 IOC4 serial driver\n",
__func__);
goto out_uart_rs232;
}
/* register with IOC4 main module */
ret = ioc4_register_submodule(&ioc4_serial_submodule);
if (ret)
goto out_uart_rs422;
return 0;
out_uart_rs422:
uart_unregister_driver(&ioc4_uart_rs422);
out_uart_rs232:
uart_unregister_driver(&ioc4_uart_rs232);
out:
return ret;
}
static void __exit ioc4_serial_exit(void)
{
ioc4_unregister_submodule(&ioc4_serial_submodule);
uart_unregister_driver(&ioc4_uart_rs232);
uart_unregister_driver(&ioc4_uart_rs422);
}
late_initcall(ioc4_serial_init); /* Call only after tty init is done */
module_exit(ioc4_serial_exit);
MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
MODULE_DESCRIPTION("Serial PCI driver module for SGI IOC4 Base-IO Card");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TeamOrion-Devices/Orion_kernel_motorola_msm8226 | fs/bio-integrity.c | 4943 | 21679 | /*
* bio-integrity.c - bio data integrity extensions
*
* Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*/
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
struct integrity_slab {
struct kmem_cache *slab;
unsigned short nr_vecs;
char name[8];
};
#define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) }
struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = {
IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES),
};
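/* So bip_slab[2], for example, is the "bip-16" cache: one payload struct
* plus room for up to 16 inline integrity bio_vecs (sized in
* bio_integrity_init below).
*/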
#undef IS
static struct workqueue_struct *kintegrityd_wq;
static inline unsigned int vecs_to_idx(unsigned int nr)
{
switch (nr) {
case 1:
return 0;
case 2 ... 4:
return 1;
case 5 ... 16:
return 2;
case 17 ... 64:
return 3;
case 65 ... 128:
return 4;
case 129 ... BIO_MAX_PAGES:
return 5;
default:
BUG();
}
}
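/* Example: vecs_to_idx(1) == 0 ("bip-1"), vecs_to_idx(10) == 2 ("bip-16"),
* and vecs_to_idx(BIO_MAX_PAGES) yields the largest index, which this
* code routes through the mempool (see use_bip_pool below).
*/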
static inline int use_bip_pool(unsigned int idx)
{
if (idx == BIOVEC_MAX_IDX)
return 1;
return 0;
}
/**
* bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
* @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
gfp_t gfp_mask,
unsigned int nr_vecs,
struct bio_set *bs)
{
struct bio_integrity_payload *bip;
unsigned int idx = vecs_to_idx(nr_vecs);
BUG_ON(bio == NULL);
bip = NULL;
/* Lower order allocations come straight from slab */
if (!use_bip_pool(idx))
bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask);
/* Use mempool if lower order alloc failed or max vecs were requested */
if (bip == NULL) {
idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
if (unlikely(bip == NULL)) {
printk(KERN_ERR "%s: could not alloc bip\n", __func__);
return NULL;
}
}
memset(bip, 0, sizeof(*bip));
bip->bip_slab = idx;
bip->bip_bio = bio;
bio->bi_integrity = bip;
return bip;
}
EXPORT_SYMBOL(bio_integrity_alloc_bioset);
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
gfp_t gfp_mask,
unsigned int nr_vecs)
{
return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
* @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
void bio_integrity_free(struct bio *bio, struct bio_set *bs)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
BUG_ON(bip == NULL);
/* A cloned bio doesn't own the integrity metadata */
if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY)
&& bip->bip_buf != NULL)
kfree(bip->bip_buf);
if (use_bip_pool(bip->bip_slab))
mempool_free(bip, bs->bio_integrity_pool);
else
kmem_cache_free(bip_slab[bip->bip_slab].slab, bip);
bio->bi_integrity = NULL;
}
EXPORT_SYMBOL(bio_integrity_free);
/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
* @page: page containing integrity metadata
* @len: number of bytes of integrity metadata in page
* @offset: start offset within page
*
* Description: Attach a page containing integrity metadata to bio.
*/
int bio_integrity_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
iv = bip_vec_idx(bip, bip->bip_vcnt);
BUG_ON(iv == NULL);
iv->bv_page = page;
iv->bv_len = len;
iv->bv_offset = offset;
bip->bip_vcnt++;
return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);
static int bdev_integrity_enabled(struct block_device *bdev, int rw)
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
if (bi == NULL)
return 0;
if (rw == READ && bi->verify_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_READ))
return 1;
if (rw == WRITE && bi->generate_fn != NULL &&
(bi->flags & INTEGRITY_FLAG_WRITE))
return 1;
return 0;
}
/**
* bio_integrity_enabled - Check whether integrity can be passed
* @bio: bio to check
*
* Description: Determines whether bio_integrity_prep() can be called
* on this bio or not. bio data direction and target device must be
* set prior to calling. The function honors the write_generate and
* read_verify flags in sysfs.
*/
int bio_integrity_enabled(struct bio *bio)
{
/* Already protected? */
if (bio_integrity(bio))
return 0;
return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio));
}
EXPORT_SYMBOL(bio_integrity_enabled);
/**
* bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto
* @bi: blk_integrity profile for device
* @sectors: Number of 512 sectors to convert
*
* Description: The block layer calculates everything in 512 byte
* sectors but integrity metadata is done in terms of the hardware
* sector size of the storage device. Convert the block layer sectors
* to physical sectors.
*/
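/* Example: a 4 KiB request is 8 block-layer (512 B) sectors; on a 4096 B
* hardware-sector device that is a single sector, hence the >> 3 below.
*/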
static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
unsigned int sectors)
{
/* At this point there are only 512b or 4096b DIF/EPP devices */
if (bi->sector_size == 4096)
return sectors >> 3;
return sectors;
}
/**
* bio_integrity_tag_size - Retrieve integrity tag space
* @bio: bio to inspect
*
* Description: Returns the maximum number of tag bytes that can be
* attached to this bio. Filesystems can use this to determine how
* much metadata to attach to an I/O.
*/
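/* Worked example (illustrative values): with bi->tag_size == 2 and a
* 512 B sector size, a 4096 B bio can carry 2 * (4096 / 512) = 16 bytes
* of tag space.
*/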
unsigned int bio_integrity_tag_size(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
BUG_ON(bio->bi_size == 0);
return bi->tag_size * (bio->bi_size / bi->sector_size);
}
EXPORT_SYMBOL(bio_integrity_tag_size);
int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
unsigned int nr_sectors;
BUG_ON(bip->bip_buf == NULL);
if (bi->tag_size == 0)
return -1;
nr_sectors = bio_integrity_hw_sectors(bi,
DIV_ROUND_UP(len, bi->tag_size));
if (nr_sectors * bi->tuple_size > bip->bip_size) {
printk(KERN_ERR "%s: tag too big for bio: %u > %u\n",
__func__, nr_sectors * bi->tuple_size, bip->bip_size);
return -1;
}
if (set)
bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
else
bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors);
return 0;
}
/**
* bio_integrity_set_tag - Attach a tag buffer to a bio
* @bio: bio to attach buffer to
* @tag_buf: Pointer to a buffer containing tag data
* @len: Length of the included buffer
*
* Description: Use this function to tag a bio by leveraging the extra
* space provided by devices formatted with integrity protection. The
* size of the integrity buffer must be <= to the size reported by
* bio_integrity_tag_size().
*/
int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
BUG_ON(bio_data_dir(bio) != WRITE);
return bio_integrity_tag(bio, tag_buf, len, 1);
}
EXPORT_SYMBOL(bio_integrity_set_tag);
/**
* bio_integrity_get_tag - Retrieve a tag buffer from a bio
* @bio: bio to retrieve buffer from
* @tag_buf: Pointer to a buffer for the tag data
* @len: Length of the target buffer
*
* Description: Use this function to retrieve the tag buffer from a
* completed I/O. The size of the integrity buffer must be <= to the
* size reported by bio_integrity_tag_size().
*/
int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
{
BUG_ON(bio_data_dir(bio) != READ);
return bio_integrity_tag(bio, tag_buf, len, 0);
}
EXPORT_SYMBOL(bio_integrity_get_tag);
/**
* bio_integrity_generate - Generate integrity metadata for a bio
* @bio: bio to generate integrity metadata for
*
* Description: Generates integrity metadata for a bio by calling the
* block device's generation callback function. The bio must have a
* bip attached with enough room to accommodate the generated
* integrity metadata.
*/
static void bio_integrity_generate(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_sector;
unsigned int i, sectors, total;
void *prot_buf = bio->bi_integrity->bip_buf;
total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) {
void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf;
bix.sector = sector;
bi->generate_fn(&bix);
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size);
kunmap_atomic(kaddr);
}
}
static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
if (bi)
return bi->tuple_size;
return 0;
}
/**
* bio_integrity_prep - Prepare bio for integrity I/O
* @bio: bio to prepare
*
* Description: Allocates a buffer for integrity metadata, maps the
* pages and attaches them to a bio. The bio must have data
* direction, target device and start sector set prior to calling. In
* the WRITE case, integrity metadata will be generated using the
* block device's integrity function. In the READ case, the buffer
* will be prepared for DMA and a suitable end_io handler set up.
*/
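/* Typical call pattern (sketch only; exact call sites vary by kernel
* version):
*
* if (bio_integrity_enabled(bio))
* if (bio_integrity_prep(bio) < 0)
* fail the bio instead of submitting it
*/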
int bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct blk_integrity *bi;
struct request_queue *q;
void *buf;
unsigned long start, end;
unsigned int len, nr_pages;
unsigned int bytes, offset, i;
unsigned int sectors;
bi = bdev_get_integrity(bio->bi_bdev);
q = bdev_get_queue(bio->bi_bdev);
BUG_ON(bi == NULL);
BUG_ON(bio_integrity(bio));
sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio));
/* Allocate kernel buffer for protection data */
len = sectors * blk_integrity_tuple_size(bi);
buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");
return -ENOMEM;
}
end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
start = ((unsigned long) buf) >> PAGE_SHIFT;
nr_pages = end - start;
/* Allocate bio integrity payload and integrity vectors */
bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
if (unlikely(bip == NULL)) {
printk(KERN_ERR "could not allocate data integrity bioset\n");
kfree(buf);
return -EIO;
}
bip->bip_buf = buf;
bip->bip_size = len;
bip->bip_sector = bio->bi_sector;
/* Map it */
offset = offset_in_page(buf);
for (i = 0 ; i < nr_pages ; i++) {
int ret;
bytes = PAGE_SIZE - offset;
if (len <= 0)
break;
if (bytes > len)
bytes = len;
ret = bio_integrity_add_page(bio, virt_to_page(buf),
bytes, offset);
if (ret == 0)
return 0;
if (ret < bytes)
break;
buf += bytes;
len -= bytes;
offset = 0;
}
/* Install custom I/O completion handler if read verify is enabled */
if (bio_data_dir(bio) == READ) {
bip->bip_end_io = bio->bi_end_io;
bio->bi_end_io = bio_integrity_endio;
}
/* Auto-generate integrity metadata if this is a write */
if (bio_data_dir(bio) == WRITE)
bio_integrity_generate(bio);
return 0;
}
EXPORT_SYMBOL(bio_integrity_prep);
/**
* bio_integrity_verify - Verify integrity metadata for a bio
* @bio: bio to verify
*
* Description: This function is called to verify the integrity of a
* bio. The data in the bio io_vec is compared to the integrity
* metadata returned by the HBA.
*/
static int bio_integrity_verify(struct bio *bio)
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_integrity->bip_sector;
unsigned int i, sectors, total, ret;
void *prot_buf = bio->bi_integrity->bip_buf;
ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
bio_for_each_segment(bv, bio, i) {
void *kaddr = kmap_atomic(bv->bv_page);
bix.data_buf = kaddr + bv->bv_offset;
bix.data_size = bv->bv_len;
bix.prot_buf = prot_buf;
bix.sector = sector;
ret = bi->verify_fn(&bix);
if (ret) {
kunmap_atomic(kaddr);
return ret;
}
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_size);
kunmap_atomic(kaddr);
}
return ret;
}
/**
* bio_integrity_verify_fn - Integrity I/O completion worker
* @work: Work struct stored in bio to be verified
*
* Description: This workqueue function is called to complete a READ
* request. The function verifies the transferred integrity metadata
* and then calls the original bio end_io function.
*/
static void bio_integrity_verify_fn(struct work_struct *work)
{
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
int error;
error = bio_integrity_verify(bio);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
bio_endio(bio, error);
}
/**
* bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
* @error: Pointer to errno
*
* Description: Completion for integrity I/O
*
* Normally I/O completion is done in interrupt context. However,
* verifying I/O integrity is a time-consuming task which must be run
* in process context. This function postpones completion
* accordingly.
*/
void bio_integrity_endio(struct bio *bio, int error)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
BUG_ON(bip->bip_bio != bio);
/* In case of an I/O error there is no point in verifying the
* integrity metadata. Restore original bio end_io handler
* and run it.
*/
if (error) {
bio->bi_end_io = bip->bip_end_io;
bio_endio(bio, error);
return;
}
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
}
EXPORT_SYMBOL(bio_integrity_endio);
/**
* bio_integrity_mark_head - Advance bip_vec skip bytes
* @bip: Integrity vector to advance
* @skip: Number of bytes to advance it
*/
void bio_integrity_mark_head(struct bio_integrity_payload *bip,
unsigned int skip)
{
struct bio_vec *iv;
unsigned int i;
bip_for_each_vec(iv, bip, i) {
if (skip == 0) {
bip->bip_idx = i;
return;
} else if (skip >= iv->bv_len) {
skip -= iv->bv_len;
} else { /* skip < iv->bv_len */
iv->bv_offset += skip;
iv->bv_len -= skip;
bip->bip_idx = i;
return;
}
}
}
/**
* bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
* @bip: Integrity vector to truncate
* @len: New length of integrity vector
*/
void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
unsigned int len)
{
struct bio_vec *iv;
unsigned int i;
bip_for_each_vec(iv, bip, i) {
if (len == 0) {
bip->bip_vcnt = i;
return;
} else if (len >= iv->bv_len) {
len -= iv->bv_len;
} else { /* len < iv->bv_len */
iv->bv_len = len;
len = 0;
}
}
}
/**
* bio_integrity_advance - Advance integrity vector
* @bio: bio whose integrity vector to update
* @bytes_done: number of data bytes that have been completed
*
* Description: This function calculates how many integrity bytes the
* number of completed data bytes correspond to and advances the
* integrity vector accordingly.
*/
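/* Illustrative: bytes_done = 4096 is 8 sectors (4096 >> 9); on a 512 B
* device with an 8-byte tuple, the integrity vector advances
* 8 * 8 = 64 bytes (the numbers are examples, not requirements).
*/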
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
unsigned int nr_sectors;
BUG_ON(bip == NULL);
BUG_ON(bi == NULL);
nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9);
bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_advance);
/**
* bio_integrity_trim - Trim integrity vector
* @bio: bio whose integrity vector to update
* @offset: offset to first data sector
* @sectors: number of data sectors
*
* Description: Used to trim the integrity vector in a cloned bio.
* The ivec will be advanced corresponding to 'offset' data sectors
* and the length will be truncated corresponding to 'len' data
* sectors.
*/
void bio_integrity_trim(struct bio *bio, unsigned int offset,
unsigned int sectors)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
unsigned int nr_sectors;
BUG_ON(bip == NULL);
BUG_ON(bi == NULL);
BUG_ON(!bio_flagged(bio, BIO_CLONED));
nr_sectors = bio_integrity_hw_sectors(bi, sectors);
bip->bip_sector = bip->bip_sector + offset;
bio_integrity_mark_head(bip, offset * bi->tuple_size);
bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
}
EXPORT_SYMBOL(bio_integrity_trim);
/**
* bio_integrity_split - Split integrity metadata
* @bio: Protected bio
* @bp: Resulting bio_pair
* @sectors: Offset
*
* Description: Splits an integrity page into a bio_pair.
*/
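/* Illustrative: splitting at sectors == 8 on a 512 B device with 8-byte
* tuples moves 64 bytes of metadata -- iv1 keeps the first 64 bytes and
* iv2 is advanced past them, as computed below.
*/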
void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
{
struct blk_integrity *bi;
struct bio_integrity_payload *bip = bio->bi_integrity;
unsigned int nr_sectors;
if (bio_integrity(bio) == 0)
return;
bi = bdev_get_integrity(bio->bi_bdev);
BUG_ON(bi == NULL);
BUG_ON(bip->bip_vcnt != 1);
nr_sectors = bio_integrity_hw_sectors(bi, sectors);
bp->bio1.bi_integrity = &bp->bip1;
bp->bio2.bi_integrity = &bp->bip2;
bp->iv1 = bip->bip_vec[0];
bp->iv2 = bip->bip_vec[0];
bp->bip1.bip_vec[0] = bp->iv1;
bp->bip2.bip_vec[0] = bp->iv2;
bp->iv1.bv_len = sectors * bi->tuple_size;
bp->iv2.bv_offset += sectors * bi->tuple_size;
bp->iv2.bv_len -= sectors * bi->tuple_size;
bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
}
EXPORT_SYMBOL(bio_integrity_split);
/**
* bio_integrity_clone - Callback for cloning bios with integrity metadata
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
* @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
gfp_t gfp_mask, struct bio_set *bs)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
if (bip == NULL)
return -EIO;
memcpy(bip->bip_vec, bip_src->bip_vec,
bip_src->bip_vcnt * sizeof(struct bio_vec));
bip->bip_sector = bip_src->bip_sector;
bip->bip_vcnt = bip_src->bip_vcnt;
bip->bip_idx = bip_src->bip_idx;
return 0;
}
EXPORT_SYMBOL(bio_integrity_clone);
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);
if (bs->bio_integrity_pool)
return 0;
bs->bio_integrity_pool =
mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);
if (!bs->bio_integrity_pool)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);
void bioset_integrity_free(struct bio_set *bs)
{
if (bs->bio_integrity_pool)
mempool_destroy(bs->bio_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);
void __init bio_integrity_init(void)
{
unsigned int i;
/*
* kintegrityd won't block much but may burn a lot of CPU cycles.
* Make it highpri CPU intensive wq with max concurrency of 1.
*/
kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
if (!kintegrityd_wq)
panic("Failed to create kintegrityd\n");
for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) {
unsigned int size;
size = sizeof(struct bio_integrity_payload)
+ bip_slab[i].nr_vecs * sizeof(struct bio_vec);
bip_slab[i].slab =
kmem_cache_create(bip_slab[i].name, size, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
}
| gpl-2.0 |
mifl/android_kernel_pantech_ef65s | drivers/video/bf54x-lq043fb.c | 4943 | 19050 | /*
* File: drivers/video/bf54x-lq043.c
* Based on:
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
* Description: ADSP-BF54x Framebuffer driver
*
*
* Modified:
* Copyright 2007-2008 Analog Devices Inc.
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/device.h>
#include <linux/backlight.h>
#include <linux/lcd.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/blackfin.h>
#include <asm/irq.h>
#include <asm/dpmc.h>
#include <asm/dma-mapping.h>
#include <asm/dma.h>
#include <asm/gpio.h>
#include <asm/portmux.h>
#include <mach/bf54x-lq043.h>
#define NO_BL_SUPPORT
#define DRIVER_NAME "bf54x-lq043"
static char driver_name[] = DRIVER_NAME;
#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
#define EPPI0_18 {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, \
P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, \
P_PPI0_D11, P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15, P_PPI0_D16, P_PPI0_D17, 0}
#define EPPI0_24 {P_PPI0_D18, P_PPI0_D19, P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23, 0}
struct bfin_bf54xfb_info {
struct fb_info *fb;
struct device *dev;
struct bfin_bf54xfb_mach_info *mach_info;
unsigned char *fb_buffer; /* RGB Buffer */
dma_addr_t dma_handle;
int lq043_open_cnt;
int irq;
spinlock_t lock; /* lock */
};
static int nocursor;
module_param(nocursor, int, 0644);
MODULE_PARM_DESC(nocursor, "cursor enable/disable");
static int outp_rgb666;
module_param(outp_rgb666, int, 0);
MODULE_PARM_DESC(outp_rgb666, "Output 18-bit RGB666");
#define LCD_X_RES 480 /* Horizontal Resolution */
#define LCD_Y_RES 272 /* Vertical Resolution */
#define LCD_BPP 24 /* Bit Per Pixel */
#define DMA_BUS_SIZE 32
/* -- Horizontal synchronizing --
*
* Timing characteristics taken from the SHARP LQ043T1DG01 datasheet
* (LCY-W-06602A Page 9 of 22)
*
* Clock Frequency 1/Tc Min 7.83 Typ 9.00 Max 9.26 MHz
*
* Period TH - 525 - Clock
* Pulse width THp - 41 - Clock
* Horizontal period THd - 480 - Clock
* Back porch THb - 2 - Clock
* Front porch THf - 2 - Clock
*
* -- Vertical synchronizing --
* Period TV - 286 - Line
* Pulse width TVp - 10 - Line
* Vertical period TVd - 272 - Line
* Back porch TVb - 2 - Line
* Front porch TVf - 2 - Line
*/
#define LCD_CLK (8*1000*1000) /* 8MHz */
/* # active data to transfer after Horizontal Delay clock */
#define EPPI_HCOUNT LCD_X_RES
/* # active lines to transfer after Vertical Delay clock */
#define EPPI_VCOUNT LCD_Y_RES
/* Samples per Line = 480 (active data) + 45 (padding) */
#define EPPI_LINE 525
/* Lines per Frame = 272 (active data) + 14 (padding) */
#define EPPI_FRAME 286
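/*
 * Sanity check (added note): with the 8 MHz LCD_CLK above, one frame
 * takes EPPI_LINE * EPPI_FRAME = 525 * 286 = 150150 clocks, i.e. a
 * refresh rate of roughly 8 MHz / 150150 ~= 53 Hz.
 */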
/* FS1 (Hsync) Width (Typical)*/
#define EPPI_FS1W_HBL 41
/* FS1 (Hsync) Period (Typical) */
#define EPPI_FS1P_AVPL EPPI_LINE
/* Horizontal Delay clock after assertion of Hsync (Typical) */
#define EPPI_HDELAY 43
/* FS2 (Vsync) Width = FS1 (Hsync) Period * 10 */
#define EPPI_FS2W_LVB (EPPI_LINE * 10)
/* FS2 (Vsync) Period = FS1 (Hsync) Period * Lines per Frame */
#define EPPI_FS2P_LAVF (EPPI_LINE * EPPI_FRAME)
/* Vertical Delay after assertion of Vsync (Vsync width 10 + back porch 2 = 12 lines) */
#define EPPI_VDELAY 12
#define EPPI_CLIP 0xFF00FF00
/* EPPI Control register configuration value for RGB out
* - EPPI as Output
* GP 2 frame sync mode,
* Internal Clock generation disabled, Internal FS generation enabled,
* Receives samples on EPPI_CLK rising edge, Transmits samples on EPPI_CLK falling edge,
* FS1 & FS2 are active high,
* DLEN = 6 (24 bits for RGB888 out) or 5 (18 bits for RGB666 out)
* DMA Unpacking disabled when RGB Formatting is enabled, otherwise DMA unpacking enabled
* Swapping Enabled,
* One (DMA) Channel Mode,
* RGB Formatting Enabled for RGB666 output, disabled for RGB888 output
* Regular watermark - when FIFO is 100% full,
* Urgent watermark - when FIFO is 75% full
*/
#define EPPI_CONTROL (0x20136E2E | SWAPEN)
static inline u16 get_eppi_clkdiv(u32 target_ppi_clk)
{
u32 sclk = get_sclk();
/* EPPI_CLK = (SCLK) / (2 * (EPPI_CLKDIV[15:0] + 1)) */
return (((sclk / target_ppi_clk) / 2) - 1);
}
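/*
 * Worked example (added note, assuming a hypothetical 133 MHz SCLK):
 * get_eppi_clkdiv(8000000) = ((133000000 / 8000000) / 2) - 1 = 7, so
 * EPPI_CLK = 133 MHz / (2 * (7 + 1)) ~= 8.3 MHz.
 */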
static void config_ppi(struct bfin_bf54xfb_info *fbi)
{
u16 eppi_clkdiv = get_eppi_clkdiv(LCD_CLK);
bfin_write_EPPI0_FS1W_HBL(EPPI_FS1W_HBL);
bfin_write_EPPI0_FS1P_AVPL(EPPI_FS1P_AVPL);
bfin_write_EPPI0_FS2W_LVB(EPPI_FS2W_LVB);
bfin_write_EPPI0_FS2P_LAVF(EPPI_FS2P_LAVF);
bfin_write_EPPI0_CLIP(EPPI_CLIP);
bfin_write_EPPI0_FRAME(EPPI_FRAME);
bfin_write_EPPI0_LINE(EPPI_LINE);
bfin_write_EPPI0_HCOUNT(EPPI_HCOUNT);
bfin_write_EPPI0_HDELAY(EPPI_HDELAY);
bfin_write_EPPI0_VCOUNT(EPPI_VCOUNT);
bfin_write_EPPI0_VDELAY(EPPI_VDELAY);
bfin_write_EPPI0_CLKDIV(eppi_clkdiv);
/*
* DLEN = 6 (24 bits for RGB888 out) or 5 (18 bits for RGB666 out)
* RGB Formatting Enabled for RGB666 output, disabled for RGB888 output
*/
if (outp_rgb666)
bfin_write_EPPI0_CONTROL((EPPI_CONTROL & ~DLENGTH) | DLEN_18 |
RGB_FMT_EN);
else
bfin_write_EPPI0_CONTROL(((EPPI_CONTROL & ~DLENGTH) | DLEN_24) &
~RGB_FMT_EN);
}
static int config_dma(struct bfin_bf54xfb_info *fbi)
{
set_dma_config(CH_EPPI0,
set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
INTR_DISABLE, DIMENSION_2D,
DATA_SIZE_32,
DMA_NOSYNC_KEEP_DMA_BUF));
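/*
 * 2D transfer geometry (added note): each scanline is
 * (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE = (480 * 24) / 32 = 360 words of
 * 32 bits, stepped DMA_BUS_SIZE / 8 = 4 bytes at a time, repeated for
 * the 272 lines of the frame.
 */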
set_dma_x_count(CH_EPPI0, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
set_dma_x_modify(CH_EPPI0, DMA_BUS_SIZE / 8);
set_dma_y_count(CH_EPPI0, LCD_Y_RES);
set_dma_y_modify(CH_EPPI0, DMA_BUS_SIZE / 8);
set_dma_start_addr(CH_EPPI0, (unsigned long)fbi->fb_buffer);
return 0;
}
static int request_ports(struct bfin_bf54xfb_info *fbi)
{
u16 eppi_req_18[] = EPPI0_18;
u16 disp = fbi->mach_info->disp;
if (gpio_request_one(disp, GPIOF_OUT_INIT_HIGH, DRIVER_NAME)) {
printk(KERN_ERR "Requesting GPIO %d failed\n", disp);
return -EFAULT;
}
if (peripheral_request_list(eppi_req_18, DRIVER_NAME)) {
printk(KERN_ERR "Requesting Peripherals failed\n");
gpio_free(disp);
return -EFAULT;
}
if (!outp_rgb666) {
u16 eppi_req_24[] = EPPI0_24;
if (peripheral_request_list(eppi_req_24, DRIVER_NAME)) {
printk(KERN_ERR "Requesting Peripherals failed\n");
peripheral_free_list(eppi_req_18);
gpio_free(disp);
return -EFAULT;
}
}
return 0;
}
static void free_ports(struct bfin_bf54xfb_info *fbi)
{
u16 eppi_req_18[] = EPPI0_18;
gpio_free(fbi->mach_info->disp);
peripheral_free_list(eppi_req_18);
if (!outp_rgb666) {
u16 eppi_req_24[] = EPPI0_24;
peripheral_free_list(eppi_req_24);
}
}
static int bfin_bf54x_fb_open(struct fb_info *info, int user)
{
struct bfin_bf54xfb_info *fbi = info->par;
spin_lock(&fbi->lock);
fbi->lq043_open_cnt++;
if (fbi->lq043_open_cnt <= 1) {
bfin_write_EPPI0_CONTROL(0);
SSYNC();
config_dma(fbi);
config_ppi(fbi);
/* start dma */
enable_dma(CH_EPPI0);
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN);
}
spin_unlock(&fbi->lock);
return 0;
}
static int bfin_bf54x_fb_release(struct fb_info *info, int user)
{
struct bfin_bf54xfb_info *fbi = info->par;
spin_lock(&fbi->lock);
fbi->lq043_open_cnt--;
if (fbi->lq043_open_cnt <= 0) {
bfin_write_EPPI0_CONTROL(0);
SSYNC();
disable_dma(CH_EPPI0);
}
spin_unlock(&fbi->lock);
return 0;
}
static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
switch (var->bits_per_pixel) {
case 24:/* TRUECOLOUR, 16m */
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = var->green.length = var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
var->transp.msb_right = 0;
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
break;
default:
pr_debug("%s: depth not supported: %u BPP\n", __func__,
var->bits_per_pixel);
return -EINVAL;
}
if (info->var.xres != var->xres || info->var.yres != var->yres ||
info->var.xres_virtual != var->xres_virtual ||
info->var.yres_virtual != var->yres_virtual) {
pr_debug("%s: Resolution not supported: X%u x Y%u \n",
__func__, var->xres, var->yres);
return -EINVAL;
}
/*
* Memory limit
*/
if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
__func__, var->yres_virtual);
return -ENOMEM;
}
return 0;
}
int bfin_bf54x_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
if (nocursor)
return 0;
else
return -EINVAL; /* just to force soft_cursor() call */
}
static int bfin_bf54x_fb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp,
struct fb_info *info)
{
if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
return -EINVAL;
if (info->var.grayscale) {
/* grayscale = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
}
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 value;
/* Place color in the pseudopalette */
if (regno > 16)
return -EINVAL;
red >>= (16 - info->var.red.length);
green >>= (16 - info->var.green.length);
blue >>= (16 - info->var.blue.length);
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
value &= 0xFFFFFF;
((u32 *) (info->pseudo_palette))[regno] = value;
}
return 0;
}
static struct fb_ops bfin_bf54x_fb_ops = {
.owner = THIS_MODULE,
.fb_open = bfin_bf54x_fb_open,
.fb_release = bfin_bf54x_fb_release,
.fb_check_var = bfin_bf54x_fb_check_var,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_cursor = bfin_bf54x_fb_cursor,
.fb_setcolreg = bfin_bf54x_fb_setcolreg,
};
#ifndef NO_BL_SUPPORT
static int bl_get_brightness(struct backlight_device *bd)
{
return 0;
}
static const struct backlight_ops bfin_lq043fb_bl_ops = {
.get_brightness = bl_get_brightness,
};
static struct backlight_device *bl_dev;
static int bfin_lcd_get_power(struct lcd_device *dev)
{
return 0;
}
static int bfin_lcd_set_power(struct lcd_device *dev, int power)
{
return 0;
}
static int bfin_lcd_get_contrast(struct lcd_device *dev)
{
return 0;
}
static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
{
return 0;
}
static int bfin_lcd_check_fb(struct lcd_device *dev, struct fb_info *fi)
{
if (!fi || (fi == &bfin_bf54x_fb))
return 1;
return 0;
}
static struct lcd_ops bfin_lcd_ops = {
.get_power = bfin_lcd_get_power,
.set_power = bfin_lcd_set_power,
.get_contrast = bfin_lcd_get_contrast,
.set_contrast = bfin_lcd_set_contrast,
.check_fb = bfin_lcd_check_fb,
};
static struct lcd_device *lcd_dev;
#endif
static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
{
/*struct bfin_bf54xfb_info *info = dev_id;*/
u16 status = bfin_read_EPPI0_STATUS();
bfin_write_EPPI0_STATUS(0xFFFF);
if (status) {
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN);
disable_dma(CH_EPPI0);
/* start dma */
enable_dma(CH_EPPI0);
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN);
bfin_write_EPPI0_STATUS(0xFFFF);
}
return IRQ_HANDLED;
}
static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
{
#ifndef NO_BL_SUPPORT
struct backlight_properties props;
#endif
struct bfin_bf54xfb_info *info;
struct fb_info *fbinfo;
int ret;
printk(KERN_INFO DRIVER_NAME ": FrameBuffer initializing...\n");
if (request_dma(CH_EPPI0, "CH_EPPI0") < 0) {
printk(KERN_ERR DRIVER_NAME
": couldn't request CH_EPPI0 DMA\n");
ret = -EFAULT;
goto out1;
}
fbinfo =
framebuffer_alloc(sizeof(struct bfin_bf54xfb_info), &pdev->dev);
if (!fbinfo) {
ret = -ENOMEM;
goto out2;
}
info = fbinfo->par;
info->fb = fbinfo;
info->dev = &pdev->dev;
platform_set_drvdata(pdev, fbinfo);
strcpy(fbinfo->fix.id, driver_name);
info->mach_info = pdev->dev.platform_data;
if (info->mach_info == NULL) {
dev_err(&pdev->dev,
"no platform data for lcd, cannot attach\n");
ret = -EINVAL;
goto out3;
}
fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
fbinfo->fix.type_aux = 0;
fbinfo->fix.xpanstep = 0;
fbinfo->fix.ypanstep = 0;
fbinfo->fix.ywrapstep = 0;
fbinfo->fix.accel = FB_ACCEL_NONE;
fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
fbinfo->var.nonstd = 0;
fbinfo->var.activate = FB_ACTIVATE_NOW;
fbinfo->var.height = info->mach_info->height;
fbinfo->var.width = info->mach_info->width;
fbinfo->var.accel_flags = 0;
fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
fbinfo->fbops = &bfin_bf54x_fb_ops;
fbinfo->flags = FBINFO_FLAG_DEFAULT;
fbinfo->var.xres = info->mach_info->xres.defval;
fbinfo->var.xres_virtual = info->mach_info->xres.defval;
fbinfo->var.yres = info->mach_info->yres.defval;
fbinfo->var.yres_virtual = info->mach_info->yres.defval;
fbinfo->var.bits_per_pixel = info->mach_info->bpp.defval;
fbinfo->var.upper_margin = 0;
fbinfo->var.lower_margin = 0;
fbinfo->var.vsync_len = 0;
fbinfo->var.left_margin = 0;
fbinfo->var.right_margin = 0;
fbinfo->var.hsync_len = 0;
fbinfo->var.red.offset = 16;
fbinfo->var.green.offset = 8;
fbinfo->var.blue.offset = 0;
fbinfo->var.transp.offset = 0;
fbinfo->var.red.length = 8;
fbinfo->var.green.length = 8;
fbinfo->var.blue.length = 8;
fbinfo->var.transp.length = 0;
fbinfo->fix.smem_len = info->mach_info->xres.max *
info->mach_info->yres.max * info->mach_info->bpp.max / 8;
fbinfo->fix.line_length = fbinfo->var.xres_virtual *
fbinfo->var.bits_per_pixel / 8;
info->fb_buffer =
dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
GFP_KERNEL);
if (NULL == info->fb_buffer) {
printk(KERN_ERR DRIVER_NAME
": couldn't allocate dma buffer.\n");
ret = -ENOMEM;
goto out3;
}
fbinfo->screen_base = (void *)info->fb_buffer;
fbinfo->fix.smem_start = (int)info->fb_buffer;
fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
if (!fbinfo->pseudo_palette) {
printk(KERN_ERR DRIVER_NAME
": failed to allocate pseudo_palette\n");
ret = -ENOMEM;
goto out4;
}
if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
< 0) {
printk(KERN_ERR DRIVER_NAME
": failed to allocate colormap (%d entries)\n",
BFIN_LCD_NBR_PALETTE_ENTRIES);
ret = -EFAULT;
goto out5;
}
if (request_ports(info)) {
printk(KERN_ERR DRIVER_NAME ": couldn't request gpio port.\n");
ret = -EFAULT;
goto out6;
}
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0) {
ret = -EINVAL;
goto out7;
}
if (request_irq(info->irq, bfin_bf54x_irq_error, 0,
"PPI ERROR", info) < 0) {
printk(KERN_ERR DRIVER_NAME
": unable to request PPI ERROR IRQ\n");
ret = -EFAULT;
goto out7;
}
if (register_framebuffer(fbinfo) < 0) {
printk(KERN_ERR DRIVER_NAME
": unable to register framebuffer.\n");
ret = -EINVAL;
goto out8;
}
#ifndef NO_BL_SUPPORT
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = 255;
bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
&bfin_lq043fb_bl_ops, &props);
if (IS_ERR(bl_dev)) {
printk(KERN_ERR DRIVER_NAME
": unable to register backlight.\n");
ret = -EINVAL;
unregister_framebuffer(fbinfo);
goto out8;
}
lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
lcd_dev->props.max_contrast = 255;
printk(KERN_INFO "Done.\n");
#endif
return 0;
out8:
free_irq(info->irq, info);
out7:
free_ports(info);
out6:
fb_dealloc_cmap(&fbinfo->cmap);
out5:
kfree(fbinfo->pseudo_palette);
out4:
dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
info->dma_handle);
out3:
framebuffer_release(fbinfo);
out2:
free_dma(CH_EPPI0);
out1:
platform_set_drvdata(pdev, NULL);
return ret;
}
static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct bfin_bf54xfb_info *info = fbinfo->par;
free_dma(CH_EPPI0);
free_irq(info->irq, info);
if (info->fb_buffer != NULL)
dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
info->dma_handle);
kfree(fbinfo->pseudo_palette);
fb_dealloc_cmap(&fbinfo->cmap);
#ifndef NO_BL_SUPPORT
lcd_device_unregister(lcd_dev);
backlight_device_unregister(bl_dev);
#endif
unregister_framebuffer(fbinfo);
free_ports(info);
printk(KERN_INFO DRIVER_NAME ": Unregister LCD driver.\n");
return 0;
}
#ifdef CONFIG_PM
static int bfin_bf54x_suspend(struct platform_device *pdev, pm_message_t state)
{
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() & ~EPPI_EN);
disable_dma(CH_EPPI0);
bfin_write_EPPI0_STATUS(0xFFFF);
return 0;
}
static int bfin_bf54x_resume(struct platform_device *pdev)
{
struct fb_info *fbinfo = platform_get_drvdata(pdev);
struct bfin_bf54xfb_info *info = fbinfo->par;
if (info->lq043_open_cnt) {
bfin_write_EPPI0_CONTROL(0);
SSYNC();
config_dma(info);
config_ppi(info);
/* start dma */
enable_dma(CH_EPPI0);
bfin_write_EPPI0_CONTROL(bfin_read_EPPI0_CONTROL() | EPPI_EN);
}
return 0;
}
#else
#define bfin_bf54x_suspend NULL
#define bfin_bf54x_resume NULL
#endif
static struct platform_driver bfin_bf54x_driver = {
.probe = bfin_bf54x_probe,
.remove = __devexit_p(bfin_bf54x_remove),
.suspend = bfin_bf54x_suspend,
.resume = bfin_bf54x_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
static int __init bfin_bf54x_driver_init(void)
{
return platform_driver_register(&bfin_bf54x_driver);
}
static void __exit bfin_bf54x_driver_cleanup(void)
{
platform_driver_unregister(&bfin_bf54x_driver);
}
MODULE_DESCRIPTION("Blackfin BF54x TFT LCD Driver");
MODULE_LICENSE("GPL");
module_init(bfin_bf54x_driver_init);
module_exit(bfin_bf54x_driver_cleanup);
| gpl-2.0 |
koradiavatsal/Viper-kernel | drivers/pci/pcie/pme.c | 5199 | 11432 | /*
* PCIe Native PME support
*
* Copyright (C) 2007 - 2009 Intel Corp
* Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License V2. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pcieport_if.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include "../pci.h"
#include "portdrv.h"
/*
* If this switch is set, MSI will not be used for PCIe PME signaling. This
* causes the PCIe port driver to use INTx interrupts only, but it turns out
* that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
* wake-up from system sleep states.
*/
bool pcie_pme_msi_disabled;
static int __init pcie_pme_setup(char *str)
{
if (!strncmp(str, "nomsi", 5))
pcie_pme_msi_disabled = true;
return 1;
}
__setup("pcie_pme=", pcie_pme_setup);
struct pcie_pme_service_data {
spinlock_t lock;
struct pcie_device *srv;
struct work_struct work;
bool noirq; /* Don't enable the PME interrupt used by this service. */
};
/**
* pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
* @dev: PCIe root port or event collector.
* @enable: Enable or disable the interrupt.
*/
void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
{
int rtctl_pos;
u16 rtctl;
rtctl_pos = pci_pcie_cap(dev) + PCI_EXP_RTCTL;
pci_read_config_word(dev, rtctl_pos, &rtctl);
if (enable)
rtctl |= PCI_EXP_RTCTL_PMEIE;
else
rtctl &= ~PCI_EXP_RTCTL_PMEIE;
pci_write_config_word(dev, rtctl_pos, rtctl);
}
/**
* pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
* @bus: PCI bus to scan.
*
* Scan given PCI bus and all buses under it for devices asserting PME#.
*/
static bool pcie_pme_walk_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
bool ret = false;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Skip PCIe devices in case we started from a root port. */
if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
if (dev->pme_poll)
dev->pme_poll = false;
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
ret = true;
}
if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
ret = true;
}
return ret;
}
/**
* pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
* @bus: Secondary bus of the bridge.
* @devfn: Device/function number to check.
*
* PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
* PCIe PME message. In that case the bridge should use the Requester ID
* of device/function number 0 on its secondary bus.
*/
static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
{
struct pci_dev *dev;
bool found = false;
if (devfn)
return false;
dev = pci_dev_get(bus->self);
if (!dev)
return false;
if (pci_is_pcie(dev) && dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
down_read(&pci_bus_sem);
if (pcie_pme_walk_bus(bus))
found = true;
up_read(&pci_bus_sem);
}
pci_dev_put(dev);
return found;
}
/**
* pcie_pme_handle_request - Find device that generated PME and handle it.
* @port: Root port or event collector that generated the PME interrupt.
* @req_id: PCIe Requester ID of the device that generated the PME.
*/
static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
{
u8 busnr = req_id >> 8, devfn = req_id & 0xff;
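/*
 * E.g., a hypothetical req_id of 0x0310 decodes to bus 0x03 and
 * devfn 0x10, i.e. slot 2 (devfn >> 3), function 0 (devfn & 7).
 */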
struct pci_bus *bus;
struct pci_dev *dev;
bool found = false;
/* First, check if the PME is from the root port itself. */
if (port->devfn == devfn && port->bus->number == busnr) {
if (port->pme_poll)
port->pme_poll = false;
if (pci_check_pme_status(port)) {
pm_request_resume(&port->dev);
found = true;
} else {
/*
* Apparently, the root port generated the PME on behalf
* of a non-PCIe device downstream. If this is done by
* a root port, the Requester ID field in its status
* register may contain either the root port's, or the
* source device's information (PCI Express Base
* Specification, Rev. 2.0, Section 6.1.9).
*/
down_read(&pci_bus_sem);
found = pcie_pme_walk_bus(port->subordinate);
up_read(&pci_bus_sem);
}
goto out;
}
/* Second, find the bus the source device is on. */
bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
if (!bus)
goto out;
/* Next, check if the PME is from a PCIe-PCI bridge. */
found = pcie_pme_from_pci_bridge(bus, devfn);
if (found)
goto out;
/* Finally, try to find the PME source on the bus. */
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_get(dev);
if (dev->devfn == devfn) {
found = true;
break;
}
pci_dev_put(dev);
}
up_read(&pci_bus_sem);
if (found) {
/* The device is there, but we have to check its PME status. */
found = pci_check_pme_status(dev);
if (found) {
if (dev->pme_poll)
dev->pme_poll = false;
pci_wakeup_event(dev);
pm_request_resume(&dev->dev);
}
pci_dev_put(dev);
} else if (devfn) {
/*
* The device is not there, but we can still try to recover by
* assuming that the PME was reported by a PCIe-PCI bridge that
* used devfn different from zero.
*/
dev_dbg(&port->dev, "PME interrupt generated for "
"non-existent device %02x:%02x.%d\n",
busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
found = pcie_pme_from_pci_bridge(bus, 0);
}
out:
if (!found)
dev_dbg(&port->dev, "Spurious native PME interrupt!\n");
}
/**
* pcie_pme_work_fn - Work handler for PCIe PME interrupt.
* @work: Work structure giving access to service data.
*/
static void pcie_pme_work_fn(struct work_struct *work)
{
struct pcie_pme_service_data *data =
container_of(work, struct pcie_pme_service_data, work);
struct pci_dev *port = data->srv->port;
int rtsta_pos;
u32 rtsta;
rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
spin_lock_irq(&data->lock);
for (;;) {
if (data->noirq)
break;
pci_read_config_dword(port, rtsta_pos, &rtsta);
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
* pending PMEs, the status will be set again.
*/
pcie_clear_root_pme_status(port);
spin_unlock_irq(&data->lock);
pcie_pme_handle_request(port, rtsta & 0xffff);
spin_lock_irq(&data->lock);
continue;
}
/* No need to loop if there are no more PMEs pending. */
if (!(rtsta & PCI_EXP_RTSTA_PENDING))
break;
spin_unlock_irq(&data->lock);
cpu_relax();
spin_lock_irq(&data->lock);
}
if (!data->noirq)
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
}
/**
* pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
* @irq: Interrupt vector.
* @context: Interrupt context pointer.
*/
static irqreturn_t pcie_pme_irq(int irq, void *context)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
int rtsta_pos;
u32 rtsta;
unsigned long flags;
port = ((struct pcie_device *)context)->port;
data = get_service_data((struct pcie_device *)context);
rtsta_pos = pci_pcie_cap(port) + PCI_EXP_RTSTA;
spin_lock_irqsave(&data->lock, flags);
pci_read_config_dword(port, rtsta_pos, &rtsta);
if (!(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
pcie_pme_interrupt_enable(port, false);
spin_unlock_irqrestore(&data->lock, flags);
/* We don't use pm_wq, because it's freezable. */
schedule_work(&data->work);
return IRQ_HANDLED;
}
/**
* pcie_pme_set_native - Set the PME interrupt flag for given device.
* @dev: PCI device to handle.
* @ign: Ignored.
*/
static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
{
dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
device_set_run_wake(&dev->dev, true);
dev->pme_interrupt = true;
return 0;
}
/**
* pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port.
* @port: PCIe root port or event collector to handle.
*
* For each device below given root port, including the port itself (or for each
* root complex integrated endpoint if @port is a root complex event collector)
* set the flag indicating that it can signal run-time wake-up events via PCIe
* PME interrupts.
*/
static void pcie_pme_mark_devices(struct pci_dev *port)
{
pcie_pme_set_native(port, NULL);
if (port->subordinate) {
pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
} else {
struct pci_bus *bus = port->bus;
struct pci_dev *dev;
/* Check if this is a root port event collector. */
if (port->pcie_type != PCI_EXP_TYPE_RC_EC || !bus)
return;
down_read(&pci_bus_sem);
list_for_each_entry(dev, &bus->devices, bus_list)
if (pci_is_pcie(dev)
&& dev->pcie_type == PCI_EXP_TYPE_RC_END)
pcie_pme_set_native(dev, NULL);
up_read(&pci_bus_sem);
}
}
/**
* pcie_pme_probe - Initialize PCIe PME service for given root port.
* @srv: PCIe service to initialize.
*/
static int pcie_pme_probe(struct pcie_device *srv)
{
struct pci_dev *port;
struct pcie_pme_service_data *data;
int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_init(&data->lock);
INIT_WORK(&data->work, pcie_pme_work_fn);
data->srv = srv;
set_service_data(srv, data);
port = srv->port;
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
if (ret) {
kfree(data);
} else {
pcie_pme_mark_devices(port);
pcie_pme_interrupt_enable(port, true);
}
return ret;
}
/**
* pcie_pme_suspend - Suspend PCIe PME service device.
* @srv: PCIe service device to suspend.
*/
static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
spin_lock_irq(&data->lock);
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
data->noirq = true;
spin_unlock_irq(&data->lock);
synchronize_irq(srv->irq);
return 0;
}
/**
* pcie_pme_resume - Resume PCIe PME service device.
* @srv: PCIe service device to resume.
*/
static int pcie_pme_resume(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
spin_lock_irq(&data->lock);
data->noirq = false;
pcie_clear_root_pme_status(port);
pcie_pme_interrupt_enable(port, true);
spin_unlock_irq(&data->lock);
return 0;
}
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
* @srv: PCIe service device being removed.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
pcie_pme_suspend(srv);
free_irq(srv->irq, srv);
kfree(get_service_data(srv));
}
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
.service = PCIE_PORT_SERVICE_PME,
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
/**
* pcie_pme_service_init - Register the PCIe PME service driver.
*/
static int __init pcie_pme_service_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
module_init(pcie_pme_service_init);
| gpl-2.0 |
AD5GB/WickedElemental-N5 | net/mac80211/aes_cmac.c | 5711 | 2704 | /*
* AES-128-CMAC with TLen 16 for IEEE 802.11w BIP
* Copyright 2008, Jouni Malinen <j@w1.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_cmac.h"
#define AES_CMAC_KEY_LEN 16
#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
#define AAD_LEN 20
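/*
 * Note (added): gf_mulx() doubles a 128-bit value in GF(2^128): shift
 * left one bit and, if the top bit was set, XOR 0x87 into the last byte,
 * the low byte of the reduction polynomial x^128 + x^7 + x^2 + x + 1
 * (cf. RFC 4493). CMAC derives its padding subkey from E_K(0) this way.
 */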
static void gf_mulx(u8 *pad)
{
int i, carry;
carry = pad[0] & 0x80;
for (i = 0; i < AES_BLOCK_SIZE - 1; i++)
pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7);
pad[AES_BLOCK_SIZE - 1] <<= 1;
if (carry)
pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}
static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
const u8 *addr[], const size_t *len, u8 *mac)
{
u8 scratch[2 * AES_BLOCK_SIZE];
u8 *cbc, *pad;
const u8 *pos, *end;
size_t i, e, left, total_len;
cbc = scratch;
pad = scratch + AES_BLOCK_SIZE;
memset(cbc, 0, AES_BLOCK_SIZE);
total_len = 0;
for (e = 0; e < num_elem; e++)
total_len += len[e];
left = total_len;
e = 0;
pos = addr[0];
end = pos + len[0];
while (left >= AES_BLOCK_SIZE) {
for (i = 0; i < AES_BLOCK_SIZE; i++) {
cbc[i] ^= *pos++;
if (pos >= end) {
e++;
pos = addr[e];
end = pos + len[e];
}
}
if (left > AES_BLOCK_SIZE)
crypto_cipher_encrypt_one(tfm, cbc, cbc);
left -= AES_BLOCK_SIZE;
}
memset(pad, 0, AES_BLOCK_SIZE);
crypto_cipher_encrypt_one(tfm, pad, pad);
gf_mulx(pad);
if (left || total_len == 0) {
for (i = 0; i < left; i++) {
cbc[i] ^= *pos++;
if (pos >= end) {
e++;
pos = addr[e];
end = pos + len[e];
}
}
cbc[left] ^= 0x80;
gf_mulx(pad);
}
for (i = 0; i < AES_BLOCK_SIZE; i++)
pad[i] ^= cbc[i];
crypto_cipher_encrypt_one(tfm, pad, pad);
memcpy(mac, pad, CMAC_TLEN);
}
void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
const u8 *data, size_t data_len, u8 *mic)
{
const u8 *addr[3];
size_t len[3];
u8 zero[CMAC_TLEN];
memset(zero, 0, CMAC_TLEN);
addr[0] = aad;
len[0] = AAD_LEN;
addr[1] = data;
len[1] = data_len - CMAC_TLEN;
addr[2] = zero;
len[2] = CMAC_TLEN;
aes_128_cmac_vector(tfm, 3, addr, len, mic);
}
struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[])
{
struct crypto_cipher *tfm;
tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (!IS_ERR(tfm))
crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
return tfm;
}
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
{
crypto_free_cipher(tfm);
}
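/*
 * Minimal usage sketch (added, not part of the original file; the igtk,
 * aad, frame and mic buffer names are placeholders):
 *
 * struct crypto_cipher *tfm = ieee80211_aes_cmac_key_setup(igtk);
 * if (!IS_ERR(tfm)) {
 * ieee80211_aes_cmac(tfm, aad, frame, frame_len, mic);
 * ieee80211_aes_cmac_key_free(tfm);
 * }
 */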
| gpl-2.0 |
chillstep1998/AK-OnePone | scripts/dtc/libfdt/fdt_ro.c | 5967 | 14037 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int _fdt_nodename_eq(const void *fdt, int offset,
const char *s, int len)
{
const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
if (! p)
/* short match */
return 0;
if (memcmp(p, s, len) != 0)
return 0;
if (p[len] == '\0')
return 1;
else if (!memchr(s, '@', len) && (p[len] == '@'))
return 1;
else
return 0;
}
const char *fdt_string(const void *fdt, int stroffset)
{
return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
}
static int _fdt_string_eq(const void *fdt, int stroffset,
const char *s, int len)
{
const char *p = fdt_string(fdt, stroffset);
return (strlen(p) == len) && (memcmp(p, s, len) == 0);
}
int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
{
FDT_CHECK_HEADER(fdt);
*address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
*size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
return 0;
}
int fdt_num_mem_rsv(const void *fdt)
{
int i = 0;
while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
i++;
return i;
}
static int _nextprop(const void *fdt, int offset)
{
uint32_t tag;
int nextoffset;
do {
tag = fdt_next_tag(fdt, offset, &nextoffset);
switch (tag) {
case FDT_END:
if (nextoffset >= 0)
return -FDT_ERR_BADSTRUCTURE;
else
return nextoffset;
case FDT_PROP:
return offset;
}
offset = nextoffset;
} while (tag == FDT_NOP);
return -FDT_ERR_NOTFOUND;
}
int fdt_subnode_offset_namelen(const void *fdt, int offset,
const char *name, int namelen)
{
int depth;
FDT_CHECK_HEADER(fdt);
for (depth = 0;
(offset >= 0) && (depth >= 0);
offset = fdt_next_node(fdt, offset, &depth))
if ((depth == 1)
&& _fdt_nodename_eq(fdt, offset, name, namelen))
return offset;
if (depth < 0)
return -FDT_ERR_NOTFOUND;
return offset; /* error */
}
int fdt_subnode_offset(const void *fdt, int parentoffset,
const char *name)
{
return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
}
int fdt_path_offset(const void *fdt, const char *path)
{
const char *end = path + strlen(path);
const char *p = path;
int offset = 0;
FDT_CHECK_HEADER(fdt);
/* see if we have an alias */
if (*path != '/') {
const char *q = strchr(path, '/');
if (!q)
q = end;
p = fdt_get_alias_namelen(fdt, p, q - p);
if (!p)
return -FDT_ERR_BADPATH;
offset = fdt_path_offset(fdt, p);
p = q;
}
while (*p) {
const char *q;
while (*p == '/')
p++;
if (! *p)
return offset;
q = strchr(p, '/');
if (! q)
q = end;
offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
if (offset < 0)
return offset;
p = q;
}
return offset;
}
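/*
 * Usage sketch (added, node names hypothetical): if /aliases contains
 * serial0 = "/soc/uart@1000", both calls below return the same offset:
 *
 * int off1 = fdt_path_offset(fdt, "/soc/uart@1000");
 * int off2 = fdt_path_offset(fdt, "serial0");
 */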
const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
{
const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
int err;
if (((err = fdt_check_header(fdt)) != 0)
|| ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
goto fail;
if (len)
*len = strlen(nh->name);
return nh->name;
fail:
if (len)
*len = err;
return NULL;
}
int fdt_first_property_offset(const void *fdt, int nodeoffset)
{
int offset;
if ((offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
return offset;
return _nextprop(fdt, offset);
}
int fdt_next_property_offset(const void *fdt, int offset)
{
if ((offset = _fdt_check_prop_offset(fdt, offset)) < 0)
return offset;
return _nextprop(fdt, offset);
}
const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
int offset,
int *lenp)
{
int err;
const struct fdt_property *prop;
if ((err = _fdt_check_prop_offset(fdt, offset)) < 0) {
if (lenp)
*lenp = err;
return NULL;
}
prop = _fdt_offset_ptr(fdt, offset);
if (lenp)
*lenp = fdt32_to_cpu(prop->len);
return prop;
}
const struct fdt_property *fdt_get_property_namelen(const void *fdt,
int offset,
const char *name,
int namelen, int *lenp)
{
for (offset = fdt_first_property_offset(fdt, offset);
(offset >= 0);
(offset = fdt_next_property_offset(fdt, offset))) {
const struct fdt_property *prop;
if (!(prop = fdt_get_property_by_offset(fdt, offset, lenp))) {
offset = -FDT_ERR_INTERNAL;
break;
}
if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff),
name, namelen))
return prop;
}
if (lenp)
*lenp = offset;
return NULL;
}
const struct fdt_property *fdt_get_property(const void *fdt,
int nodeoffset,
const char *name, int *lenp)
{
return fdt_get_property_namelen(fdt, nodeoffset, name,
strlen(name), lenp);
}
const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
const char *name, int namelen, int *lenp)
{
const struct fdt_property *prop;
prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
if (! prop)
return NULL;
return prop->data;
}
const void *fdt_getprop_by_offset(const void *fdt, int offset,
const char **namep, int *lenp)
{
const struct fdt_property *prop;
prop = fdt_get_property_by_offset(fdt, offset, lenp);
if (!prop)
return NULL;
if (namep)
*namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
return prop->data;
}
const void *fdt_getprop(const void *fdt, int nodeoffset,
const char *name, int *lenp)
{
return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
}
uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
{
const uint32_t *php;
int len;
/* FIXME: This is a bit sub-optimal, since we potentially scan
* over all the properties twice. */
php = fdt_getprop(fdt, nodeoffset, "phandle", &len);
if (!php || (len != sizeof(*php))) {
php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
if (!php || (len != sizeof(*php)))
return 0;
}
return fdt32_to_cpu(*php);
}
const char *fdt_get_alias_namelen(const void *fdt,
const char *name, int namelen)
{
int aliasoffset;
aliasoffset = fdt_path_offset(fdt, "/aliases");
if (aliasoffset < 0)
return NULL;
return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL);
}
const char *fdt_get_alias(const void *fdt, const char *name)
{
return fdt_get_alias_namelen(fdt, name, strlen(name));
}
int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
{
int pdepth = 0, p = 0;
int offset, depth, namelen;
const char *name;
FDT_CHECK_HEADER(fdt);
if (buflen < 2)
return -FDT_ERR_NOSPACE;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
while (pdepth > depth) {
do {
p--;
} while (buf[p-1] != '/');
pdepth--;
}
if (pdepth >= depth) {
name = fdt_get_name(fdt, offset, &namelen);
if (!name)
return namelen;
if ((p + namelen + 1) <= buflen) {
memcpy(buf + p, name, namelen);
p += namelen;
buf[p++] = '/';
pdepth++;
}
}
if (offset == nodeoffset) {
if (pdepth < (depth + 1))
return -FDT_ERR_NOSPACE;
if (p > 1) /* special case so that root path is "/", not "" */
p--;
buf[p] = '\0';
return 0;
}
}
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
return offset; /* error from fdt_next_node() */
}
int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
int supernodedepth, int *nodedepth)
{
int offset, depth;
int supernodeoffset = -FDT_ERR_INTERNAL;
FDT_CHECK_HEADER(fdt);
if (supernodedepth < 0)
return -FDT_ERR_NOTFOUND;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
if (depth == supernodedepth)
supernodeoffset = offset;
if (offset == nodeoffset) {
if (nodedepth)
*nodedepth = depth;
if (supernodedepth > depth)
return -FDT_ERR_NOTFOUND;
else
return supernodeoffset;
}
}
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
return offset; /* error from fdt_next_node() */
}
int fdt_node_depth(const void *fdt, int nodeoffset)
{
int nodedepth;
int err;
err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
if (err)
return (err < 0) ? err : -FDT_ERR_INTERNAL;
return nodedepth;
}
int fdt_parent_offset(const void *fdt, int nodeoffset)
{
int nodedepth = fdt_node_depth(fdt, nodeoffset);
if (nodedepth < 0)
return nodedepth;
return fdt_supernode_atdepth_offset(fdt, nodeoffset,
nodedepth - 1, NULL);
}
int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
const char *propname,
const void *propval, int proplen)
{
int offset;
const void *val;
int len;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_getprop(), then if that didn't
* find what we want, we scan over them again making our way
* to the next node. Still it's the easiest to implement
* approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
val = fdt_getprop(fdt, offset, propname, &len);
if (val && (len == proplen)
&& (memcmp(val, propval, len) == 0))
return offset;
}
return offset; /* error from fdt_next_node() */
}
int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
{
int offset;
if ((phandle == 0) || (phandle == -1))
return -FDT_ERR_BADPHANDLE;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we
* potentially scan each property of a node in
* fdt_get_phandle(), then if that didn't find what
* we want, we scan over them again making our way to the next
* node. Still it's the easiest to implement approach;
* performance can come later. */
for (offset = fdt_next_node(fdt, -1, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
if (fdt_get_phandle(fdt, offset) == phandle)
return offset;
}
return offset; /* error from fdt_next_node() */
}
static int _fdt_stringlist_contains(const char *strlist, int listlen,
const char *str)
{
int len = strlen(str);
const char *p;
while (listlen >= len) {
if (memcmp(str, strlist, len+1) == 0)
return 1;
p = memchr(strlist, '\0', listlen);
if (!p)
return 0; /* malformed strlist.. */
listlen -= (p-strlist) + 1;
strlist = p + 1;
}
return 0;
}
int fdt_node_check_compatible(const void *fdt, int nodeoffset,
const char *compatible)
{
const void *prop;
int len;
prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
if (!prop)
return len;
if (_fdt_stringlist_contains(prop, len, compatible))
return 0;
else
return 1;
}
int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
const char *compatible)
{
int offset, err;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_node_check_compatible(), then if
* that didn't find what we want, we scan over them again
* making our way to the next node. Still it's the easiest to
* implement approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
err = fdt_node_check_compatible(fdt, offset, compatible);
if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
return err;
else if (err == 0)
return offset;
}
return offset; /* error from fdt_next_node() */
}
| gpl-2.0 |
Butterfly-CM/android_kernel_htc_dlxub1 | fs/hpfs/map.c | 6223 | 8540 | /*
* linux/fs/hpfs/map.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* mapping structures to memory with some minimal checks
*/
#include "hpfs_fn.h"
unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
{
return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
}
unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
struct quad_buffer_head *qbh, char *id)
{
secno sec;
if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
return NULL;
}
sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
return NULL;
}
return hpfs_map_4sectors(s, sec, qbh, 4);
}
/*
* Load first code page into kernel memory, return pointer to 256-byte array,
* first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are
* lowercasing table
*/
unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
{
struct buffer_head *bh;
secno cpds;
unsigned cpi;
unsigned char *ptr;
unsigned char *cp_table;
int i;
struct code_page_data *cpd;
struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
if (!cp) return NULL;
if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic));
brelse(bh);
return NULL;
}
if (!le32_to_cpu(cp->n_code_pages)) {
printk("HPFS: n_code_pages == 0\n");
brelse(bh);
return NULL;
}
cpds = le32_to_cpu(cp->array[0].code_page_data);
cpi = le16_to_cpu(cp->array[0].index);
brelse(bh);
if (cpi >= 3) {
printk("HPFS: Code page index out of array\n");
return NULL;
}
if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
printk("HPFS: Code page index out of sector\n");
brelse(bh);
return NULL;
}
ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
printk("HPFS: out of memory for code page table\n");
brelse(bh);
return NULL;
}
memcpy(cp_table, ptr, 128);
brelse(bh);
/* Try to build lowercasing table from uppercasing one */
for (i=128; i<256; i++) cp_table[i]=i;
for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128)
cp_table[cp_table[i-128]] = i;
return cp_table;
}
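/*
 * Lookup convention (added note): for a character c in 128..255,
 * cp_table[c - 128] gives its uppercase mapping and cp_table[c] its
 * lowercase mapping, matching the layout described above.
 */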
secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
{
struct buffer_head *bh;
int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
int i;
secno *b;
if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
printk("HPFS: can't allocate memory for bitmap directory\n");
return NULL;
}
for (i=0;i<n;i++) {
secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
if (!d) {
kfree(b);
return NULL;
}
memcpy((char *)b + 512 * i, d, 512);
brelse(bh);
}
return b;
}
/*
* Load fnode to memory
*/
struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp)
{
struct fnode *fnode;
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) {
return NULL;
}
if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) {
if (hpfs_sb(s)->sb_chk) {
struct extended_attribute *ea;
struct extended_attribute *ea_end;
if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
hpfs_error(s, "bad magic on fnode %08lx",
(unsigned long)ino);
goto bail;
}
if (!fnode->dirflag) {
if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
(fnode->btree.internal ? 12 : 8)) {
hpfs_error(s,
"bad number of nodes in fnode %08lx",
(unsigned long)ino);
goto bail;
}
if (le16_to_cpu(fnode->btree.first_free) !=
8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
hpfs_error(s,
"bad first_free pointer in fnode %08lx",
(unsigned long)ino);
goto bail;
}
}
if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
hpfs_error(s,
"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
(unsigned long)ino,
le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
goto bail;
}
ea = fnode_ea(fnode);
ea_end = fnode_end_ea(fnode);
while (ea != ea_end) {
if (ea > ea_end) {
hpfs_error(s, "bad EA in fnode %08lx",
(unsigned long)ino);
goto bail;
}
ea = next_ea(ea);
}
}
}
return fnode;
bail:
brelse(*bhp);
return NULL;
}
struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp)
{
struct anode *anode;
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
if (hpfs_sb(s)->sb_chk) {
if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
hpfs_error(s, "bad magic on anode %08x", ano);
goto bail;
}
if (le32_to_cpu(anode->self) != ano) {
hpfs_error(s, "self pointer invalid on anode %08x", ano);
goto bail;
}
if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
(anode->btree.internal ? 60 : 40)) {
hpfs_error(s, "bad number of nodes in anode %08x", ano);
goto bail;
}
if (le16_to_cpu(anode->btree.first_free) !=
8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
hpfs_error(s, "bad first_free pointer in anode %08x", ano);
goto bail;
}
}
return anode;
bail:
brelse(*bhp);
return NULL;
}
/*
* Load dnode to memory and do some checks
*/
struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
struct quad_buffer_head *qbh)
{
struct dnode *dnode;
if (hpfs_sb(s)->sb_chk) {
if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL;
if (secno & 3) {
hpfs_error(s, "dnode %08x not byte-aligned", secno);
return NULL;
}
}
if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
if (hpfs_sb(s)->sb_chk) {
unsigned p, pp = 0;
unsigned char *d = (unsigned char *)dnode;
int b = 0;
if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
hpfs_error(s, "bad magic on dnode %08x", secno);
goto bail;
}
if (le32_to_cpu(dnode->self) != secno)
hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
/* Check dirents - bad dirents would cause infinite
loops or stray memory accesses */
if (le32_to_cpu(dnode->first_free) > 2048) {
hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
goto bail;
}
for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
goto bail;
}
if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
goto bail;
}
ok:
if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down;
if (de->down) if (de_down_pointer(de) < 0x10) {
hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
goto bail;
}
pp = p;
}
if (p != le32_to_cpu(dnode->first_free)) {
hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
goto bail;
}
if (d[pp + 30] != 1 || d[pp + 31] != 255) {
hpfs_error(s, "dnode %08x does not end with \\377 entry", secno);
goto bail;
}
if (b == 3) printk("HPFS: warning: unbalanced dnode tree, dnode %08x; see hpfs.txt for more info\n", secno);
}
return dnode;
bail:
hpfs_brelse4(qbh);
return NULL;
}
dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
{
struct buffer_head *bh;
struct fnode *fnode;
dnode_secno dno;
fnode = hpfs_map_fnode(s, ino, &bh);
if (!fnode)
return 0;
dno = le32_to_cpu(fnode->u.external[0].disk_secno);
brelse(bh);
return dno;
}
| gpl-2.0 |