| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
nsingh94/caf-7x30 | arch/mips/mti-malta/malta-cmdline.c | 9648 | 1571 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Kernel command line creation using the prom monitor (YAMON) argc/argv.
*/
#include <linux/init.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
extern int prom_argc;
extern int *_prom_argv;
/*
* YAMON (32-bit PROM) passes arguments and the environment as 32-bit
* pointers. This macro takes care of sign extension.
*/
#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
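/*
 * Illustrative sketch (not part of the original file): _prom_argv[] holds
 * 32-bit int values, and casting through (long) sign-extends them on a
 * 64-bit kernel, so a YAMON KSEG0 pointer keeps its canonical form:
 *
 *   int raw = 0x80001234;          // 32-bit pointer value from YAMON
 *   char *p = (char *)(long)raw;   // 0xffffffff80001234 on a 64-bit kernel
 *
 * A cast through (unsigned long) would zero-extend instead and produce
 * an unusable address.
 */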
char * __init prom_getcmdline(void)
{
return &(arcs_cmdline[0]);
}
void __init prom_init_cmdline(void)
{
char *cp;
int actr;
actr = 1; /* Always ignore argv[0] */
cp = &(arcs_cmdline[0]);
while (actr < prom_argc) {
strcpy(cp, prom_argv(actr));
cp += strlen(prom_argv(actr));
*cp++ = ' ';
actr++;
}
if (cp != &(arcs_cmdline[0])) {
/* get rid of trailing space */
--cp;
*cp = '\0';
}
}
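/*
 * Illustrative sketch (not part of the original file), with a hypothetical
 * YAMON argument vector:
 *
 *   argv[0] = "boot"           (always skipped)
 *   argv[1] = "console=ttyS0"
 *   argv[2] = "root=/dev/nfs"
 *
 * prom_init_cmdline() leaves arcs_cmdline = "console=ttyS0 root=/dev/nfs",
 * i.e. the remaining arguments joined by single spaces, with the trailing
 * space stripped.
 */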
| gpl-2.0 |
AndroidForWave/android_kernel_samsung_wave | drivers/infiniband/hw/qib/qib_verbs_mcast.c | 12208 | 8560 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/rculist.h>
#include "qib.h"
/**
* qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
struct qib_mcast_qp *mqp;
mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
if (!mqp)
goto bail;
mqp->qp = qp;
atomic_inc(&qp->refcount);
bail:
return mqp;
}
static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
struct qib_qp *qp = mqp->qp;
/* Notify qib_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
kfree(mqp);
}
/**
* qib_mcast_alloc - allocate the multicast GID structure
* @mgid: the multicast GID
*
* A list of QPs will be attached to this structure.
*/
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
struct qib_mcast *mcast;
mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
if (!mcast)
goto bail;
mcast->mgid = *mgid;
INIT_LIST_HEAD(&mcast->qp_list);
init_waitqueue_head(&mcast->wait);
atomic_set(&mcast->refcount, 0);
mcast->n_attached = 0;
bail:
return mcast;
}
static void qib_mcast_free(struct qib_mcast *mcast)
{
struct qib_mcast_qp *p, *tmp;
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
qib_mcast_qp_free(p);
kfree(mcast);
}
/**
* qib_mcast_find - search the global table for the given multicast GID
* @ibp: the IB port structure
* @mgid: the multicast GID to search for
*
* Returns NULL if not found.
*
* The caller is responsible for decrementing the reference count if found.
*/
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
struct rb_node *n;
unsigned long flags;
struct qib_mcast *mcast;
spin_lock_irqsave(&ibp->lock, flags);
n = ibp->mcast_tree.rb_node;
while (n) {
int ret;
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(mgid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else {
atomic_inc(&mcast->refcount);
spin_unlock_irqrestore(&ibp->lock, flags);
goto bail;
}
}
spin_unlock_irqrestore(&ibp->lock, flags);
mcast = NULL;
bail:
return mcast;
}
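/*
 * Illustrative sketch (not part of the original file) of the reference
 * counting contract documented above; a hypothetical caller would do:
 *
 *   struct qib_mcast *mcast = qib_mcast_find(ibp, mgid);
 *   if (mcast) {
 *           ... use mcast, protected by the reference taken above ...
 *           if (atomic_dec_return(&mcast->refcount) <= 1)
 *                   wake_up(&mcast->wait);
 *   }
 *
 * The wake_up() pairs with the wait_event() calls in
 * qib_multicast_detach(), which sleep until such temporary references
 * are gone before freeing the group.
 */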
/**
* qib_mcast_add - insert mcast GID into table and attach QP struct
* @dev: the qib IB device
* @ibp: the IB port on which the mcast GID is inserted
* @mcast: the mcast GID table
* @mqp: the QP to attach
*
* Return zero if both were added. Return EEXIST if the GID was already in
* the table but the QP was added. Return ESRCH if the QP was already
* attached and neither structure was added.
*/
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
struct rb_node **n = &ibp->mcast_tree.rb_node;
struct rb_node *pn = NULL;
int ret;
spin_lock_irq(&ibp->lock);
while (*n) {
struct qib_mcast *tmcast;
struct qib_mcast_qp *p;
pn = *n;
tmcast = rb_entry(pn, struct qib_mcast, rb_node);
ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0) {
n = &pn->rb_left;
continue;
}
if (ret > 0) {
n = &pn->rb_right;
continue;
}
/* Search the QP list to see if this is already there. */
list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
if (p->qp == mqp->qp) {
ret = ESRCH;
goto bail;
}
}
if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
ret = ENOMEM;
goto bail;
}
tmcast->n_attached++;
list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
ret = EEXIST;
goto bail;
}
spin_lock(&dev->n_mcast_grps_lock);
if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
spin_unlock(&dev->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
dev->n_mcast_grps_allocated++;
spin_unlock(&dev->n_mcast_grps_lock);
mcast->n_attached++;
list_add_tail_rcu(&mqp->list, &mcast->qp_list);
atomic_inc(&mcast->refcount);
rb_link_node(&mcast->rb_node, pn, n);
rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
ret = 0;
bail:
spin_unlock_irq(&ibp->lock);
return ret;
}
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp;
struct qib_mcast *mcast;
struct qib_mcast_qp *mqp;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
/*
* Allocate data structures since it's better to do this outside of
* spin locks and it will most likely be needed.
*/
mcast = qib_mcast_alloc(gid);
if (mcast == NULL) {
ret = -ENOMEM;
goto bail;
}
mqp = qib_mcast_qp_alloc(qp);
if (mqp == NULL) {
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
}
ibp = to_iport(ibqp->device, qp->port_num);
switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
case ESRCH:
/* Neither was used: OK to attach the same QP twice. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
break;
case EEXIST: /* The mcast wasn't used */
qib_mcast_free(mcast);
break;
case ENOMEM:
/* Exceeded the maximum number of mcast groups. */
qib_mcast_qp_free(mqp);
qib_mcast_free(mcast);
ret = -ENOMEM;
goto bail;
default:
break;
}
ret = 0;
bail:
return ret;
}
int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct qib_qp *qp = to_iqp(ibqp);
struct qib_ibdev *dev = to_idev(ibqp->device);
struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
struct qib_mcast *mcast = NULL;
struct qib_mcast_qp *p, *tmp;
struct rb_node *n;
int last = 0;
int ret;
if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
ret = -EINVAL;
goto bail;
}
spin_lock_irq(&ibp->lock);
/* Find the GID in the mcast table. */
n = ibp->mcast_tree.rb_node;
while (1) {
if (n == NULL) {
spin_unlock_irq(&ibp->lock);
ret = -EINVAL;
goto bail;
}
mcast = rb_entry(n, struct qib_mcast, rb_node);
ret = memcmp(gid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else
break;
}
/* Search the QP list. */
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
if (p->qp != qp)
continue;
/*
* We found it, so remove it, but don't poison the forward
* link until we are sure there are no list walkers.
*/
list_del_rcu(&p->list);
mcast->n_attached--;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
rb_erase(&mcast->rb_node, &ibp->mcast_tree);
last = 1;
}
break;
}
spin_unlock_irq(&ibp->lock);
if (p) {
/*
* Wait for any list walkers to finish before freeing the
* list element.
*/
wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
qib_mcast_qp_free(p);
}
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
qib_mcast_free(mcast);
spin_lock_irq(&dev->n_mcast_grps_lock);
dev->n_mcast_grps_allocated--;
spin_unlock_irq(&dev->n_mcast_grps_lock);
}
ret = 0;
bail:
return ret;
}
int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
return ibp->mcast_tree.rb_node == NULL;
}
| gpl-2.0 |
SlimLG2/msm8974_caf_G2 | drivers/power/pmu_battery.c | 12720 | 5424 | /*
* Battery class driver for Apple PMU
*
* Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/slab.h>
static struct pmu_battery_dev {
struct power_supply bat;
struct pmu_battery_info *pbi;
char name[16];
int propval;
} *pbats[PMU_MAX_BATTERIES];
#define to_pmu_battery_dev(x) container_of(x, struct pmu_battery_dev, bat)
/*********************************************************************
* Power
*********************************************************************/
static int pmu_get_ac_prop(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
val->intval = (!!(pmu_power_flags & PMU_PWR_AC_PRESENT)) ||
(pmu_battery_count == 0);
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property pmu_ac_props[] = {
POWER_SUPPLY_PROP_ONLINE,
};
static struct power_supply pmu_ac = {
.name = "pmu-ac",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = pmu_ac_props,
.num_properties = ARRAY_SIZE(pmu_ac_props),
.get_property = pmu_get_ac_prop,
};
/*********************************************************************
* Battery properties
*********************************************************************/
static char *pmu_batt_types[] = {
"Smart", "Comet", "Hooper", "Unknown"
};
static char *pmu_bat_get_model_name(struct pmu_battery_info *pbi)
{
switch (pbi->flags & PMU_BATT_TYPE_MASK) {
case PMU_BATT_TYPE_SMART:
return pmu_batt_types[0];
case PMU_BATT_TYPE_COMET:
return pmu_batt_types[1];
case PMU_BATT_TYPE_HOOPER:
return pmu_batt_types[2];
default: break;
}
return pmu_batt_types[3];
}
static int pmu_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct pmu_battery_dev *pbat = to_pmu_battery_dev(psy);
struct pmu_battery_info *pbi = pbat->pbi;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (pbi->flags & PMU_BATT_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (pmu_power_flags & PMU_PWR_AC_PRESENT)
val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = !!(pbi->flags & PMU_BATT_PRESENT);
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = pmu_bat_get_model_name(pbi);
break;
case POWER_SUPPLY_PROP_ENERGY_AVG:
val->intval = pbi->charge * 1000; /* mWh -> µWh */
break;
case POWER_SUPPLY_PROP_ENERGY_FULL:
val->intval = pbi->max_charge * 1000; /* mWh -> µWh */
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
val->intval = pbi->amperage * 1000; /* mA -> µA */
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
val->intval = pbi->voltage * 1000; /* mV -> µV */
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
val->intval = pbi->time_remaining;
break;
default:
return -EINVAL;
}
return 0;
}
static enum power_supply_property pmu_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_ENERGY_AVG,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
};
/*********************************************************************
* Initialisation
*********************************************************************/
static struct platform_device *bat_pdev;
static int __init pmu_bat_init(void)
{
int ret;
int i;
bat_pdev = platform_device_register_simple("pmu-battery",
0, NULL, 0);
if (IS_ERR(bat_pdev)) {
ret = PTR_ERR(bat_pdev);
goto pdev_register_failed;
}
ret = power_supply_register(&bat_pdev->dev, &pmu_ac);
if (ret)
goto ac_register_failed;
for (i = 0; i < pmu_battery_count; i++) {
struct pmu_battery_dev *pbat = kzalloc(sizeof(*pbat),
GFP_KERNEL);
if (!pbat)
break;
sprintf(pbat->name, "PMU_battery_%d", i);
pbat->bat.name = pbat->name;
pbat->bat.properties = pmu_bat_props;
pbat->bat.num_properties = ARRAY_SIZE(pmu_bat_props);
pbat->bat.get_property = pmu_bat_get_property;
pbat->pbi = &pmu_batteries[i];
ret = power_supply_register(&bat_pdev->dev, &pbat->bat);
if (ret) {
kfree(pbat);
goto battery_register_failed;
}
pbats[i] = pbat;
}
goto success;
battery_register_failed:
while (i--) {
if (!pbats[i])
continue;
power_supply_unregister(&pbats[i]->bat);
kfree(pbats[i]);
}
power_supply_unregister(&pmu_ac);
ac_register_failed:
platform_device_unregister(bat_pdev);
pdev_register_failed:
success:
return ret;
}
static void __exit pmu_bat_exit(void)
{
int i;
for (i = 0; i < PMU_MAX_BATTERIES; i++) {
if (!pbats[i])
continue;
power_supply_unregister(&pbats[i]->bat);
kfree(pbats[i]);
}
power_supply_unregister(&pmu_ac);
platform_device_unregister(bat_pdev);
}
module_init(pmu_bat_init);
module_exit(pmu_bat_exit);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PMU battery driver");
| gpl-2.0 |
pio-masaki/at100-caspar | drivers/video/maxinefb.c | 13744 | 4674 | /*
* linux/drivers/video/maxinefb.c
*
* DECstation 5000/xx onboard framebuffer support ... derived from:
* "HP300 Topcat framebuffer support (derived from macfb of all things)
* Phil Blundell <philb@gnu.org> 1998", the original code can be
* found in the file hpfb.c in the same directory.
*
* DECstation related code Copyright (C) 1999,2000,2001 by
* Michael Engel <engel@unix-ag.org> and
* Karsten Merker <merker@linuxtag.org>.
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
*/
/*
* Changes:
* 2001/01/27 removed debugging and testing code, fixed fb_ops
* initialization which had caused a crash before,
* general cleanup, first official release (KM)
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <video/maxinefb.h>
/* bootinfo.h defines the machine type values, needed when checking */
/* whether we are really running on a Maxine, KM */
#include <asm/bootinfo.h>
static struct fb_info fb_info;
static struct fb_var_screeninfo maxinefb_defined = {
.xres = 1024,
.yres = 768,
.xres_virtual = 1024,
.yres_virtual = 768,
.bits_per_pixel = 8,
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo maxinefb_fix = {
.id = "Maxine",
.smem_len = (1024*768),
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.line_length = 1024,
};
/* Handle the funny Inmos RamDAC/video controller ... */
void maxinefb_ims332_write_register(int regno, register unsigned int val)
{
register unsigned char *regs = (char *) MAXINEFB_IMS332_ADDRESS;
unsigned char *wptr;
wptr = regs + 0xa0000 + (regno << 4);
*((volatile unsigned int *) (regs)) = (val >> 8) & 0xff00;
*((volatile unsigned short *) (wptr)) = val;
}
unsigned int maxinefb_ims332_read_register(int regno)
{
register unsigned char *regs = (char *) MAXINEFB_IMS332_ADDRESS;
unsigned char *rptr;
register unsigned int j, k;
rptr = regs + 0x80000 + (regno << 4);
j = *((volatile unsigned short *) rptr);
k = *((volatile unsigned short *) regs);
return (j & 0xffff) | ((k & 0xff00) << 8);
}
/* Set the palette */
static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info)
{
/* value to be written into the palette reg. */
unsigned long hw_colorvalue = 0;
if (regno > 255)
return 1;
red >>= 8; /* The cmap fields are 16 bits */
green >>= 8; /* wide, but the hardware colormap */
blue >>= 8; /* registers are only 8 bits wide */
hw_colorvalue = (blue << 16) + (green << 8) + (red);
maxinefb_ims332_write_register(IMS332_REG_COLOR_PALETTE + regno,
hw_colorvalue);
return 0;
}
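/*
 * Illustrative sketch (not part of the original file): for a full-red
 * cmap entry red = 0xffff, green = 0x0000, blue = 0x0000, the 16->8 bit
 * shifts above give red = 0xff, and the value written to the palette is
 *
 *   hw_colorvalue = (0x00 << 16) + (0x00 << 8) + 0xff = 0x0000ff
 *
 * i.e. the IMS332 palette register is packed as 0xBBGGRR.
 */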
static struct fb_ops maxinefb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = maxinefb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
int __init maxinefb_init(void)
{
unsigned long fboff;
unsigned long fb_start;
int i;
if (fb_get_options("maxinefb", NULL))
return -ENODEV;
/* Validate we're on the proper machine type */
if (mips_machtype != MACH_DS5000_XX) {
return -EINVAL;
}
printk(KERN_INFO "Maxinefb: Personal DECstation detected\n");
printk(KERN_INFO "Maxinefb: initializing onboard framebuffer\n");
/* Framebuffer display memory base address */
fb_start = DS5000_xx_ONBOARD_FBMEM_START;
/* Clear screen */
for (fboff = fb_start; fboff < fb_start + 0x1ffff; fboff++)
*(volatile unsigned char *)fboff = 0x0;
maxinefb_fix.smem_start = fb_start;
/* erase hardware cursor */
for (i = 0; i < 512; i++) {
maxinefb_ims332_write_register(IMS332_REG_CURSOR_RAM + i,
0);
/*
if (i&0x8 == 0)
maxinefb_ims332_write_register (IMS332_REG_CURSOR_RAM + i, 0x0f);
else
maxinefb_ims332_write_register (IMS332_REG_CURSOR_RAM + i, 0xf0);
*/
}
fb_info.fbops = &maxinefb_ops;
fb_info.screen_base = (char *)maxinefb_fix.smem_start;
fb_info.var = maxinefb_defined;
fb_info.fix = maxinefb_fix;
fb_info.flags = FBINFO_DEFAULT;
fb_alloc_cmap(&fb_info.cmap, 256, 0);
if (register_framebuffer(&fb_info) < 0)
return 1;
return 0;
}
static void __exit maxinefb_exit(void)
{
unregister_framebuffer(&fb_info);
}
#ifdef MODULE
MODULE_LICENSE("GPL");
#endif
module_init(maxinefb_init);
module_exit(maxinefb_exit);
| gpl-2.0 |
fronti90/kernel_lge_geefhd | drivers/parport/probe.c | 13744 | 7589 | /*
* Parallel port device probing code
*
* Authors: Carsten Gross, carsten@sol.wohnheim.uni-ulm.de
* Philip Blundell <philb@gnu.org>
*/
#include <linux/module.h>
#include <linux/parport.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
static const struct {
const char *token;
const char *descr;
} classes[] = {
{ "", "Legacy device" },
{ "PRINTER", "Printer" },
{ "MODEM", "Modem" },
{ "NET", "Network device" },
{ "HDC", "Hard disk" },
{ "PCMCIA", "PCMCIA" },
{ "MEDIA", "Multimedia device" },
{ "FDC", "Floppy disk" },
{ "PORTS", "Ports" },
{ "SCANNER", "Scanner" },
{ "DIGICAM", "Digital camera" },
{ "", "Unknown device" },
{ "", "Unspecified" },
{ "SCSIADAPTER", "SCSI adapter" },
{ NULL, NULL }
};
static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
printk(KERN_INFO "%s", port->name);
if (device >= 0)
printk (" (addr %d)", device);
printk (": %s", classes[info->class].descr);
if (info->class)
printk(", %s %s", info->mfr, info->model);
printk("\n");
}
static void parse_data(struct parport *port, int device, char *str)
{
char *txt = kmalloc(strlen(str)+1, GFP_KERNEL);
char *p = txt, *q;
int guessed_class = PARPORT_CLASS_UNSPEC;
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
while (p) {
char *sep;
q = strchr(p, ';');
if (q) *q = 0;
sep = strchr(p, ':');
if (sep) {
char *u;
*(sep++) = 0;
/* Get rid of trailing blanks */
u = sep + strlen (sep) - 1;
while (u >= p && *u == ' ')
*u-- = '\0';
u = p;
while (*u) {
*u = toupper(*u);
u++;
}
if (!strcmp(p, "MFG") || !strcmp(p, "MANUFACTURER")) {
kfree(info->mfr);
info->mfr = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "MDL") || !strcmp(p, "MODEL")) {
kfree(info->model);
info->model = kstrdup(sep, GFP_KERNEL);
} else if (!strcmp(p, "CLS") || !strcmp(p, "CLASS")) {
int i;
kfree(info->class_name);
info->class_name = kstrdup(sep, GFP_KERNEL);
for (u = sep; *u; u++)
*u = toupper(*u);
for (i = 0; classes[i].token; i++) {
if (!strcmp(classes[i].token, sep)) {
info->class = i;
goto rock_on;
}
}
printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
kfree(info->cmdset);
info->cmdset = kstrdup(sep, GFP_KERNEL);
/* if it speaks printer language, it's
probably a printer */
if (strstr(sep, "PJL") || strstr(sep, "PCL"))
guessed_class = PARPORT_CLASS_PRINTER;
} else if (!strcmp(p, "DES") || !strcmp(p, "DESCRIPTION")) {
kfree(info->description);
info->description = kstrdup(sep, GFP_KERNEL);
}
}
rock_on:
if (q)
p = q + 1;
else
p = NULL;
}
/* If the device didn't tell us its class, maybe we have managed to
guess one from the things it did say. */
if (info->class == PARPORT_CLASS_UNSPEC)
info->class = guessed_class;
pretty_print (port, device);
kfree(txt);
}
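/*
 * Illustrative sketch (not part of the original file): an IEEE 1284
 * Device ID string is a sequence of "KEY:value;" pairs, e.g. for a
 * hypothetical printer:
 *
 *   "MFG:ACME;MDL:PagePrinter 9000;CLS:PRINTER;CMD:PCL,PJL;"
 *
 * parse_data() splits on ';' and then on ':', upper-cases each key, and
 * fills info->mfr, info->model, info->class_name and info->cmdset; the
 * "PCL"/"PJL" command set would also set guessed_class to
 * PARPORT_CLASS_PRINTER through the strstr() checks above.
 */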
/* Read up to count-1 bytes of device id. Terminate buffer with
* '\0'. Buffer begins with two Device ID length bytes as given by
* device. */
static ssize_t parport_read_device_id (struct parport *port, char *buffer,
size_t count)
{
unsigned char length[2];
unsigned lelen, belen;
size_t idlens[4];
unsigned numidlens;
unsigned current_idlen;
ssize_t retval;
size_t len;
/* First two bytes are MSB,LSB of inclusive length. */
retval = parport_read (port, length, 2);
if (retval < 0)
return retval;
if (retval != 2)
return -EIO;
if (count < 2)
return 0;
memcpy(buffer, length, 2);
len = 2;
/* Some devices wrongly send LE length, and some send it two
* bytes short. Construct a sorted array of lengths to try. */
belen = (length[0] << 8) + length[1];
lelen = (length[1] << 8) + length[0];
idlens[0] = min(belen, lelen);
idlens[1] = idlens[0]+2;
if (belen != lelen) {
int off = 2;
/* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
if (idlens[0] <= 2)
off = 0;
idlens[off] = max(belen, lelen);
idlens[off+1] = idlens[off]+2;
numidlens = off+2;
}
else {
/* Some devices don't truly implement Device ID, but
* just return a constant nibble forever. This also
* catches those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
printk (KERN_DEBUG "%s: reported broken Device ID"
" length of %#zX bytes\n",
port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
}
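/*
 * Illustrative sketch (not part of the original file): if the two
 * length bytes read above were 0x00 0x17, then
 *
 *   belen = (0x00 << 8) + 0x17 = 23     (spec-conformant, MSB first)
 *   lelen = (0x17 << 8) + 0x00 = 5888   (byte-swapped by a buggy device)
 *
 * so idlens[] = {23, 25, 5888, 5890} and numidlens = 4: each candidate
 * length is also tried plus 2, in case the device failed to count the
 * two length bytes themselves.
 */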
/* Try to respect the given ID length despite all the bugs in
* the ID length. Read according to shortest possible ID
* first. */
for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
size_t idlen = idlens[current_idlen];
if (idlen+1 >= count)
break;
retval = parport_read (port, buffer+len, idlen-len);
if (retval < 0)
return retval;
len += retval;
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
printk (KERN_DEBUG "%s: Device ID was %zd bytes"
" while device told it would be %d"
" bytes\n",
port->name, len, belen);
}
goto done;
}
/* This might end reading the Device ID too
* soon. Hopefully the needed fields were already in
* the first 256 bytes or so that we must have read so
* far. */
if (buffer[len-1] == ';') {
printk (KERN_DEBUG "%s: Device ID reading stopped"
" before device told data not available. "
"Current idlen %u of %u, len bytes %02X %02X\n",
port->name, current_idlen, numidlens,
length[0], length[1]);
goto done;
}
}
if (current_idlen < numidlens) {
/* Buffer not large enough, read to end of buffer. */
size_t idlen, len2;
if (len+1 < count) {
retval = parport_read (port, buffer+len, count-len-1);
if (retval < 0)
return retval;
len += retval;
}
/* Read the whole ID since some devices would not
* otherwise give back the Device ID from beginning
* next time when asked. */
idlen = idlens[current_idlen];
len2 = len;
while (len2 < idlen && retval > 0) {
char tmp[4];
retval = parport_read (port, tmp,
min(sizeof tmp, idlen-len2));
if (retval < 0)
return retval;
len2 += retval;
}
}
/* In addition, there are broken devices out there that don't
even finish off with a semi-colon. We do not need to care
about those at this time. */
done:
buffer[len] = '\0';
return len;
}
/* Get Std 1284 Device ID. */
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
struct pardevice *dev = parport_open (devnum, "Device ID probe");
if (!dev)
return -ENXIO;
parport_claim_or_block (dev);
/* Negotiate to compatibility mode, and then to device ID
* mode. (This so that we start form beginning of device ID if
* already in device ID mode.) */
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
retval = parport_negotiate (dev->port,
IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
if (!retval) {
retval = parport_read_device_id (dev->port, buffer, count);
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
if (retval > 2)
parse_data (dev->port, dev->daisy, buffer+2);
}
parport_release (dev);
parport_close (dev);
return retval;
}
| gpl-2.0 |
milokim/linux | drivers/crypto/qat/qat_c62x/adf_drv.c | 177 | 9720 | /*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2014 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_c62x_hw_data.h"
#define ADF_SYSTEM_DEVICE(device_id) \
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
static const struct pci_device_id adf_pci_tbl[] = {
ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
{0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);
static struct pci_driver adf_driver = {
.id_table = adf_pci_tbl,
.name = ADF_C62X_DEVICE_NAME,
.probe = adf_probe,
.remove = adf_remove,
.sriov_configure = adf_sriov_configure,
};
static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
{
pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
}
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
if (bar->virt_addr)
pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
}
if (accel_dev->hw_device) {
switch (accel_pci_dev->pci_dev->device) {
case ADF_C62X_PCI_DEVICE_ID:
adf_clean_hw_data_c62x(accel_dev->hw_device);
break;
default:
break;
}
kfree(accel_dev->hw_device);
accel_dev->hw_device = NULL;
}
adf_cfg_dev_remove(accel_dev);
debugfs_remove(accel_dev->debugfs_dir);
adf_devmgr_rm_dev(accel_dev, NULL);
}
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret, bar_mask;
switch (ent->device) {
case ADF_C62X_PCI_DEVICE_ID:
break;
default:
dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
return -ENODEV;
}
if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
/* If the accelerator is connected to a node with no memory
* there is no point in using the accelerator since the remote
* memory transaction will be very slow. */
dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
return -EINVAL;
}
accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = pdev;
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
if (adf_devmgr_add_dev(accel_dev, NULL)) {
dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
kfree(accel_dev);
return -EFAULT;
}
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
}
accel_dev->hw_device = hw_data;
adf_init_hw_data_c62x(accel_dev->hw_device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
&hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
accel_pci_dev->sku = hw_data->get_sku(hw_data);
/* If the device has no acceleration engines then ignore it. */
if (!hw_data->accel_mask || !hw_data->ae_mask ||
((~hw_data->ae_mask) & 0x01)) {
dev_err(&pdev->dev, "No acceleration units found");
ret = -EFAULT;
goto out_err;
}
/* Create dev top level debugfs entry */
snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
if (!accel_dev->debugfs_dir) {
dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
ret = -EINVAL;
goto out_err;
}
/* Create device configuration table */
ret = adf_cfg_dev_add(accel_dev);
if (ret)
goto out_err;
/* enable PCI device */
if (pci_enable_device(pdev)) {
ret = -EFAULT;
goto out_err;
}
/* set dma identifier */
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
dev_err(&pdev->dev, "No usable DMA configuration\n");
ret = -EFAULT;
goto out_err_disable;
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
}
} else {
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
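/*
 * Illustrative note (not part of the original file): this is the usual
 * DMA mask fallback pattern - prefer a 64-bit mask and, only if the
 * platform cannot satisfy it, drop to 32-bit; the coherent (consistent)
 * mask is then set to match whichever width succeeded.
 */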
if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
ret = -EFAULT;
goto out_err_disable;
}
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
&hw_data->accel_capabilities_mask);
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
if (!bar->base_addr)
break;
bar->size = pci_resource_len(pdev, bar_nr);
bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
if (!bar->virt_addr) {
dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
ret = -EFAULT;
goto out_err_free_reg;
}
}
pci_set_master(pdev);
if (adf_enable_aer(accel_dev, &adf_driver)) {
dev_err(&pdev->dev, "Failed to enable aer\n");
ret = -EFAULT;
goto out_err_free_reg;
}
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err_free_reg;
}
ret = qat_crypto_dev_config(accel_dev);
if (ret)
goto out_err_free_reg;
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
ret = adf_dev_start(accel_dev);
if (ret)
goto out_err_dev_stop;
return ret;
out_err_dev_stop:
adf_dev_stop(accel_dev);
out_err_dev_shutdown:
adf_dev_shutdown(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
pci_disable_device(accel_pci_dev->pci_dev);
out_err:
adf_cleanup_accel(accel_dev);
kfree(accel_dev);
return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
if (!accel_dev) {
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);
}
static int __init adfdrv_init(void)
{
request_module("intel_qat");
if (pci_register_driver(&adf_driver)) {
pr_err("QAT: Driver initialization failed\n");
return -EFAULT;
}
return 0;
}
static void __exit adfdrv_release(void)
{
pci_unregister_driver(&adf_driver);
}
module_init(adfdrv_init);
module_exit(adfdrv_release);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
| gpl-2.0 |
yuvjjang/KVMGT-kernel | drivers/target/loopback/tcm_loop.c | 177 | 42174 | /*******************************************************************************
*
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
* © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include "tcm_loop.h"
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;
static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
static int tcm_loop_queue_status(struct se_cmd *se_cmd);
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
/*
* Do not release struct se_cmd's containing a valid TMR
* pointer. These will be released directly in tcm_loop_device_reset()
* with transport_generic_free_cmd().
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
return 0;
/*
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
transport_generic_free_cmd(se_cmd, 0);
return 1;
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
seq_printf(m, "tcm_loop_proc_info()\n");
return 0;
}
static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);
static int pseudo_lld_bus_match(struct device *dev,
struct device_driver *dev_driver)
{
return 1;
}
static struct bus_type tcm_loop_lld_bus = {
.name = "tcm_loop_bus",
.match = pseudo_lld_bus_match,
.probe = tcm_loop_driver_probe,
.remove = tcm_loop_driver_remove,
};
static struct device_driver tcm_loop_driverfs = {
.name = "tcm_loop",
.bus = &tcm_loop_lld_bus,
};
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
struct device *tcm_loop_primary;
/*
* Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
* drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
*/
static int tcm_loop_change_queue_depth(
struct scsi_device *sdev,
int depth,
int reason)
{
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
case SCSI_QDEPTH_RAMP_UP:
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
default:
return -EOPNOTSUPP;
}
return sdev->queue_depth;
}
static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag);
if (tag)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag = 0;
return tag;
}
/*
* Locate the SAM Task Attr from struct scsi_cmnd *
*/
static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
if (sc->device->tagged_supported) {
switch (sc->tag) {
case HEAD_OF_QUEUE_TAG:
return MSG_HEAD_TAG;
case ORDERED_QUEUE_TAG:
return MSG_ORDERED_TAG;
default:
break;
}
}
return MSG_SIMPLE_TAG;
}
static void tcm_loop_submission_work(struct work_struct *work)
{
struct tcm_loop_cmd *tl_cmd =
container_of(work, struct tcm_loop_cmd, work);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
u32 sgl_bidi_count = 0;
int rc;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
/*
* Ensure that this tl_tpg reference from the incoming sc->device->id
* has already been configured via tcm_loop_make_naa_tpg().
*/
if (!tl_tpg->tl_hba) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
" does not exist\n");
set_host_byte(sc, DID_ERROR);
goto out_done;
}
if (scsi_bidi_cmnd(sc)) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
sgl_bidi_count = sdb->table.nents;
se_cmd->se_cmd_flags |= SCF_BIDI;
}
rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
scsi_bufflen(sc), tcm_loop_sam_attr(sc),
sc->sc_data_direction, 0,
scsi_sglist(sc), scsi_sg_count(sc),
sgl_bidi, sgl_bidi_count,
scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
if (rc < 0) {
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
return;
out_done:
sc->scsi_done(sc);
return;
}
/*
* ->queuecommand can be and usually is called from interrupt context, so
* defer the actual submission to a workqueue.
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
struct tcm_loop_cmd *tl_cmd;
pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
sc->scsi_done(sc);
return 0;
}
tl_cmd->sc = sc;
tl_cmd->sc_cmd_tag = sc->tag;
INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
queue_work(tcm_loop_workqueue, &tl_cmd->work);
return 0;
}
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
return ret;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);
rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
if (rc < 0)
goto release;
if (tmr == TMR_ABORT_TASK)
se_cmd->se_tmr_req->ref_task_tag = task;
/*
* Locate the underlying TCM struct se_lun
*/
if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
}
/*
* Queue the TMR to TCM Core and sleep waiting for
* tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
/*
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
return ret;
}
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
/*
* Locate the tl_nexus and se_sess pointers
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
/*
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
sc->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;
/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
/*
* Locate the tl_nexus and se_sess pointers
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
/*
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
if (!tl_hba) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
/*
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
if (tl_tpg) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return SUCCESS;
}
return FAILED;
}
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
return 0;
}
static int tcm_loop_slave_configure(struct scsi_device *sd)
{
if (sd->tagged_supported) {
scsi_activate_tcq(sd, sd->queue_depth);
scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
sd->host->cmd_per_lun);
} else {
scsi_adjust_queue_depth(sd, 0,
sd->host->cmd_per_lun);
}
return 0;
}
static struct scsi_host_template tcm_loop_driver_template = {
.show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.change_queue_type = tcm_loop_change_queue_type,
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,
.cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.slave_configure = tcm_loop_slave_configure,
.module = THIS_MODULE,
};
static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
/*
* Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
*/
*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
/*
* Setup single ID, Channel and LUN for now..
*/
sh->max_id = 2;
sh->max_lun = 0;
sh->max_channel = 0;
sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
scsi_host_set_prot(sh, host_prot);
scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
return 0;
}
static int tcm_loop_driver_remove(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
tl_hba = to_tcm_loop_hba(dev);
sh = tl_hba->sh;
scsi_remove_host(sh);
scsi_host_put(sh);
return 0;
}
static void tcm_loop_release_adapter(struct device *dev)
{
struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
kfree(tl_hba);
}
/*
* Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
*/
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
int ret;
tl_hba->dev.bus = &tcm_loop_lld_bus;
tl_hba->dev.parent = tcm_loop_primary;
tl_hba->dev.release = &tcm_loop_release_adapter;
dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
ret = device_register(&tl_hba->dev);
if (ret) {
pr_err("device_register() failed for"
" tl_hba->dev: %d\n", ret);
return -ENODEV;
}
return 0;
}
/*
* Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
* tcm_loop SCSI bus.
*/
static int tcm_loop_alloc_core_bus(void)
{
int ret;
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
pr_err("driver_register() failed for"
"tcm_loop_driverfs\n");
goto bus_unreg;
}
pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
root_device_unregister(tcm_loop_primary);
return ret;
}
static void tcm_loop_release_core_bus(void)
{
driver_unregister(&tcm_loop_driverfs);
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
{
return "loopback";
}
static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
/*
* tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
* time based on the protocol dependent prefix of the passed configfs group.
*
* Based upon tl_proto_id, TCM_Loop emulates the requested fabric
* ProtocolID using target_core_fabric_lib.c symbols.
*/
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return sas_get_fabric_proto_ident(se_tpg);
case SCSI_PROTOCOL_FCP:
return fc_get_fabric_proto_ident(se_tpg);
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_fabric_proto_ident(se_tpg);
default:
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
return sas_get_fabric_proto_ident(se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Return the passed NAA identifier for the SAS Target Port
*/
return &tl_tpg->tl_hba->tl_wwn_address[0];
}
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
return tl_tpg->tl_tpgt;
}
static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
return 1;
}
static u32 tcm_loop_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
case SCSI_PROTOCOL_FCP:
return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
default:
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
}
static u32 tcm_loop_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
case SCSI_PROTOCOL_FCP:
return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
default:
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
}
/*
* Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
* Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
*/
static char *tcm_loop_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
case SCSI_PROTOCOL_FCP:
return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
case SCSI_PROTOCOL_ISCSI:
return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
default:
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
}
/*
* Returning (1) here allows for target_core_mod struct se_node_acl to be generated
* based upon the incoming fabric dependent SCSI Initiator Port
*/
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
return 1;
}
static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
return 0;
}
/*
* Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
* local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
*/
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
return 0;
}
/*
* Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
* never be called for TCM_Loop by target_core_fabric_configfs.c code.
* It has been added here as a nop for target_fabric_tf_ops_check()
*/
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
return 0;
}
static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
struct tcm_loop_nacl *tl_nacl;
tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
if (!tl_nacl) {
pr_err("Unable to allocate struct tcm_loop_nacl\n");
return NULL;
}
return &tl_nacl->se_node_acl;
}
static void tcm_loop_tpg_release_fabric_acl(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl)
{
struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
struct tcm_loop_nacl, se_node_acl);
kfree(tl_nacl);
}
static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
return 1;
}
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
return 1;
}
static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
return;
}
static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
return tl_cmd->sc_cmd_tag;
}
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
return tl_cmd->sc_cmd_state;
}
static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
return 0;
}
static void tcm_loop_close_session(struct se_session *se_sess)
{
return;
}
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
 * Linux/SCSI has already sent down a struct scsi_cmnd with
 * sc->sc_data_direction of DMA_TO_DEVICE and a struct scatterlist array,
 * and that memory has already been mapped to struct se_cmd->t_mem_list
 * format with transport_generic_map_mem_to_cmd().
 *
 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 * object execution queue.
 */
target_execute_cmd(se_cmd);
return 0;
}
static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
return 0;
}
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
sc->scsi_done(sc);
return 0;
}
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
} else
sc->result = se_cmd->scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
sc->scsi_done(sc);
return 0;
}
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
/*
 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, so go ahead
 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
 */
atomic_set(&tl_tmr->tmr_complete, 1);
wake_up(&tl_tmr->tl_tmr_wait);
}
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
switch (tl_hba->tl_proto_id) {
case SCSI_PROTOCOL_SAS:
return "SAS";
case SCSI_PROTOCOL_FCP:
return "FCP";
case SCSI_PROTOCOL_ISCSI:
return "iSCSI";
default:
break;
}
return "Unknown";
}
/* Start items for tcm_loop_port_cit */
static int tcm_loop_port_link(
struct se_portal_group *se_tpg,
struct se_lun *lun)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
atomic_inc(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_inc();
/*
* Add Linux/SCSI struct scsi_device by HCTL
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
static void tcm_loop_port_unlink(
struct se_portal_group *se_tpg,
struct se_lun *se_lun)
{
struct scsi_device *sd;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
tl_hba = tl_tpg->tl_hba;
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
pr_err("Unable to locate struct scsi_device for %d:%d:"
"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
/*
* Remove Linux/SCSI struct scsi_device by HCTL
*/
scsi_remove_device(sd);
scsi_device_put(sd);
atomic_dec(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_dec();
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
/* Start items for tcm_loop_nexus_cit */
static int tcm_loop_make_nexus(
struct tcm_loop_tpg *tl_tpg,
const char *name)
{
struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
/*
* Initialize the struct se_session pointer
*/
tl_nexus->se_sess = transport_init_session();
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
goto out;
}
/*
 * Since we are running in 'demo mode', this call will generate a
 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
 * Initiator port name of the passed configfs group 'name'.
 */
tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
se_tpg, (unsigned char *)name);
if (!tl_nexus->se_sess->se_node_acl) {
transport_free_session(tl_nexus->se_sess);
goto out;
}
/*
 * Now, register the SAS I_T Nexus as active with the call to
 * __transport_register_session()
 */
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_hba->tl_nexus = tl_nexus;
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
out:
kfree(tl_nexus);
return ret;
}
static int tcm_loop_drop_nexus(
struct tcm_loop_tpg *tpg)
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba = tpg->tl_hba;
if (!tl_hba)
return -ENODEV;
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;
se_sess = tl_nexus->se_sess;
if (!se_sess)
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
pr_err("Unable to remove TCM_Loop I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated SAS Target Port
*/
transport_deregister_session(tl_nexus->se_sess);
tpg->tl_hba->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
/* End items for tcm_loop_nexus_cit */
static ssize_t tcm_loop_tpg_show_nexus(
struct se_portal_group *se_tpg,
char *page)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
tl_nexus = tl_tpg->tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;
ret = snprintf(page, PAGE_SIZE, "%s\n",
tl_nexus->se_sess->se_node_acl->initiatorname);
return ret;
}
static ssize_t tcm_loop_tpg_store_nexus(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
int ret;
/*
 * Shutdown the active I_T nexus if 'NULL' is passed.
 */
if (!strncmp(page, "NULL", 4)) {
ret = tcm_loop_drop_nexus(tl_tpg);
return (!ret) ? count : ret;
}
/*
* Otherwise make sure the passed virtual Initiator port WWN matches
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
goto check_newline;
}
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
* Clear any trailing newline for the NAA WWN
*/
check_newline:
if (i_port[strlen(i_port)-1] == '\n')
i_port[strlen(i_port)-1] = '\0';
ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
if (ret < 0)
return ret;
return count;
}
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
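/*
 * Illustrative usage, assuming the standard target configfs layout under
 * /sys/kernel/config/target (the WWN values below are hypothetical):
 *
 *   echo naa.60014055e55d77a1 > \
 *     /sys/kernel/config/target/loopback/naa.60014054a6b5e55d/tpgt_1/nexus
 *   echo NULL > \
 *     /sys/kernel/config/target/loopback/naa.60014054a6b5e55d/tpgt_1/nexus
 *
 * The first write establishes the I_T nexus via tcm_loop_make_nexus();
 * writing "NULL" drops it via tcm_loop_drop_nexus().
 */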
static ssize_t tcm_loop_tpg_show_transport_status(
struct se_portal_group *se_tpg,
char *page)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
const char *status = NULL;
ssize_t ret = -EINVAL;
switch (tl_tpg->tl_transport_status) {
case TCM_TRANSPORT_ONLINE:
status = "online";
break;
case TCM_TRANSPORT_OFFLINE:
status = "offline";
break;
default:
break;
}
if (status)
ret = snprintf(page, PAGE_SIZE, "%s\n", status);
return ret;
}
static ssize_t tcm_loop_tpg_store_transport_status(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
if (!strncmp(page, "online", 6)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return count;
}
if (!strncmp(page, "offline", 7)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
return count;
}
return -EINVAL;
}
TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);
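/*
 * Illustrative usage (hypothetical path): only the exact strings "online"
 * and "offline" are accepted; anything else returns -EINVAL:
 *
 *   echo offline > .../loopback/naa.60014054a6b5e55d/tpgt_1/transport_status
 *   echo online > .../loopback/naa.60014054a6b5e55d/tpgt_1/transport_status
 */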
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_nexus.attr,
&tcm_loop_tpg_transport_status.attr,
NULL,
};
/* Start items for tcm_loop_naa_cit */
static struct se_portal_group *tcm_loop_make_naa_tpg(
struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
struct tcm_loop_tpg *tl_tpg;
char *tpgt_str, *end_ptr;
int ret;
unsigned short int tpgt;
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
if (tpgt >= TL_TPGS_PER_HBA) {
pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
tl_tpg->tl_hba = tl_hba;
tl_tpg->tl_tpgt = tpgt;
/*
 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
 */
ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(ret);
pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
}
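/*
 * Illustrative usage (hypothetical WWN): the TPG above is instantiated when
 * userspace creates a "tpgt_#" directory group, e.g.:
 *
 *   mkdir -p /sys/kernel/config/target/loopback/naa.60014054a6b5e55d/tpgt_1
 *
 * The suffix after "tpgt_" is parsed with simple_strtoul() and must be
 * below TL_TPGS_PER_HBA.
 */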
static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba;
unsigned short tpgt;
tl_hba = tl_tpg->tl_hba;
tpgt = tl_tpg->tl_tpgt;
/*
* Release the I_T Nexus for the Virtual SAS link if present
*/
tcm_loop_drop_nexus(tl_tpg);
/*
 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
 */
core_tpg_deregister(se_tpg);
tl_tpg->tl_hba = NULL;
tl_tpg->tl_tpgt = 0;
pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */
static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
char *ptr;
int ret, off = 0;
tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
if (!tl_hba) {
pr_err("Unable to allocate struct tcm_loop_hba\n");
return ERR_PTR(-ENOMEM);
}
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
*/
ptr = strstr(name, "naa.");
if (ptr) {
tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
goto check_len;
}
ptr = strstr(name, "fc.");
if (ptr) {
tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
off = 3; /* Skip over "fc." */
goto check_len;
}
ptr = strstr(name, "iqn.");
if (!ptr) {
pr_err("Unable to locate prefix for emulated Target "
"Port: %s\n", name);
ret = -EINVAL;
goto out;
}
tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
pr_err("Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
ret = -EINVAL;
goto out;
}
snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
/*
 * Call tcm_loop_setup_hba_bus() to register the emulated Linux/SCSI LLD
 * of type struct Scsi_Host at tl_hba->sh, via the device_register()
 * callbacks in tcm_loop_driver_probe()
 */
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
goto out;
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
" %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
return &tl_hba->tl_hba_wwn;
out:
kfree(tl_hba);
return ERR_PTR(ret);
}
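/*
 * Illustrative usage (hypothetical names): the emulated SCSI HBA above is
 * created when userspace makes a WWN directory whose prefix selects the
 * emulated protocol:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014054a6b5e55d   (SAS)
 *   mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3    (FCP)
 *   mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.foo:t1 (iSCSI)
 */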
static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
 * Call device_unregister() on the original tl_hba->dev.
 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
 * release *tl_hba.
 */
device_unregister(&tl_hba->dev);
}
static ssize_t tcm_loop_wwn_show_attr_version(
struct target_fabric_configfs *tf,
char *page)
{
return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}
TF_WWN_ATTR_RO(tcm_loop, version);
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
&tcm_loop_wwn_version.attr,
NULL,
};
/* End items for tcm_loop_cit */
static int tcm_loop_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
/*
* Set the TCM Loop HBA counter to zero
*/
tcm_loop_hba_no_cnt = 0;
/*
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
if (IS_ERR(fabric)) {
pr_err("tcm_loop_register_configfs() failed!\n");
return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod
*/
fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
fabric->tf_ops.tpg_get_pr_transport_id_len =
&tcm_loop_get_pr_transport_id_len;
fabric->tf_ops.tpg_parse_pr_out_transport_id =
&tcm_loop_parse_pr_out_transport_id;
fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
fabric->tf_ops.tpg_check_demo_mode_cache =
&tcm_loop_check_demo_mode_cache;
fabric->tf_ops.tpg_check_demo_mode_write_protect =
&tcm_loop_check_demo_mode_write_protect;
fabric->tf_ops.tpg_check_prod_mode_write_protect =
&tcm_loop_check_prod_mode_write_protect;
/*
 * The TCM loopback fabric module runs in demo-mode to a local
 * virtual SCSI device, so fabric dependent initiator ACLs are
 * not required.
 */
fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
fabric->tf_ops.tpg_release_fabric_acl =
&tcm_loop_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
/*
* Used for setting up remaining TCM resources in process context
*/
fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
fabric->tf_ops.close_session = &tcm_loop_close_session;
fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
fabric->tf_ops.sess_get_initiator_sid = NULL;
fabric->tf_ops.write_pending = &tcm_loop_write_pending;
fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
/*
* Not used for TCM loopback
*/
fabric->tf_ops.set_default_node_attributes =
&tcm_loop_set_default_node_attributes;
fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
/*
* Setup function pointers for generic logic in target_core_fabric_configfs.c
*/
fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
/*
* fabric_post_link() and fabric_pre_unlink() are used for
* registration and release of TCM Loop Virtual SCSI LUNs.
*/
fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
fabric->tf_ops.fabric_make_np = NULL;
fabric->tf_ops.fabric_drop_np = NULL;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
/*
* Once fabric->tf_ops has been setup, now register the fabric for
* use within TCM
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
pr_err("target_fabric_configfs_register() for"
" TCM_Loop failed!\n");
target_fabric_configfs_free(fabric);
return ret;
}
/*
* Setup our local pointer to *fabric.
*/
tcm_loop_fabric_configfs = fabric;
pr_debug("TCM_LOOP[0] - Set fabric ->"
" tcm_loop_fabric_configfs\n");
return 0;
}
static void tcm_loop_deregister_configfs(void)
{
if (!tcm_loop_fabric_configfs)
return;
target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
tcm_loop_fabric_configfs = NULL;
pr_debug("TCM_LOOP[0] - Cleared"
" tcm_loop_fabric_configfs\n");
}
static int __init tcm_loop_fabric_init(void)
{
int ret = -ENOMEM;
tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
if (!tcm_loop_workqueue)
goto out;
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
goto out_destroy_workqueue;
}
ret = tcm_loop_alloc_core_bus();
if (ret)
goto out_destroy_cache;
ret = tcm_loop_register_configfs();
if (ret)
goto out_release_core_bus;
return 0;
out_release_core_bus:
tcm_loop_release_core_bus();
out_destroy_cache:
kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
destroy_workqueue(tcm_loop_workqueue);
out:
return ret;
}
static void __exit tcm_loop_fabric_exit(void)
{
tcm_loop_deregister_configfs();
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);
| gpl-2.0 |
Broadcom/cygnus-linux | drivers/remoteproc/st_remoteproc.c | 177 | 10121 | /*
* ST's Remote Processor Control Driver
*
* Copyright (C) 2015 STMicroelectronics - All Rights Reserved
*
* Author: Ludovic Barre <ludovic.barre@st.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include "remoteproc_internal.h"
#define ST_RPROC_VQ0 0
#define ST_RPROC_VQ1 1
#define ST_RPROC_MAX_VRING 2
#define MBOX_RX 0
#define MBOX_TX 1
#define MBOX_MAX 2
struct st_rproc_config {
bool sw_reset;
bool pwr_reset;
unsigned long bootaddr_mask;
};
struct st_rproc {
struct st_rproc_config *config;
struct reset_control *sw_reset;
struct reset_control *pwr_reset;
struct clk *clk;
u32 clk_rate;
struct regmap *boot_base;
u32 boot_offset;
struct mbox_chan *mbox_chan[ST_RPROC_MAX_VRING * MBOX_MAX];
struct mbox_client mbox_client_vq0;
struct mbox_client mbox_client_vq1;
};
static void st_rproc_mbox_callback(struct device *dev, u32 msg)
{
struct rproc *rproc = dev_get_drvdata(dev);
if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
}
static
void st_rproc_mbox_callback_vq0(struct mbox_client *mbox_client, void *data)
{
st_rproc_mbox_callback(mbox_client->dev, 0);
}
static
void st_rproc_mbox_callback_vq1(struct mbox_client *mbox_client, void *data)
{
st_rproc_mbox_callback(mbox_client->dev, 1);
}
static void st_rproc_kick(struct rproc *rproc, int vqid)
{
struct st_rproc *ddata = rproc->priv;
struct device *dev = rproc->dev.parent;
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
if (WARN_ON(vqid >= ST_RPROC_MAX_VRING))
return;
ret = mbox_send_message(ddata->mbox_chan[vqid * MBOX_MAX + MBOX_TX],
(void *)&vqid);
if (ret < 0)
dev_err(dev, "failed to send message via mbox: %d\n", ret);
}
static int st_rproc_start(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
int err;
regmap_update_bits(ddata->boot_base, ddata->boot_offset,
ddata->config->bootaddr_mask, rproc->bootaddr);
err = clk_enable(ddata->clk);
if (err) {
dev_err(&rproc->dev, "Failed to enable clock\n");
return err;
}
if (ddata->config->sw_reset) {
err = reset_control_deassert(ddata->sw_reset);
if (err) {
dev_err(&rproc->dev, "Failed to deassert S/W Reset\n");
goto sw_reset_fail;
}
}
if (ddata->config->pwr_reset) {
err = reset_control_deassert(ddata->pwr_reset);
if (err) {
dev_err(&rproc->dev, "Failed to deassert Power Reset\n");
goto pwr_reset_fail;
}
}
dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr);
return 0;
pwr_reset_fail:
if (ddata->config->sw_reset)
reset_control_assert(ddata->sw_reset);
sw_reset_fail:
clk_disable(ddata->clk);
return err;
}
static int st_rproc_stop(struct rproc *rproc)
{
struct st_rproc *ddata = rproc->priv;
int sw_err = 0, pwr_err = 0;
if (ddata->config->sw_reset) {
sw_err = reset_control_assert(ddata->sw_reset);
if (sw_err)
dev_err(&rproc->dev, "Failed to assert S/W Reset\n");
}
if (ddata->config->pwr_reset) {
pwr_err = reset_control_assert(ddata->pwr_reset);
if (pwr_err)
dev_err(&rproc->dev, "Failed to assert Power Reset\n");
}
clk_disable(ddata->clk);
return sw_err ?: pwr_err;
}
static const struct rproc_ops st_rproc_ops = {
.kick = st_rproc_kick,
.start = st_rproc_start,
.stop = st_rproc_stop,
};
/*
* Fetch state of the processor: 0 is off, 1 is on.
*/
static int st_rproc_state(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
int reset_sw = 0, reset_pwr = 0;
if (ddata->config->sw_reset)
reset_sw = reset_control_status(ddata->sw_reset);
if (ddata->config->pwr_reset)
reset_pwr = reset_control_status(ddata->pwr_reset);
if (reset_sw < 0 || reset_pwr < 0)
return -EINVAL;
return !reset_sw && !reset_pwr;
}
static const struct st_rproc_config st40_rproc_cfg = {
.sw_reset = true,
.pwr_reset = true,
.bootaddr_mask = GENMASK(28, 1),
};
static const struct st_rproc_config st231_rproc_cfg = {
.sw_reset = true,
.pwr_reset = false,
.bootaddr_mask = GENMASK(31, 6),
};
static const struct of_device_id st_rproc_match[] = {
{ .compatible = "st,st40-rproc", .data = &st40_rproc_cfg },
{ .compatible = "st,st231-rproc", .data = &st231_rproc_cfg },
{},
};
MODULE_DEVICE_TABLE(of, st_rproc_match);
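/*
 * A minimal device-tree sketch for an ST231 instance of this driver; the
 * property values are hypothetical and board specific:
 *
 *	st231-gp0@40000000 {
 *		compatible	= "st,st231-rproc";
 *		memory-region	= <&gp0_reserved>;
 *		resets		= <&softreset STIH407_ST231_GP0_SOFTRESET>;
 *		reset-names	= "sw_reset";
 *		clocks		= <&clk_s_c0_flexgen CLK_ST231_GP0>;
 *		clock-frequency	= <600000000>;
 *		st,syscfg	= <&syscfg_core 0x22c>;
 *	};
 */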
static int st_rproc_parse_dt(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
struct device_node *np = dev->of_node;
int err;
if (ddata->config->sw_reset) {
ddata->sw_reset = devm_reset_control_get_exclusive(dev,
"sw_reset");
if (IS_ERR(ddata->sw_reset)) {
dev_err(dev, "Failed to get S/W Reset\n");
return PTR_ERR(ddata->sw_reset);
}
}
if (ddata->config->pwr_reset) {
ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
"pwr_reset");
if (IS_ERR(ddata->pwr_reset)) {
dev_err(dev, "Failed to get Power Reset\n");
return PTR_ERR(ddata->pwr_reset);
}
}
ddata->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddata->clk)) {
dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddata->clk);
}
err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
if (err) {
dev_err(dev, "failed to get clock frequency\n");
return err;
}
ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
if (IS_ERR(ddata->boot_base)) {
dev_err(dev, "Boot base not found\n");
return PTR_ERR(ddata->boot_base);
}
err = of_property_read_u32_index(np, "st,syscfg", 1,
&ddata->boot_offset);
if (err) {
dev_err(dev, "Boot offset not found\n");
return -EINVAL;
}
err = of_reserved_mem_device_init(dev);
if (err) {
dev_err(dev, "Failed to obtain shared memory\n");
return err;
}
err = clk_prepare(ddata->clk);
if (err)
dev_err(dev, "failed to prepare clock\n");
return err;
}
static int st_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
struct st_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
struct mbox_chan *chan;
int enabled;
int ret, i;
match = of_match_device(st_rproc_match, dev);
if (!match || !match->data) {
dev_err(dev, "No device match found\n");
return -ENODEV;
}
rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ddata = rproc->priv;
ddata->config = (struct st_rproc_config *)match->data;
platform_set_drvdata(pdev, rproc);
ret = st_rproc_parse_dt(pdev);
if (ret)
goto free_rproc;
enabled = st_rproc_state(pdev);
if (enabled < 0) {
ret = enabled;
goto free_clk;
}
if (enabled) {
atomic_inc(&rproc->power);
rproc->state = RPROC_RUNNING;
} else {
clk_set_rate(ddata->clk, ddata->clk_rate);
}
if (of_get_property(np, "mbox-names", NULL)) {
ddata->mbox_client_vq0.dev = dev;
ddata->mbox_client_vq0.tx_done = NULL;
ddata->mbox_client_vq0.tx_block = false;
ddata->mbox_client_vq0.knows_txdone = false;
ddata->mbox_client_vq0.rx_callback = st_rproc_mbox_callback_vq0;
ddata->mbox_client_vq1.dev = dev;
ddata->mbox_client_vq1.tx_done = NULL;
ddata->mbox_client_vq1.tx_block = false;
ddata->mbox_client_vq1.knows_txdone = false;
ddata->mbox_client_vq1.rx_callback = st_rproc_mbox_callback_vq1;
/*
 * A co-processor can also be controlled without an IPC mechanism;
 * this driver works without mbox and rpmsg, so mailbox channels are
 * only requested when "mbox-names" is present.
 */
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 0\n");
ret = PTR_ERR(chan);
goto free_clk;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 0\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 1\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
if (IS_ERR(chan)) {
dev_err(&rproc->dev, "failed to request mbox chan 1\n");
ret = PTR_ERR(chan);
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
}
ret = rproc_add(rproc);
if (ret)
goto free_mbox;
return 0;
free_mbox:
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
free_clk:
clk_unprepare(ddata->clk);
free_rproc:
rproc_free(rproc);
return ret;
}
static int st_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
struct st_rproc *ddata = rproc->priv;
int i;
rproc_del(rproc);
clk_disable_unprepare(ddata->clk);
of_reserved_mem_device_release(&pdev->dev);
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
rproc_free(rproc);
return 0;
}
static struct platform_driver st_rproc_driver = {
.probe = st_rproc_probe,
.remove = st_rproc_remove,
.driver = {
.name = "st-rproc",
.of_match_table = of_match_ptr(st_rproc_match),
},
};
module_platform_driver(st_rproc_driver);
MODULE_DESCRIPTION("ST Remote Processor Control Driver");
MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
sub-b/android_kernel_samsung_matissewifi-old | drivers/platform/msm/sps/sps.c | 177 | 66357 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Smart-Peripheral-Switch (SPS) Module. */
#include <linux/types.h> /* u32 */
#include <linux/kernel.h> /* pr_info() */
#include <linux/module.h> /* module_init() */
#include <linux/slab.h> /* kzalloc() */
#include <linux/mutex.h> /* mutex */
#include <linux/device.h> /* device */
#include <linux/fs.h> /* alloc_chrdev_region() */
#include <linux/list.h> /* list_head */
#include <linux/memory.h> /* memset */
#include <linux/io.h> /* ioremap() */
#include <linux/clk.h> /* clk_enable() */
#include <linux/platform_device.h> /* platform_get_resource_byname() */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <mach/msm_sps.h> /* msm_sps_platform_data */
#include "sps_bam.h"
#include "spsi.h"
#include "sps_core.h"
#define SPS_DRV_NAME "msm_sps" /* must match the platform_device name */
/**
* SPS Driver state struct
*/
struct sps_drv {
struct class *dev_class;
dev_t dev_num;
struct device *dev;
struct clk *pmem_clk;
struct clk *bamdma_clk;
struct clk *dfab_clk;
int is_ready;
/* Platform data */
u32 pipemem_phys_base;
u32 pipemem_size;
u32 bamdma_bam_phys_base;
u32 bamdma_bam_size;
u32 bamdma_dma_phys_base;
u32 bamdma_dma_size;
u32 bamdma_irq;
u32 bamdma_restricted_pipes;
/* Driver options bitflags (see SPS_OPT_*) */
u32 options;
/* Mutex to protect BAM and connection queues */
struct mutex lock;
/* BAM devices */
struct list_head bams_q;
char *hal_bam_version;
/* Connection control state */
struct sps_rm connection_ctrl;
};
/**
* SPS driver state
*/
static struct sps_drv *sps;
u32 d_type;
bool enhd_pipe;
bool imem;
static void sps_device_de_init(void);
#ifdef CONFIG_DEBUG_FS
u8 debugfs_record_enabled;
u8 logging_option;
u8 debug_level_option;
u8 print_limit_option;
u8 reg_dump_option;
u32 testbus_sel;
u32 bam_pipe_sel;
u32 desc_option;
static char *debugfs_buf;
static u32 debugfs_buf_size;
static u32 debugfs_buf_used;
static int wraparound;
struct dentry *dent;
struct dentry *dfile_info;
struct dentry *dfile_logging_option;
struct dentry *dfile_debug_level_option;
struct dentry *dfile_print_limit_option;
struct dentry *dfile_reg_dump_option;
struct dentry *dfile_testbus_sel;
struct dentry *dfile_bam_pipe_sel;
struct dentry *dfile_desc_option;
struct dentry *dfile_bam_addr;
static struct sps_bam *phy2bam(u32 phys_addr);
/* record debug info for debugfs */
void sps_debugfs_record(const char *msg)
{
if (debugfs_record_enabled) {
if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) {
debugfs_buf_used = 0;
wraparound = true;
}
debugfs_buf_used += scnprintf(debugfs_buf + debugfs_buf_used,
debugfs_buf_size - debugfs_buf_used, "%s", msg);
if (wraparound)
scnprintf(debugfs_buf + debugfs_buf_used,
debugfs_buf_size - debugfs_buf_used,
"\n**** end line of sps log ****\n\n");
}
}
/* read the recorded debug info to userspace */
static ssize_t sps_read_info(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int ret = 0;
int size;
if (debugfs_record_enabled) {
if (wraparound)
size = debugfs_buf_size - MAX_MSG_LEN;
else
size = debugfs_buf_used;
ret = simple_read_from_buffer(ubuf, count, ppos,
debugfs_buf, size);
}
return ret;
}
/*
* set the buffer size (in KB) for debug info
*/
static ssize_t sps_set_info(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long missing;
char str[MAX_MSG_LEN];
int i;
u32 buf_size_kb = 0;
u32 new_buf_size;
memset(str, 0, sizeof(str));
missing = copy_from_user(str, buf, sizeof(str));
if (missing)
return -EFAULT;
for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
buf_size_kb = (buf_size_kb * 10) + (str[i] - '0');
pr_info("sps:debugfs: input buffer size is %dKB\n", buf_size_kb);
if ((logging_option == 0) || (logging_option == 2)) {
pr_info("sps:debugfs: need to first turn on recording.\n");
return -EFAULT;
}
if (buf_size_kb < 1) {
pr_info("sps:debugfs: buffer size should be "
"no less than 1KB.\n");
return -EFAULT;
}
new_buf_size = buf_size_kb * SZ_1K;
if (debugfs_record_enabled) {
if (debugfs_buf_size == new_buf_size) {
/* need do nothing */
pr_info("sps:debugfs: input buffer size "
"is the same as before.\n");
return count;
} else {
/* release the current buffer */
debugfs_record_enabled = false;
debugfs_buf_used = 0;
wraparound = false;
kfree(debugfs_buf);
debugfs_buf = NULL;
}
}
/* allocate new buffer */
debugfs_buf_size = new_buf_size;
debugfs_buf = kzalloc(sizeof(char) * debugfs_buf_size,
GFP_KERNEL);
if (!debugfs_buf) {
debugfs_buf_size = 0;
pr_err("sps:fail to allocate memory for debug_fs.\n");
return -ENOMEM;
}
debugfs_buf_used = 0;
wraparound = false;
debugfs_record_enabled = true;
return count;
}
const struct file_operations sps_info_ops = {
.read = sps_read_info,
.write = sps_set_info,
};
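/*
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug:
 * recording must be enabled via logging_option (1 or 3) before the buffer
 * can be sized (in KB) and read back:
 *
 *   echo 1 > /sys/kernel/debug/sps/logging_option
 *   echo 16 > /sys/kernel/debug/sps/info
 *   cat /sys/kernel/debug/sps/info
 */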
/* return the current logging option to userspace */
static ssize_t sps_read_logging_option(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char value[MAX_MSG_LEN];
int nbytes;
nbytes = snprintf(value, MAX_MSG_LEN, "%d\n", logging_option);
return simple_read_from_buffer(ubuf, count, ppos, value, nbytes);
}
/*
* set the logging option
*/
static ssize_t sps_set_logging_option(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long missing;
char str[MAX_MSG_LEN];
int i;
u8 option = 0;
memset(str, 0, sizeof(str));
missing = copy_from_user(str, buf, sizeof(str));
if (missing)
return -EFAULT;
for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
option = (option * 10) + (str[i] - '0');
pr_info("sps:debugfs: try to change logging option to %d\n", option);
if (option > 3) {
pr_err("sps:debugfs: invalid logging option:%d\n", option);
return count;
}
if (((option == 0) || (option == 2)) &&
((logging_option == 1) || (logging_option == 3))) {
debugfs_record_enabled = false;
kfree(debugfs_buf);
debugfs_buf = NULL;
debugfs_buf_used = 0;
debugfs_buf_size = 0;
wraparound = false;
}
logging_option = option;
return count;
}
const struct file_operations sps_logging_option_ops = {
.read = sps_read_logging_option,
.write = sps_set_logging_option,
};
/*
* input the bam physical address
*/
static ssize_t sps_set_bam_addr(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long missing;
char str[MAX_MSG_LEN];
u32 i;
u32 bam_addr = 0;
struct sps_bam *bam;
u32 num_pipes = 0;
void *vir_addr;
memset(str, 0, sizeof(str));
missing = copy_from_user(str, buf, sizeof(str));
if (missing)
return -EFAULT;
for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
bam_addr = (bam_addr * 10) + (str[i] - '0');
pr_info("sps:debugfs:input BAM physical address:0x%x\n", bam_addr);
bam = phy2bam(bam_addr);
if (bam == NULL) {
pr_err("sps:debugfs:BAM 0x%x is not registered.", bam_addr);
return count;
} else {
vir_addr = bam->base;
num_pipes = bam->props.num_pipes;
}
switch (reg_dump_option) {
case 1: /* output all registers of this BAM */
print_bam_reg(vir_addr);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_reg(vir_addr, i);
break;
case 2: /* output BAM-level registers */
print_bam_reg(vir_addr);
break;
case 3: /* output selected BAM-level registers */
print_bam_selected_reg(vir_addr, bam->props.ee);
break;
case 4: /* output selected registers of all pipes */
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 5: /* output selected registers of selected pipes */
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 6: /* output selected registers of typical pipes */
print_bam_pipe_selected_reg(vir_addr, 4);
print_bam_pipe_selected_reg(vir_addr, 5);
break;
case 7: /* output desc FIFO of all pipes */
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 8: /* output desc FIFO of selected pipes */
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 9: /* output desc FIFO of typical pipes */
print_bam_pipe_desc_fifo(vir_addr, 4, 0);
print_bam_pipe_desc_fifo(vir_addr, 5, 0);
break;
case 10: /* output selected registers and desc FIFO of all pipes */
for (i = 0; i < num_pipes; i++) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 11: /* output selected registers and desc FIFO of selected pipes */
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 12: /* output selected registers and desc FIFO of typical pipes */
print_bam_pipe_selected_reg(vir_addr, 4);
print_bam_pipe_desc_fifo(vir_addr, 4, 0);
print_bam_pipe_selected_reg(vir_addr, 5);
print_bam_pipe_desc_fifo(vir_addr, 5, 0);
break;
case 13: /* output BAM_TEST_BUS_REG */
if (testbus_sel)
print_bam_test_bus_reg(vir_addr, testbus_sel);
else {
pr_info("sps:output TEST_BUS_REG for all TEST_BUS_SEL");
print_bam_test_bus_reg(vir_addr, testbus_sel);
}
break;
case 14: /* output partial desc FIFO of selected pipes */
if (desc_option == 0)
desc_option = 1;
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i,
desc_option);
break;
case 15: /* output partial data blocks of descriptors */
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
case 16: /* output all registers of selected pipes */
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_reg(vir_addr, i);
break;
case 91: /* output testbus register, BAM global registers
and registers of all pipes */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 92: /* output testbus register, BAM global registers
and registers of selected pipes */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 93: /* output registers and partial desc FIFOs
of selected pipes: format 1 */
if (desc_option == 0)
desc_option = 1;
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i,
desc_option);
break;
case 94: /* output registers and partial desc FIFOs
of selected pipes: format 2 */
if (desc_option == 0)
desc_option = 1;
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i,
desc_option);
}
break;
case 95: /* output registers and desc FIFOs
of selected pipes: format 1 */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 96: /* output registers and desc FIFOs
of selected pipes: format 2 */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 97: /* output registers, desc FIFOs and partial data blocks
of selected pipes: format 1 */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
case 98: /* output registers, desc FIFOs and partial data blocks
of selected pipes: format 2 */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (bam_pipe_sel & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
print_bam_pipe_desc_fifo(vir_addr, i, 100);
}
break;
case 99: /* output all registers, desc FIFOs and partial data blocks */
print_bam_test_bus_reg(vir_addr, testbus_sel);
print_bam_reg(vir_addr);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_reg(vir_addr, i);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 0);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
default:
pr_info("sps:no dump option is chosen yet.");
}
return count;
}
const struct file_operations sps_bam_addr_ops = {
.write = sps_set_bam_addr,
};
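/*
 * Illustrative usage (the BAM address is hypothetical): pick a dump format
 * via reg_dump_option, optionally a pipe mask via bam_pipe_sel, then write
 * the BAM physical address in decimal to trigger the dump:
 *
 *   echo 5 > /sys/kernel/debug/sps/reg_dump_option
 *   echo 48 > /sys/kernel/debug/sps/bam_pipe_sel
 *   echo 4010704896 > /sys/kernel/debug/sps/bam_addr
 */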
static void sps_debugfs_init(void)
{
debugfs_record_enabled = false;
logging_option = 0;
debug_level_option = 0;
print_limit_option = 0;
reg_dump_option = 0;
testbus_sel = 0;
bam_pipe_sel = 0;
desc_option = 0;
debugfs_buf_size = 0;
debugfs_buf_used = 0;
wraparound = false;
dent = debugfs_create_dir("sps", 0);
if (IS_ERR(dent)) {
pr_err("sps:fail to create the folder for debug_fs.\n");
return;
}
dfile_info = debugfs_create_file("info", 0664, dent, 0,
&sps_info_ops);
if (!dfile_info || IS_ERR(dfile_info)) {
pr_err("sps:fail to create the file for debug_fs info.\n");
goto info_err;
}
dfile_logging_option = debugfs_create_file("logging_option", 0664,
dent, 0, &sps_logging_option_ops);
if (!dfile_logging_option || IS_ERR(dfile_logging_option)) {
pr_err("sps:fail to create the file for debug_fs "
"logging_option.\n");
goto logging_option_err;
}
dfile_debug_level_option = debugfs_create_u8("debug_level_option",
0664, dent, &debug_level_option);
if (!dfile_debug_level_option || IS_ERR(dfile_debug_level_option)) {
pr_err("sps:fail to create the file for debug_fs "
"debug_level_option.\n");
goto debug_level_option_err;
}
dfile_print_limit_option = debugfs_create_u8("print_limit_option",
0664, dent, &print_limit_option);
if (!dfile_print_limit_option || IS_ERR(dfile_print_limit_option)) {
pr_err("sps:fail to create the file for debug_fs "
"print_limit_option.\n");
goto print_limit_option_err;
}
dfile_reg_dump_option = debugfs_create_u8("reg_dump_option", 0664,
dent, ®_dump_option);
if (!dfile_reg_dump_option || IS_ERR(dfile_reg_dump_option)) {
pr_err("sps:fail to create the file for debug_fs "
"reg_dump_option.\n");
goto reg_dump_option_err;
}
dfile_testbus_sel = debugfs_create_u32("testbus_sel", 0664,
dent, &testbus_sel);
if (!dfile_testbus_sel || IS_ERR(dfile_testbus_sel)) {
pr_err("sps:fail to create debug_fs file for testbus_sel.\n");
goto testbus_sel_err;
}
dfile_bam_pipe_sel = debugfs_create_u32("bam_pipe_sel", 0664,
dent, &bam_pipe_sel);
if (!dfile_bam_pipe_sel || IS_ERR(dfile_bam_pipe_sel)) {
pr_err("sps:fail to create debug_fs file for bam_pipe_sel.\n");
goto bam_pipe_sel_err;
}
dfile_desc_option = debugfs_create_u32("desc_option", 0664,
dent, &desc_option);
if (!dfile_desc_option || IS_ERR(dfile_desc_option)) {
pr_err("sps:fail to create debug_fs file for desc_option.\n");
goto desc_option_err;
}
dfile_bam_addr = debugfs_create_file("bam_addr", 0664,
dent, 0, &sps_bam_addr_ops);
if (!dfile_bam_addr || IS_ERR(dfile_bam_addr)) {
pr_err("sps:fail to create the file for debug_fs "
"bam_addr.\n");
goto bam_addr_err;
}
return;
bam_addr_err:
debugfs_remove(dfile_desc_option);
desc_option_err:
debugfs_remove(dfile_bam_pipe_sel);
bam_pipe_sel_err:
debugfs_remove(dfile_testbus_sel);
testbus_sel_err:
debugfs_remove(dfile_reg_dump_option);
reg_dump_option_err:
debugfs_remove(dfile_print_limit_option);
print_limit_option_err:
debugfs_remove(dfile_debug_level_option);
debug_level_option_err:
debugfs_remove(dfile_logging_option);
logging_option_err:
debugfs_remove(dfile_info);
info_err:
debugfs_remove(dent);
}
static void sps_debugfs_exit(void)
{
if (dfile_info)
debugfs_remove(dfile_info);
if (dfile_logging_option)
debugfs_remove(dfile_logging_option);
if (dfile_debug_level_option)
debugfs_remove(dfile_debug_level_option);
if (dfile_print_limit_option)
debugfs_remove(dfile_print_limit_option);
if (dfile_reg_dump_option)
debugfs_remove(dfile_reg_dump_option);
if (dfile_testbus_sel)
debugfs_remove(dfile_testbus_sel);
if (dfile_bam_pipe_sel)
debugfs_remove(dfile_bam_pipe_sel);
if (dfile_desc_option)
debugfs_remove(dfile_desc_option);
if (dfile_bam_addr)
debugfs_remove(dfile_bam_addr);
if (dent)
debugfs_remove(dent);
kfree(debugfs_buf);
debugfs_buf = NULL;
}
#endif
/* Get the debug info of BAM registers and descriptor FIFOs */
int sps_get_bam_debug_info(u32 dev, u32 option, u32 para,
u32 tb_sel, u32 desc_sel)
{
int res = 0;
struct sps_bam *bam;
u32 i;
u32 num_pipes = 0;
void *vir_addr;
if (dev == 0) {
SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
return SPS_ERROR;
}
if (sps == NULL || !sps->is_ready) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
mutex_lock(&sps->lock);
/* Search for the target BAM device */
bam = sps_h2bam(dev);
if (bam == NULL) {
pr_err("sps:Can't find any BAM with handle 0x%x.", dev);
mutex_unlock(&sps->lock);
return SPS_ERROR;
}
mutex_unlock(&sps->lock);
vir_addr = bam->base;
num_pipes = bam->props.num_pipes;
SPS_INFO("sps:<bam-addr> dump BAM:0x%x.\n", bam->props.phys_addr);
switch (option) {
case 1: /* output all registers of this BAM */
print_bam_reg(vir_addr);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_reg(vir_addr, i);
break;
case 2: /* output BAM-level registers */
print_bam_reg(vir_addr);
break;
case 3: /* output selected BAM-level registers */
print_bam_selected_reg(vir_addr, bam->props.ee);
break;
case 4: /* output selected registers of all pipes */
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 5: /* output selected registers of selected pipes */
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 6: /* output selected registers of typical pipes */
print_bam_pipe_selected_reg(vir_addr, 4);
print_bam_pipe_selected_reg(vir_addr, 5);
break;
case 7: /* output desc FIFO of all pipes */
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 8: /* output desc FIFO of selected pipes */
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 9: /* output desc FIFO of typical pipes */
print_bam_pipe_desc_fifo(vir_addr, 4, 0);
print_bam_pipe_desc_fifo(vir_addr, 5, 0);
break;
case 10: /* output selected registers and desc FIFO of all pipes */
for (i = 0; i < num_pipes; i++) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 11: /* output selected registers and desc FIFO of selected pipes */
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 12: /* output selected registers and desc FIFO of typical pipes */
print_bam_pipe_selected_reg(vir_addr, 4);
print_bam_pipe_desc_fifo(vir_addr, 4, 0);
print_bam_pipe_selected_reg(vir_addr, 5);
print_bam_pipe_desc_fifo(vir_addr, 5, 0);
break;
case 13: /* output BAM_TEST_BUS_REG */
if (tb_sel)
print_bam_test_bus_reg(vir_addr, tb_sel);
else
pr_info("sps:TEST_BUS_SEL should NOT be zero.");
break;
case 14: /* output partial desc FIFO of selected pipes */
if (desc_sel == 0)
desc_sel = 1;
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i,
desc_sel);
break;
case 15: /* output partial data blocks of descriptors */
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
case 16: /* output all registers of selected pipes */
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_reg(vir_addr, i);
break;
case 91: /* output testbus register, BAM global registers
and registers of all pipes */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 92: /* output testbus register, BAM global registers
and registers of selected pipes */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
break;
case 93: /* output registers and partial desc FIFOs
of selected pipes: format 1 */
if (desc_sel == 0)
desc_sel = 1;
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i,
desc_sel);
break;
case 94: /* output registers and partial desc FIFOs
of selected pipes: format 2 */
if (desc_sel == 0)
desc_sel = 1;
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i,
desc_sel);
}
break;
case 95: /* output registers and desc FIFOs
of selected pipes: format 1 */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
break;
case 96: /* output registers and desc FIFOs
of selected pipes: format 2 */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
}
break;
case 97: /* output registers, desc FIFOs and partial data blocks
of selected pipes: format 1 */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 0);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i))
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
case 98: /* output registers, desc FIFOs and partial data blocks
of selected pipes: format 2 */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
if (para & (1UL << i)) {
print_bam_pipe_selected_reg(vir_addr, i);
print_bam_pipe_desc_fifo(vir_addr, i, 0);
print_bam_pipe_desc_fifo(vir_addr, i, 100);
}
break;
case 99: /* output all registers, desc FIFOs and partial data blocks */
print_bam_test_bus_reg(vir_addr, tb_sel);
print_bam_reg(vir_addr);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_reg(vir_addr, i);
print_bam_selected_reg(vir_addr, bam->props.ee);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_selected_reg(vir_addr, i);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 0);
for (i = 0; i < num_pipes; i++)
print_bam_pipe_desc_fifo(vir_addr, i, 100);
break;
default:
pr_info("sps:no option is chosen yet.");
}
return res;
}
EXPORT_SYMBOL(sps_get_bam_debug_info);
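/*
 * Illustrative in-kernel call (handle and mask are hypothetical): dump the
 * selected registers of pipes 4 and 5 of a previously registered BAM:
 *
 *   sps_get_bam_debug_info(bam_handle, 5, (1UL << 4) | (1UL << 5), 0, 0);
 */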
/**
* Initialize SPS device
*
* This function initializes the SPS device.
*
* @return 0 on success, negative value on error
*
*/
static int sps_device_init(void)
{
int result;
int success;
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
struct sps_bam_props bamdma_props = {0};
#endif
SPS_DBG2("sps:%s.", __func__);
success = false;
result = sps_mem_init(sps->pipemem_phys_base, sps->pipemem_size);
if (result) {
SPS_ERR("sps:SPS memory init failed");
goto exit_err;
}
INIT_LIST_HEAD(&sps->bams_q);
mutex_init(&sps->lock);
if (sps_rm_init(&sps->connection_ctrl, sps->options)) {
SPS_ERR("sps:Fail to init SPS resource manager");
goto exit_err;
}
result = sps_bam_driver_init(sps->options);
if (result) {
SPS_ERR("sps:SPS BAM driver init failed");
goto exit_err;
}
/* Initialize the BAM DMA device */
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
bamdma_props.phys_addr = sps->bamdma_bam_phys_base;
bamdma_props.virt_addr = ioremap(sps->bamdma_bam_phys_base,
sps->bamdma_bam_size);
if (!bamdma_props.virt_addr) {
SPS_ERR("sps:Fail to IO map BAM-DMA BAM registers.\n");
goto exit_err;
}
SPS_DBG2("sps:bamdma_bam.phys=0x%x.virt=0x%x.",
bamdma_props.phys_addr,
(u32) bamdma_props.virt_addr);
bamdma_props.periph_phys_addr = sps->bamdma_dma_phys_base;
bamdma_props.periph_virt_size = sps->bamdma_dma_size;
bamdma_props.periph_virt_addr = ioremap(sps->bamdma_dma_phys_base,
sps->bamdma_dma_size);
if (!bamdma_props.periph_virt_addr) {
SPS_ERR("sps:Fail to IO map BAM-DMA peripheral reg.\n");
goto exit_err;
}
SPS_DBG2("sps:bamdma_dma.phys=0x%x.virt=0x%x.",
bamdma_props.periph_phys_addr,
(u32) bamdma_props.periph_virt_addr);
bamdma_props.irq = sps->bamdma_irq;
bamdma_props.event_threshold = 0x10; /* Pipe event threshold */
bamdma_props.summing_threshold = 0x10; /* BAM event threshold */
bamdma_props.options = SPS_BAM_OPT_BAMDMA;
bamdma_props.restricted_pipes = sps->bamdma_restricted_pipes;
result = sps_dma_init(&bamdma_props);
if (result) {
SPS_ERR("sps:SPS BAM DMA driver init failed");
goto exit_err;
}
#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
result = sps_map_init(NULL, sps->options);
if (result) {
SPS_ERR("sps:SPS connection mapping init failed");
goto exit_err;
}
success = true;
exit_err:
if (!success) {
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
sps_device_de_init();
#endif
return SPS_ERROR;
}
return 0;
}
/**
* De-initialize SPS device
*
* This function de-initializes the SPS device.
*
* @return 0 on success, negative value on error
*
*/
static void sps_device_de_init(void)
{
SPS_DBG2("sps:%s.", __func__);
if (sps != NULL) {
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
sps_dma_de_init();
#endif
/* Are there any remaining BAM registrations? */
if (!list_empty(&sps->bams_q))
SPS_ERR("sps:SPS de-init: BAMs are still registered");
sps_map_de_init();
kfree(sps);
}
sps_mem_de_init();
}
/**
* Initialize client state context
*
* This function initializes a client state context struct.
*
* @client - Pointer to client state context
*
* @return 0 on success, negative value on error
*
*/
static int sps_client_init(struct sps_pipe *client)
{
SPS_DBG("sps:%s.", __func__);
if (client == NULL)
return -EINVAL;
/*
* NOTE: Cannot store any state within the SPS driver because
* the driver init function may not have been called yet.
*/
memset(client, 0, sizeof(*client));
sps_rm_config_init(&client->connect);
client->client_state = SPS_STATE_DISCONNECT;
client->bam = NULL;
return 0;
}
/**
* De-initialize client state context
*
* This function de-initializes a client state context struct.
*
* @client - Pointer to client state context
*
* @return 0 on success, negative value on error
*
*/
static int sps_client_de_init(struct sps_pipe *client)
{
SPS_DBG("sps:%s.", __func__);
if (client->client_state != SPS_STATE_DISCONNECT) {
SPS_ERR("sps:De-init client in connected state: 0x%x",
client->client_state);
return SPS_ERROR;
}
client->bam = NULL;
client->map = NULL;
memset(&client->connect, 0, sizeof(client->connect));
return 0;
}
/**
* Find the BAM device from the physical address
*
* This function finds a BAM device in the BAM registration list that
* matches the specified physical address.
*
* @phys_addr - physical address of the BAM
*
* @return - pointer to the BAM device struct, or NULL on error
*
*/
static struct sps_bam *phy2bam(u32 phys_addr)
{
struct sps_bam *bam;
SPS_DBG("sps:%s.", __func__);
list_for_each_entry(bam, &sps->bams_q, list) {
if (bam->props.phys_addr == phys_addr)
return bam;
}
return NULL;
}
/**
* Find the handle of a BAM device based on the physical address
*
* This function finds a BAM device in the BAM registration list that
* matches the specified physical address, and returns its handle.
*
* @phys_addr - physical address of the BAM
*
* @handle - device handle of the BAM
*
* @return 0 on success, negative value on error
*
*/
int sps_phy2h(u32 phys_addr, u32 *handle)
{
struct sps_bam *bam;
SPS_DBG("sps:%s.", __func__);
if (sps == NULL || !sps->is_ready) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
if (handle == NULL) {
SPS_ERR("sps:%s:handle is NULL.\n", __func__);
return SPS_ERROR;
}
list_for_each_entry(bam, &sps->bams_q, list) {
if (bam->props.phys_addr == phys_addr) {
*handle = (u32) bam;
return 0;
}
}
SPS_ERR("sps: BAM device 0x%x is not registered yet.\n", phys_addr);
return -ENODEV;
}
EXPORT_SYMBOL(sps_phy2h);
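/*
* Illustrative sketch (not part of the original file): a peripheral driver
* that knows its BAM's physical base can look up the registered handle.
* The address below is hypothetical.
*
*	u32 bam_handle;
*
*	if (sps_phy2h(0x12244000, &bam_handle) == 0)
*		... use bam_handle as connect->source or ->destination ...
*/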
/**
* Setup desc/data FIFO for bam-to-bam connection
*
* @mem_buffer - Pointer to struct for allocated memory properties.
*
* @addr - address of FIFO
*
* @size - FIFO size
*
* @use_offset - use address offset instead of absolute address
*
* @return 0 on success, negative value on error
*
*/
int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
u32 addr, u32 size, int use_offset)
{
SPS_DBG("sps:%s.", __func__);
if ((mem_buffer == NULL) || (size == 0)) {
SPS_ERR("sps:invalid buffer address or size.");
return SPS_ERROR;
}
if (sps == NULL || !sps->is_ready) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
if (use_offset) {
if ((addr + size) <= sps->pipemem_size)
mem_buffer->phys_base = sps->pipemem_phys_base + addr;
else {
SPS_ERR("sps:requested mem is out of "
"pipe mem range.\n");
return SPS_ERROR;
}
} else {
if (addr >= sps->pipemem_phys_base &&
(addr + size) <= (sps->pipemem_phys_base
+ sps->pipemem_size))
mem_buffer->phys_base = addr;
else {
SPS_ERR("sps:requested mem is out of "
"pipe mem range.\n");
return SPS_ERROR;
}
}
mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
mem_buffer->size = size;
memset(mem_buffer->base, 0, mem_buffer->size);
return 0;
}
EXPORT_SYMBOL(sps_setup_bam2bam_fifo);
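/*
* Illustrative sketch (hypothetical offset and size, not from this driver):
* carving a descriptor FIFO for a BAM-to-BAM connection out of pipe memory
* using an offset rather than an absolute address.
*
*	struct sps_mem_buffer desc_fifo;
*
*	if (sps_setup_bam2bam_fifo(&desc_fifo, 0x0, 0x100, 1))
*		goto fail;
*	... assign desc_fifo as the connection's descriptor FIFO ...
*/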
/**
* Find the BAM device from the handle
*
* This function finds a BAM device in the BAM registration list that
* matches the specified device handle.
*
* @h - device handle of the BAM
*
* @return - pointer to the BAM device struct, or NULL on error
*
*/
struct sps_bam *sps_h2bam(u32 h)
{
struct sps_bam *bam;
SPS_DBG("sps:%s.", __func__);
if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
return NULL;
list_for_each_entry(bam, &sps->bams_q, list) {
if ((u32) bam == (u32) h)
return bam;
}
SPS_ERR("sps:Can't find BAM device for handle 0x%x.", h);
return NULL;
}
/**
* Lock BAM device
*
* This function obtains the BAM spinlock on the client's connection.
*
* @pipe - pointer to client pipe state
*
* @return pointer to BAM device struct, or NULL on error
*
*/
static struct sps_bam *sps_bam_lock(struct sps_pipe *pipe)
{
struct sps_bam *bam;
u32 pipe_index;
bam = pipe->bam;
if (bam == NULL) {
SPS_ERR("sps:Connection is not in connected state.");
return NULL;
}
spin_lock_irqsave(&bam->connection_lock, bam->irqsave_flags);
/* Verify client owns this pipe */
pipe_index = pipe->pipe_index;
if (pipe_index >= bam->props.num_pipes ||
pipe != bam->pipes[pipe_index]) {
SPS_ERR("sps:Client not owner of BAM 0x%x pipe: %d (max %d)",
bam->props.phys_addr, pipe_index,
bam->props.num_pipes);
spin_unlock_irqrestore(&bam->connection_lock,
bam->irqsave_flags);
return NULL;
}
return bam;
}
/**
* Unlock BAM device
*
* This function releases the BAM spinlock on the client's connection.
*
* @bam - pointer to BAM device struct
*
*/
static inline void sps_bam_unlock(struct sps_bam *bam)
{
spin_unlock_irqrestore(&bam->connection_lock, bam->irqsave_flags);
}
/**
* Connect an SPS connection end point
*
*/
int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
{
struct sps_pipe *pipe = h;
u32 dev;
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (connect == NULL) {
SPS_ERR("sps:%s:connection is NULL.\n", __func__);
return SPS_ERROR;
}
if (sps == NULL)
return -ENODEV;
if (!sps->is_ready) {
SPS_ERR("sps:sps_connect:sps driver is not ready.\n");
return -EAGAIN;
}
if ((connect->lock_group != SPSRM_CLEAR)
&& (connect->lock_group > BAM_MAX_P_LOCK_GROUP_NUM)) {
SPS_ERR("sps:The value of pipe lock group is invalid.\n");
return SPS_ERROR;
}
mutex_lock(&sps->lock);
/*
* Must lock the BAM device at the top level function, so must
* determine which BAM is the target for the connection
*/
if (connect->mode == SPS_MODE_SRC)
dev = connect->source;
else
dev = connect->destination;
bam = sps_h2bam(dev);
if (bam == NULL) {
SPS_ERR("sps:Invalid BAM device handle: 0x%x", dev);
result = SPS_ERROR;
goto exit_err;
}
SPS_DBG2("sps:sps_connect: bam 0x%x src 0x%x dest 0x%x mode %s",
BAM_ID(bam),
connect->source,
connect->destination,
connect->mode == SPS_MODE_SRC ? "SRC" : "DEST");
/* Allocate resources for the specified connection */
pipe->connect = *connect;
mutex_lock(&bam->lock);
result = sps_rm_state_change(pipe, SPS_STATE_ALLOCATE);
mutex_unlock(&bam->lock);
if (result)
goto exit_err;
/* Configure the connection */
mutex_lock(&bam->lock);
result = sps_rm_state_change(pipe, SPS_STATE_CONNECT);
mutex_unlock(&bam->lock);
if (result) {
sps_disconnect(h);
goto exit_err;
}
exit_err:
mutex_unlock(&sps->lock);
return result;
}
EXPORT_SYMBOL(sps_connect);
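/*
* Illustrative sketch of typical client usage (assumed flow; the handle and
* pipe index are hypothetical). The convention is to start from the
* sps_get_config() defaults and override only what is needed:
*
*	struct sps_connect conn;
*
*	sps_get_config(pipe, &conn);
*	conn.source = bam_handle;		(from sps_register_bam_device)
*	conn.destination = SPS_DEV_HANDLE_MEM;
*	conn.mode = SPS_MODE_SRC;
*	conn.src_pipe_index = 4;		(hypothetical)
*	conn.desc = desc_fifo;			(from sps_alloc_mem)
*	if (sps_connect(pipe, &conn))
*		goto fail;
*/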
/**
* Disconnect an SPS connection end point
*
* This function disconnects an SPS connection end point.
* The SPS hardware associated with that end point will be disabled.
* For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
* connection resources are deallocated. For a peripheral-to-peripheral
* connection, the resources associated with the connection will not be
* deallocated until both end points are closed.
*
* The client must call sps_connect() for the handle before calling
* this function.
*
* @h - client context for SPS connection end point
*
* @return 0 on success, negative value on error
*
*/
int sps_disconnect(struct sps_pipe *h)
{
struct sps_pipe *pipe = h;
struct sps_pipe *check;
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s.", __func__);
if (pipe == NULL) {
SPS_ERR("sps:Invalid pipe.");
return SPS_ERROR;
}
bam = pipe->bam;
if (bam == NULL) {
SPS_ERR("sps:BAM device of this pipe is NULL.");
return SPS_ERROR;
}
SPS_DBG2("sps:sps_disconnect: bam 0x%x src 0x%x dest 0x%x mode %s",
BAM_ID(bam),
pipe->connect.source,
pipe->connect.destination,
pipe->connect.mode == SPS_MODE_SRC ? "SRC" : "DEST");
result = SPS_ERROR;
/* Cross-check client with map table */
if (pipe->connect.mode == SPS_MODE_SRC)
check = pipe->map->client_src;
else
check = pipe->map->client_dest;
if (check != pipe) {
SPS_ERR("sps:Client context is corrupt");
goto exit_err;
}
/* Disconnect the BAM pipe */
mutex_lock(&bam->lock);
result = sps_rm_state_change(pipe, SPS_STATE_DISCONNECT);
mutex_unlock(&bam->lock);
if (result)
goto exit_err;
sps_rm_config_init(&pipe->connect);
result = 0;
exit_err:
return result;
}
EXPORT_SYMBOL(sps_disconnect);
/**
* Register an event object for an SPS connection end point
*
*/
int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (reg == NULL) {
SPS_ERR("sps:%s:registered event is NULL.\n", __func__);
return SPS_ERROR;
}
if (sps == NULL)
return -ENODEV;
if (!sps->is_ready) {
SPS_ERR("sps:sps_connect:sps driver not ready.\n");
return -EAGAIN;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_reg_event(bam, pipe->pipe_index, reg);
sps_bam_unlock(bam);
if (result)
SPS_ERR("sps:Fail to register event for BAM 0x%x pipe %d",
pipe->bam->props.phys_addr, pipe->pipe_index);
return result;
}
EXPORT_SYMBOL(sps_register_event);
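/*
* Illustrative sketch (assumed usage): requesting a callback on
* end-of-transfer. The handler name is hypothetical.
*
*	struct sps_register_event ev;
*
*	memset(&ev, 0, sizeof(ev));
*	ev.options = SPS_O_EOT;
*	ev.mode = SPS_TRIGGER_CALLBACK;
*	ev.callback = my_eot_callback;
*	if (sps_register_event(pipe, &ev))
*		goto fail;
*/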
/**
* Enable an SPS connection end point
*
*/
int sps_flow_on(struct sps_pipe *h)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
/* Enable the pipe data flow */
result = sps_rm_state_change(pipe, SPS_STATE_ENABLE);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_flow_on);
/**
* Disable an SPS connection end point
*
*/
int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
/* Disable the pipe data flow */
result = sps_rm_state_change(pipe, SPS_STATE_DISABLE);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_flow_off);
/**
* Check if the flags on a descriptor/iovec are valid
*
* @flags - flags on a descriptor/iovec
*
* @return 0 on success, negative value on error
*
*/
static int sps_check_iovec_flags(u32 flags)
{
if ((flags & SPS_IOVEC_FLAG_NWD) &&
!(flags & (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD))) {
SPS_ERR("sps:NWD is only valid with EOT or CMD.\n");
return SPS_ERROR;
} else if ((flags & SPS_IOVEC_FLAG_EOT) &&
(flags & SPS_IOVEC_FLAG_CMD)) {
SPS_ERR("sps:EOT and CMD are not allowed to coexist.\n");
return SPS_ERROR;
} else if (!(flags & SPS_IOVEC_FLAG_CMD) &&
(flags & (SPS_IOVEC_FLAG_LOCK | SPS_IOVEC_FLAG_UNLOCK))) {
static char err_msg[] =
"pipe lock/unlock flags are only valid with Command Descriptor";
SPS_ERR("sps:%s.\n", err_msg);
return SPS_ERROR;
} else if ((flags & SPS_IOVEC_FLAG_LOCK) &&
(flags & SPS_IOVEC_FLAG_UNLOCK)) {
static char err_msg[] =
"Can't lock and unlock a pipe by the same Command Descriptor";
SPS_ERR("sps:%s.\n", err_msg);
return SPS_ERROR;
} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
(flags & SPS_IOVEC_FLAG_CMD)) {
SPS_ERR("sps:Immediate and CMD are not allowed to coexist.\n");
return SPS_ERROR;
} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
(flags & SPS_IOVEC_FLAG_NWD)) {
SPS_ERR("sps:Immediate and NWD are not allowed to coexist.\n");
return SPS_ERROR;
}
return 0;
}
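/*
* Summary of the flag rules enforced above:
*	NWD		only valid together with EOT or CMD
*	EOT + CMD	mutually exclusive
*	LOCK/UNLOCK	only valid with CMD, and mutually exclusive
*	IMME		may not be combined with CMD or NWD
*/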
/**
* Perform a DMA transfer on an SPS connection end point
*
*/
int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
struct sps_iovec *iovec;
int i;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (transfer == NULL) {
SPS_ERR("sps:%s:transfer is NULL.\n", __func__);
return SPS_ERROR;
} else if (transfer->iovec == NULL) {
SPS_ERR("sps:%s:iovec list is NULL.\n", __func__);
return SPS_ERROR;
} else if (transfer->iovec_count == 0) {
SPS_ERR("sps:%s:iovec list is empty.\n", __func__);
return SPS_ERROR;
} else if (transfer->iovec_phys == 0) {
SPS_ERR("sps:%s:iovec list address is invalid.\n", __func__);
return SPS_ERROR;
}
/* Verify content of IOVECs */
iovec = transfer->iovec;
for (i = 0; i < transfer->iovec_count; i++) {
u32 flags = iovec->flags;
if (iovec->size > SPS_IOVEC_MAX_SIZE) {
SPS_ERR("sps:%s:iovec size is invalid.\n", __func__);
return SPS_ERROR;
}
if (sps_check_iovec_flags(flags))
return SPS_ERROR;
iovec++;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_transfer(bam, pipe->pipe_index, transfer);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_transfer);
/**
* Perform a single DMA transfer on an SPS connection end point
*
*/
int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
void *user, u32 flags)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
}
if (sps_check_iovec_flags(flags))
return SPS_ERROR;
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_transfer_one(bam, pipe->pipe_index,
SPS_GET_LOWER_ADDR(addr), size, user,
DESC_FLAG_WORD(flags, addr));
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_transfer_one);
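/*
* Illustrative sketch (hypothetical buffer): queueing one DMA-able buffer
* and asking for both an EOT descriptor and an interrupt on completion.
*
*	result = sps_transfer_one(pipe, dma_addr, len, priv,
*				  SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT);
*/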
/**
* Read event queue for an SPS connection end point
*
*/
int sps_get_event(struct sps_pipe *h, struct sps_event_notify *notify)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (notify == NULL) {
SPS_ERR("sps:%s:event_notify is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_get_event(bam, pipe->pipe_index, notify);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_get_event);
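/*
* Illustrative sketch (assumed polling usage): draining the event queue
* until it is empty. SPS_EVENT_INVALID is assumed to mark an empty queue.
*
*	struct sps_event_notify note;
*
*	while (sps_get_event(pipe, &note) == 0 &&
*	       note.event_id != SPS_EVENT_INVALID)
*		handle_event(&note);	(hypothetical consumer)
*/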
/**
* Determine whether an SPS connection end point FIFO is empty
*
*/
int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (empty == NULL) {
SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_is_empty(bam, pipe->pipe_index, empty);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_is_pipe_empty);
/**
* Get number of free transfer entries for an SPS connection end point
*
*/
int sps_get_free_count(struct sps_pipe *h, u32 *count)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (count == NULL) {
SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_get_free_count(bam, pipe->pipe_index, count);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_get_free_count);
/**
* Reset an SPS BAM device
*
*/
int sps_device_reset(u32 dev)
{
struct sps_bam *bam;
int result;
SPS_DBG2("sps:%s: dev = 0x%x", __func__, dev);
if (dev == 0) {
SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
return SPS_ERROR;
}
if (sps == NULL || !sps->is_ready) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
mutex_lock(&sps->lock);
/* Search for the target BAM device */
bam = sps_h2bam(dev);
if (bam == NULL) {
SPS_ERR("sps:Invalid BAM device handle: 0x%x", dev);
result = SPS_ERROR;
goto exit_err;
}
mutex_lock(&bam->lock);
result = sps_bam_reset(bam);
mutex_unlock(&bam->lock);
if (result) {
SPS_ERR("sps:Fail to reset BAM device: 0x%x", dev);
goto exit_err;
}
exit_err:
mutex_unlock(&sps->lock);
return result;
}
EXPORT_SYMBOL(sps_device_reset);
/**
* Get the configuration parameters for an SPS connection end point
*
*/
int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
{
struct sps_pipe *pipe = h;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (config == NULL) {
SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
return SPS_ERROR;
}
/* Copy current client connection state */
*config = pipe->connect;
return 0;
}
EXPORT_SYMBOL(sps_get_config);
/**
* Set the configuration parameters for an SPS connection end point
*
*/
int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (config == NULL) {
SPS_ERR("sps:%s:config pointer is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_set_params(bam, pipe->pipe_index,
config->options);
if (result == 0)
pipe->connect.options = config->options;
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_set_config);
/**
* Set ownership of an SPS connection end point
*
*/
int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
struct sps_satellite *connect)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (connect == NULL) {
SPS_ERR("sps:%s:connection is NULL.\n", __func__);
return SPS_ERROR;
}
if (owner != SPS_OWNER_REMOTE) {
SPS_ERR("sps:Unsupported ownership state: %d", owner);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_set_satellite(bam, pipe->pipe_index);
if (result)
goto exit_err;
/* Return satellite connect info (connect was validated above) */
if (pipe->connect.mode == SPS_MODE_SRC) {
connect->dev = pipe->map->src.bam_phys;
connect->pipe_index = pipe->map->src.pipe_index;
} else {
connect->dev = pipe->map->dest.bam_phys;
connect->pipe_index = pipe->map->dest.pipe_index;
}
connect->config = SPS_CONFIG_SATELLITE;
connect->options = (enum sps_option) 0;
exit_err:
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_set_owner);
/**
* Allocate memory from the SPS Pipe-Memory.
*
*/
int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
struct sps_mem_buffer *mem_buffer)
{
SPS_DBG("sps:%s.", __func__);
if (sps == NULL)
return -ENODEV;
if (!sps->is_ready) {
SPS_ERR("sps:sps_alloc_mem:sps driver is not ready.");
return -EAGAIN;
}
if (mem_buffer == NULL || mem_buffer->size == 0) {
SPS_ERR("sps:invalid memory buffer address or size");
return SPS_ERROR;
}
if (h == NULL)
SPS_DBG("sps:allocate pipe memory before setup pipe");
else
SPS_DBG("sps:allocate pipe memory for pipe %d", h->pipe_index);
mem_buffer->phys_base = sps_mem_alloc_io(mem_buffer->size);
if (mem_buffer->phys_base == SPS_ADDR_INVALID) {
SPS_ERR("sps:invalid address of allocated memory");
return SPS_ERROR;
}
mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
return 0;
}
EXPORT_SYMBOL(sps_alloc_mem);
/**
* Free memory from the SPS Pipe-Memory.
*
*/
int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
{
SPS_DBG("sps:%s.", __func__);
if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID) {
SPS_ERR("sps:invalid memory to free");
return SPS_ERROR;
}
if (h == NULL)
SPS_DBG("sps:free pipe memory.");
else
SPS_DBG("sps:free pipe memory for pipe %d.", h->pipe_index);
sps_mem_free_io(mem_buffer->phys_base, mem_buffer->size);
return 0;
}
EXPORT_SYMBOL(sps_free_mem);
/**
* Get the number of unused descriptors in the descriptor FIFO
* of a pipe
*
*/
int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (desc_num == NULL) {
SPS_ERR("sps:%s:result pointer is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
result = sps_bam_pipe_get_unused_desc_num(bam, pipe->pipe_index,
desc_num);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_get_unused_desc_num);
/**
* Vote for or relinquish BAM DMA clock
*
*/
int sps_ctrl_bam_dma_clk(bool clk_on)
{
int ret;
SPS_DBG("sps:%s.", __func__);
if (sps == NULL || !sps->is_ready) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
if (clk_on == true) {
SPS_DBG("sps:vote for bam dma clk.\n");
ret = clk_prepare_enable(sps->bamdma_clk);
if (ret) {
SPS_ERR("sps:fail to enable bamdma_clk:ret=%d\n", ret);
return ret;
}
} else {
SPS_DBG("sps:relinquish bam dma clk.\n");
clk_disable_unprepare(sps->bamdma_clk);
}
return 0;
}
EXPORT_SYMBOL(sps_ctrl_bam_dma_clk);
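/*
* Illustrative sketch: bracketing a BAM-DMA transaction with clock votes
* (error handling elided).
*
*	sps_ctrl_bam_dma_clk(true);
*	... perform the BAM-DMA work ...
*	sps_ctrl_bam_dma_clk(false);
*/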
/**
* Register a BAM device
*
*/
int sps_register_bam_device(const struct sps_bam_props *bam_props,
u32 *dev_handle)
{
struct sps_bam *bam = NULL;
void *virt_addr = NULL;
u32 manage;
int ok;
int result;
SPS_DBG2("sps:%s.", __func__);
if (bam_props == NULL) {
SPS_ERR("sps:%s:bam_props is NULL.\n", __func__);
return SPS_ERROR;
} else if (dev_handle == NULL) {
SPS_ERR("sps:%s:device handle is NULL.\n", __func__);
return SPS_ERROR;
}
if (sps == NULL) {
SPS_DBG2("sps:%s:sps driver is not ready.\n", __func__);
return -EPROBE_DEFER;
}
/* BAM-DMA is registered internally during power-up */
if ((!sps->is_ready) && !(bam_props->options & SPS_BAM_OPT_BAMDMA)) {
SPS_ERR("sps:sps_register_bam_device:sps driver not ready.\n");
return -EAGAIN;
}
/* Check BAM parameters */
manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
if (manage != SPS_BAM_MGR_NONE) {
if (bam_props->virt_addr == NULL && bam_props->virt_size == 0) {
SPS_ERR("sps:Invalid properties for BAM: %x",
bam_props->phys_addr);
return SPS_ERROR;
}
}
if ((bam_props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
/* BAM global is configured by local processor */
if (bam_props->summing_threshold == 0) {
SPS_ERR("sps:Invalid device ctrl properties for "
"BAM: %x", bam_props->phys_addr);
return SPS_ERROR;
}
}
manage = bam_props->manage &
(SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL);
/* In case of error */
*dev_handle = SPS_DEV_HANDLE_INVALID;
result = SPS_ERROR;
mutex_lock(&sps->lock);
/* Is this BAM already registered? */
bam = phy2bam(bam_props->phys_addr);
if (bam != NULL) {
mutex_unlock(&sps->lock);
SPS_ERR("sps:BAM is already registered: %x",
bam->props.phys_addr);
result = -EEXIST;
bam = NULL; /* Avoid error clean-up kfree(bam) */
goto exit_err;
}
/* Perform virtual mapping if required */
if ((bam_props->manage & SPS_BAM_MGR_ACCESS_MASK) !=
SPS_BAM_MGR_NONE && bam_props->virt_addr == NULL) {
/* Map the memory region */
virt_addr = ioremap(bam_props->phys_addr, bam_props->virt_size);
if (virt_addr == NULL) {
SPS_ERR("sps:Unable to map BAM IO mem:0x%x size:0x%x",
bam_props->phys_addr, bam_props->virt_size);
goto exit_err;
}
}
bam = kzalloc(sizeof(*bam), GFP_KERNEL);
if (bam == NULL) {
SPS_ERR("sps:Unable to allocate BAM device state: size 0x%x",
sizeof(*bam));
goto exit_err;
}
mutex_init(&bam->lock);
mutex_lock(&bam->lock);
/* Copy configuration to BAM device descriptor */
bam->props = *bam_props;
if (virt_addr != NULL)
bam->props.virt_addr = virt_addr;
ok = sps_bam_device_init(bam);
mutex_unlock(&bam->lock);
if (ok) {
SPS_ERR("sps:Fail to init BAM device: phys 0x%0x",
bam->props.phys_addr);
goto exit_err;
}
/* Add BAM to the list */
list_add_tail(&bam->list, &sps->bams_q);
*dev_handle = (u32) bam;
result = 0;
exit_err:
mutex_unlock(&sps->lock);
if (result) {
if (bam != NULL) {
if (virt_addr != NULL)
iounmap(bam->props.virt_addr);
kfree(bam);
}
return result;
}
/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
if (sps_dma_device_init((u32) bam)) {
bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
sps_deregister_bam_device((u32) bam);
SPS_ERR("sps:Fail to init BAM-DMA BAM: phys 0x%0x",
bam->props.phys_addr);
return SPS_ERROR;
}
}
#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
SPS_INFO("sps:BAM 0x%x is registered.", bam->props.phys_addr);
return 0;
}
EXPORT_SYMBOL(sps_register_bam_device);
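/*
* Illustrative sketch (assumed usage): a peripheral driver registering its
* BAM from platform resources. The address, size and IRQ normally come from
* the resource table; the summing threshold shown is hypothetical.
*
*	struct sps_bam_props props = {0};
*	u32 bam_handle;
*
*	props.phys_addr = res->start;
*	props.virt_size = resource_size(res);
*	props.irq = irq;
*	props.summing_threshold = 0x10;
*	if (sps_register_bam_device(&props, &bam_handle))
*		goto fail;
*/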
/**
* Deregister a BAM device
*
*/
int sps_deregister_bam_device(u32 dev_handle)
{
struct sps_bam *bam;
SPS_DBG2("sps:%s.", __func__);
if (dev_handle == 0) {
SPS_ERR("sps:%s:device handle should not be 0.\n", __func__);
return SPS_ERROR;
}
bam = sps_h2bam(dev_handle);
if (bam == NULL) {
SPS_ERR("sps:did not find a BAM for this handle");
return SPS_ERROR;
}
SPS_DBG2("sps:SPS deregister BAM: phys 0x%x.", bam->props.phys_addr);
/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
mutex_lock(&bam->lock);
(void)sps_dma_device_de_init((u32) bam);
bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
mutex_unlock(&bam->lock);
}
#endif
/* Remove the BAM from the registration list */
mutex_lock(&sps->lock);
list_del(&bam->list);
mutex_unlock(&sps->lock);
/* De-init the BAM and free resources */
mutex_lock(&bam->lock);
sps_bam_device_de_init(bam);
mutex_unlock(&bam->lock);
if (bam->props.virt_size)
(void)iounmap(bam->props.virt_addr);
kfree(bam);
return 0;
}
EXPORT_SYMBOL(sps_deregister_bam_device);
/**
* Get processed I/O vector (completed transfers)
*
*/
int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (iovec == NULL) {
SPS_ERR("sps:%s:iovec pointer is NULL.\n", __func__);
return SPS_ERROR;
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
/* Get an iovec from the BAM pipe descriptor FIFO */
result = sps_bam_pipe_get_iovec(bam, pipe->pipe_index, iovec);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_get_iovec);
/**
* Perform timer control
*
*/
int sps_timer_ctrl(struct sps_pipe *h,
struct sps_timer_ctrl *timer_ctrl,
struct sps_timer_result *timer_result)
{
struct sps_pipe *pipe = h;
struct sps_bam *bam;
int result;
SPS_DBG("sps:%s.", __func__);
if (h == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
} else if (timer_ctrl == NULL) {
SPS_ERR("sps:%s:timer_ctrl pointer is NULL.\n", __func__);
return SPS_ERROR;
} else if (timer_result == NULL) {
SPS_DBG("sps:%s:no result to return.\n", __func__);
}
bam = sps_bam_lock(pipe);
if (bam == NULL)
return SPS_ERROR;
/* Perform the BAM pipe timer control operation */
result = sps_bam_pipe_timer_ctrl(bam, pipe->pipe_index, timer_ctrl,
timer_result);
sps_bam_unlock(bam);
return result;
}
EXPORT_SYMBOL(sps_timer_ctrl);
/**
* Allocate client state context
*
*/
struct sps_pipe *sps_alloc_endpoint(void)
{
struct sps_pipe *ctx = NULL;
SPS_DBG("sps:%s.", __func__);
ctx = kzalloc(sizeof(struct sps_pipe), GFP_KERNEL);
if (ctx == NULL) {
SPS_ERR("sps:Fail to allocate pipe context.");
return NULL;
}
sps_client_init(ctx);
return ctx;
}
EXPORT_SYMBOL(sps_alloc_endpoint);
/**
* Free client state context
*
*/
int sps_free_endpoint(struct sps_pipe *ctx)
{
int res;
SPS_DBG("sps:%s.", __func__);
if (ctx == NULL) {
SPS_ERR("sps:%s:pipe is NULL.\n", __func__);
return SPS_ERROR;
}
res = sps_client_de_init(ctx);
if (res == 0)
kfree(ctx);
return res;
}
EXPORT_SYMBOL(sps_free_endpoint);
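/*
* Illustrative sketch of the endpoint lifecycle built from the calls above
* (ordering only; error handling elided):
*
*	pipe = sps_alloc_endpoint();
*	sps_get_config(pipe, &conn);
*	... fill in conn ...
*	sps_connect(pipe, &conn);
*	sps_register_event(pipe, &ev);
*	... sps_transfer_one() / sps_get_iovec() ...
*	sps_disconnect(pipe);
*	sps_free_endpoint(pipe);
*/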
/**
* Platform Driver.
*/
static int get_platform_data(struct platform_device *pdev)
{
struct resource *resource;
struct msm_sps_platform_data *pdata;
SPS_DBG("sps:%s.", __func__);
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
SPS_ERR("sps:inavlid platform data.\n");
sps->bamdma_restricted_pipes = 0;
return -EINVAL;
} else {
sps->bamdma_restricted_pipes = pdata->bamdma_restricted_pipes;
SPS_DBG("sps:bamdma_restricted_pipes=0x%x.",
sps->bamdma_restricted_pipes);
}
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pipe_mem");
if (resource) {
sps->pipemem_phys_base = resource->start;
sps->pipemem_size = resource_size(resource);
SPS_DBG("sps:pipemem.base=0x%x,size=0x%x.",
sps->pipemem_phys_base,
sps->pipemem_size);
}
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"bamdma_bam");
if (resource) {
sps->bamdma_bam_phys_base = resource->start;
sps->bamdma_bam_size = resource_size(resource);
SPS_DBG("sps:bamdma_bam.base=0x%x,size=0x%x.",
sps->bamdma_bam_phys_base,
sps->bamdma_bam_size);
}
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"bamdma_dma");
if (resource) {
sps->bamdma_dma_phys_base = resource->start;
sps->bamdma_dma_size = resource_size(resource);
SPS_DBG("sps:bamdma_dma.base=0x%x,size=0x%x.",
sps->bamdma_dma_phys_base,
sps->bamdma_dma_size);
}
resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"bamdma_irq");
if (resource) {
sps->bamdma_irq = resource->start;
SPS_DBG("sps:bamdma_irq=%d.", sps->bamdma_irq);
}
#endif
return 0;
}
/**
* Read data from device tree
*/
static int get_device_tree_data(struct platform_device *pdev)
{
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
struct resource *resource;
SPS_DBG("sps:%s.", __func__);
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,bam-dma-res-pipes",
&sps->bamdma_restricted_pipes))
SPS_DBG("sps:No restricted bamdma pipes on this target.\n");
else
SPS_DBG("sps:bamdma_restricted_pipes=0x%x.",
sps->bamdma_restricted_pipes);
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (resource) {
sps->bamdma_bam_phys_base = resource->start;
sps->bamdma_bam_size = resource_size(resource);
SPS_DBG("sps:bamdma_bam.base=0x%x,size=0x%x.",
sps->bamdma_bam_phys_base,
sps->bamdma_bam_size);
} else {
SPS_ERR("sps:BAM DMA BAM mem unavailable.");
return -ENODEV;
}
resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (resource) {
sps->bamdma_dma_phys_base = resource->start;
sps->bamdma_dma_size = resource_size(resource);
SPS_DBG("sps:bamdma_dma.base=0x%x,size=0x%x.",
sps->bamdma_dma_phys_base,
sps->bamdma_dma_size);
} else {
SPS_ERR("sps:BAM DMA mem unavailable.");
return -ENODEV;
}
resource = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (resource) {
imem = true;
sps->pipemem_phys_base = resource->start;
sps->pipemem_size = resource_size(resource);
SPS_DBG("sps:pipemem.base=0x%x,size=0x%x.",
sps->pipemem_phys_base,
sps->pipemem_size);
} else {
imem = false;
SPS_DBG("sps:No pipe memory on this target.\n");
}
resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (resource) {
sps->bamdma_irq = resource->start;
SPS_DBG("sps:bamdma_irq=%d.", sps->bamdma_irq);
} else {
SPS_ERR("sps:BAM DMA IRQ unavailable.");
return -ENODEV;
}
#endif
if (of_property_read_u32((&pdev->dev)->of_node,
"qcom,device-type",
&d_type)) {
d_type = 1;
SPS_DBG("sps:default device type.\n");
} else
SPS_DBG("sps:device type is %d.", d_type);
enhd_pipe = of_property_read_bool((&pdev->dev)->of_node,
"qcom,pipe-attr-ee");
SPS_DBG2("sps:PIPE_ATTR_EE is %s supported.\n",
(enhd_pipe ? "" : "not"));
return 0;
}
static int __devinit msm_sps_probe(struct platform_device *pdev)
{
int ret = -ENODEV;
SPS_DBG2("sps:%s.", __func__);
if (pdev->dev.of_node) {
if (get_device_tree_data(pdev)) {
SPS_ERR("sps:Fail to get data from device tree.");
return -ENODEV;
} else
SPS_DBG("sps:get data from device tree.");
} else {
d_type = 0;
if (get_platform_data(pdev)) {
SPS_ERR("sps:Fail to get platform data.");
return -ENODEV;
} else
SPS_DBG("sps:get platform data.");
}
/* Create Device */
sps->dev_class = class_create(THIS_MODULE, SPS_DRV_NAME);
ret = alloc_chrdev_region(&sps->dev_num, 0, 1, SPS_DRV_NAME);
if (ret) {
SPS_ERR("sps:alloc_chrdev_region err.");
goto alloc_chrdev_region_err;
}
sps->dev = device_create(sps->dev_class, NULL, sps->dev_num, sps,
SPS_DRV_NAME);
if (IS_ERR(sps->dev)) {
SPS_ERR("sps:device_create err.");
goto device_create_err;
}
sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
if (IS_ERR(sps->dfab_clk)) {
if (PTR_ERR(sps->dfab_clk) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
SPS_ERR("sps:fail to get dfab_clk.");
goto dfab_clk_err;
} else {
ret = clk_set_rate(sps->dfab_clk, 64000000);
if (ret) {
SPS_ERR("sps:failed to set dfab_clk rate.");
goto dfab_clk_set_err;
}
}
if (!d_type) {
sps->pmem_clk = clk_get(sps->dev, "mem_clk");
if (IS_ERR(sps->pmem_clk)) {
if (PTR_ERR(sps->pmem_clk) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
SPS_ERR("sps:fail to get pmem_clk.");
goto dfab_clk_set_err;
} else {
ret = clk_prepare_enable(sps->pmem_clk);
if (ret) {
SPS_ERR("sps:failed to enable pmem_clk.");
goto pmem_clk_set_err;
}
}
}
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
sps->bamdma_clk = clk_get(sps->dev, "dma_bam_pclk");
if (IS_ERR(sps->bamdma_clk)) {
if (PTR_ERR(sps->bamdma_clk) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
SPS_ERR("sps:fail to get bamdma_clk.");
goto pmem_clk_set_err;
} else {
ret = clk_prepare_enable(sps->bamdma_clk);
if (ret) {
SPS_ERR("sps:failed to enable bamdma_clk. ret=%d", ret);
clk_put(sps->bamdma_clk);
goto pmem_clk_set_err;
}
}
ret = clk_prepare_enable(sps->dfab_clk);
if (ret) {
SPS_ERR("sps:failed to enable dfab_clk. ret=%d", ret);
clk_put(sps->bamdma_clk);
goto pmem_clk_set_err;
}
#endif
ret = sps_device_init();
if (ret) {
SPS_ERR("sps:sps_device_init err.");
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
clk_disable_unprepare(sps->dfab_clk);
clk_disable_unprepare(sps->bamdma_clk);
clk_put(sps->bamdma_clk);
#endif
goto pmem_clk_set_err;
}
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
clk_disable_unprepare(sps->dfab_clk);
clk_disable_unprepare(sps->bamdma_clk);
#endif
sps->is_ready = true;
SPS_INFO("sps:sps is ready.");
return 0;
pmem_clk_set_err:
if (!d_type)
clk_put(sps->pmem_clk);
dfab_clk_set_err:
clk_put(sps->dfab_clk);
dfab_clk_err:
device_destroy(sps->dev_class, sps->dev_num);
device_create_err:
unregister_chrdev_region(sps->dev_num, 1);
alloc_chrdev_region_err:
class_destroy(sps->dev_class);
return ret;
}
static int __devexit msm_sps_remove(struct platform_device *pdev)
{
SPS_DBG("sps:%s.", __func__);
device_destroy(sps->dev_class, sps->dev_num);
unregister_chrdev_region(sps->dev_num, 1);
class_destroy(sps->dev_class);
sps_device_de_init();
clk_put(sps->dfab_clk);
if (!d_type)
clk_put(sps->pmem_clk);
clk_put(sps->bamdma_clk);
return 0;
}
static struct of_device_id msm_sps_match[] = {
{ .compatible = "qcom,msm_sps",
},
{}
};
static struct platform_driver msm_sps_driver = {
.probe = msm_sps_probe,
.driver = {
.name = SPS_DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = msm_sps_match,
},
.remove = __exit_p(msm_sps_remove),
};
/**
* Module Init.
*/
static int __init sps_init(void)
{
int ret;
#ifdef CONFIG_DEBUG_FS
sps_debugfs_init();
#endif
SPS_DBG("sps:%s.", __func__);
/* Allocate the SPS driver state struct */
sps = kzalloc(sizeof(*sps), GFP_KERNEL);
if (sps == NULL) {
SPS_ERR("sps:Unable to allocate driver state context.");
return -ENOMEM;
}
ret = platform_driver_register(&msm_sps_driver);
return ret;
}
/**
* Module Exit.
*/
static void __exit sps_exit(void)
{
SPS_DBG("sps:%s.", __func__);
platform_driver_unregister(&msm_sps_driver);
if (sps != NULL) {
kfree(sps);
sps = NULL;
}
#ifdef CONFIG_DEBUG_FS
sps_debugfs_exit();
#endif
}
arch_initcall(sps_init);
module_exit(sps_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Smart Peripheral Switch (SPS)");
| gpl-2.0 |
duncand0nuts/linux | drivers/input/misc/tps65218-pwrbutton.c | 945 | 3152 | /*
* Texas Instruments' TPS65218 Power Button Input Driver
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Felipe Balbi <balbi@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/tps65218.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct tps65218_pwrbutton {
struct device *dev;
struct tps65218 *tps;
struct input_dev *idev;
};
static irqreturn_t tps65218_pwr_irq(int irq, void *_pwr)
{
struct tps65218_pwrbutton *pwr = _pwr;
unsigned int reg;
int error;
error = tps65218_reg_read(pwr->tps, TPS65218_REG_STATUS, ®);
if (error) {
dev_err(pwr->dev, "can't read register: %d\n", error);
goto out;
}
if (reg & TPS65218_STATUS_PB_STATE) {
input_report_key(pwr->idev, KEY_POWER, 1);
pm_wakeup_event(pwr->dev, 0);
} else {
input_report_key(pwr->idev, KEY_POWER, 0);
}
input_sync(pwr->idev);
out:
return IRQ_HANDLED;
}
static int tps65218_pwron_probe(struct platform_device *pdev)
{
struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct tps65218_pwrbutton *pwr;
struct input_dev *idev;
int error;
int irq;
pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
if (!pwr)
return -ENOMEM;
idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
idev->name = "tps65218_pwrbutton";
idev->phys = "tps65218_pwrbutton/input0";
idev->dev.parent = dev;
idev->id.bustype = BUS_I2C;
input_set_capability(idev, EV_KEY, KEY_POWER);
pwr->tps = tps;
pwr->dev = dev;
pwr->idev = idev;
platform_set_drvdata(pdev, pwr);
device_init_wakeup(dev, true);
irq = platform_get_irq(pdev, 0);
error = devm_request_threaded_irq(dev, irq, NULL, tps65218_pwr_irq,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
"tps65218-pwrbutton", pwr);
if (error) {
dev_err(dev, "failed to request IRQ #%d: %d\n",
irq, error);
return error;
}
error = input_register_device(idev);
if (error) {
dev_err(dev, "Can't register power button: %d\n", error);
return error;
}
return 0;
}
static const struct of_device_id of_tps65218_pwr_match[] = {
{ .compatible = "ti,tps65218-pwrbutton" },
{ },
};
MODULE_DEVICE_TABLE(of, of_tps65218_pwr_match);
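/*
* Illustrative device tree fragment (hypothetical placement under the
* TPS65218 PMIC node):
*
*	tps65218: tps65218@24 {
*		compatible = "ti,tps65218";
*		...
*		power-button {
*			compatible = "ti,tps65218-pwrbutton";
*		};
*	};
*/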
static struct platform_driver tps65218_pwron_driver = {
.probe = tps65218_pwron_probe,
.driver = {
.name = "tps65218_pwrbutton",
.of_match_table = of_tps65218_pwr_match,
},
};
module_platform_driver(tps65218_pwron_driver);
MODULE_DESCRIPTION("TPS65218 Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
| gpl-2.0 |
abev66/linux-kernel-vta2-N7 | kernel/time/tick-broadcast.c | 1457 | 15907 | /*
* linux/kernel/time/tick-broadcast.c
*
* This file contains functions which emulate a local clock-event
* device via a broadcast event source.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
*
* This code is licenced under the GPL version 2. For details see
* kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include "tick-internal.h"
/*
* Broadcast support for broken x86 hardware, where the local apic
* timer stops in C3 state.
*/
static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
/*
* Debugging: see timer_list.c
*/
struct tick_device *tick_get_broadcast_device(void)
{
return &tick_broadcast_device;
}
struct cpumask *tick_get_broadcast_mask(void)
{
return to_cpumask(tick_broadcast_mask);
}
/*
* Start the device in periodic mode
*/
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
if (bc)
tick_setup_periodic(bc, 1);
}
/*
* Check, if the device can be utilized as broadcast device:
*/
int tick_check_broadcast_device(struct clock_event_device *dev)
{
if ((tick_broadcast_device.evtdev &&
tick_broadcast_device.evtdev->rating >= dev->rating) ||
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
return 1;
}
/*
* Check, if the device is the broadcast device
*/
int tick_is_broadcast_device(struct clock_event_device *dev)
{
return (dev && tick_broadcast_device.evtdev == dev);
}
/*
* Check, if the device is dysfunctional and a placeholder, which
* needs to be handled by the broadcast device.
*/
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
unsigned long flags;
int ret = 0;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Devices might be registered with both periodic and oneshot
* mode disabled. This signals, that the device needs to be
* operated from the broadcast device and is a placeholder for
* the cpu local device.
*/
if (!tick_device_is_functional(dev)) {
dev->event_handler = tick_handle_periodic;
cpumask_set_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
ret = 1;
} else {
/*
* When the new device is not affected by the stop
* feature and the cpu is marked in the broadcast mask
* then clear the broadcast bit.
*/
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
int cpu = smp_processor_id();
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
tick_broadcast_clear_oneshot(cpu);
}
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
/*
* Broadcast the event to the cpus, which are set in the mask (mangled).
*/
static void tick_do_broadcast(struct cpumask *mask)
{
int cpu = smp_processor_id();
struct tick_device *td;
/*
* Check, if the current cpu is in the mask
*/
if (cpumask_test_cpu(cpu, mask)) {
cpumask_clear_cpu(cpu, mask);
td = &per_cpu(tick_cpu_device, cpu);
td->evtdev->event_handler(td->evtdev);
}
if (!cpumask_empty(mask)) {
/*
* It might be necessary to actually check whether the devices
* have different broadcast functions. For now, just use the
* one from the first device. This works as long as we have this
* misfeature only on x86 (lapic)
*/
td = &per_cpu(tick_cpu_device, cpumask_first(mask));
td->evtdev->broadcast(mask);
}
}
/*
* Periodic broadcast:
* - invoke the broadcast handlers
*/
static void tick_do_periodic_broadcast(void)
{
raw_spin_lock(&tick_broadcast_lock);
cpumask_and(to_cpumask(tmpmask),
cpu_online_mask, tick_get_broadcast_mask());
tick_do_broadcast(to_cpumask(tmpmask));
raw_spin_unlock(&tick_broadcast_lock);
}
/*
* Event handler for periodic broadcast ticks
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
ktime_t next;
tick_do_periodic_broadcast();
/*
* The device is in periodic mode. No reprogramming necessary:
*/
if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
return;
/*
* Setup the next period for devices, which do not have
* periodic mode. We read dev->next_event first and add to it
* when the event already expired. clockevents_program_event()
* sets dev->next_event only when the event is really
* programmed to the device.
*/
for (next = dev->next_event; ;) {
next = ktime_add(next, tick_period);
if (!clockevents_program_event(dev, next, ktime_get()))
return;
tick_do_periodic_broadcast();
}
}
/*
* Powerstate information: The system enters/leaves a state, where
* affected devices might stop
*/
static void tick_do_broadcast_on_off(unsigned long *reason)
{
struct clock_event_device *bc, *dev;
struct tick_device *td;
unsigned long flags;
int cpu, bc_stopped;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
dev = td->evtdev;
bc = tick_broadcast_device.evtdev;
/*
* Is the device not affected by the powerstate ?
*/
if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
goto out;
if (!tick_device_is_functional(dev))
goto out;
bc_stopped = cpumask_empty(tick_get_broadcast_mask());
switch (*reason) {
case CLOCK_EVT_NOTIFY_BROADCAST_ON:
case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
cpumask_set_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
clockevents_shutdown(dev);
}
if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
tick_broadcast_force = 1;
break;
case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
if (!tick_broadcast_force &&
cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
tick_setup_periodic(dev, 0);
}
break;
}
if (cpumask_empty(tick_get_broadcast_mask())) {
if (!bc_stopped)
clockevents_shutdown(bc);
} else if (bc_stopped) {
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
tick_broadcast_start_periodic(bc);
else
tick_broadcast_setup_oneshot(bc);
}
out:
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
* Powerstate information: The system enters/leaves a state, where
* affected devices might stop.
*/
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
else
tick_do_broadcast_on_off(&reason);
}
/*
* Set the periodic handler depending on broadcast on/off
*/
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
if (!broadcast)
dev->event_handler = tick_handle_periodic;
else
dev->event_handler = tick_handle_periodic_broadcast;
}
/*
* Remove a CPU from broadcasting
*/
void tick_shutdown_broadcast(unsigned int *cpup)
{
struct clock_event_device *bc;
unsigned long flags;
unsigned int cpu = *cpup;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
if (bc && cpumask_empty(tick_get_broadcast_mask()))
clockevents_shutdown(bc);
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
void tick_suspend_broadcast(void)
{
struct clock_event_device *bc;
unsigned long flags;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
if (bc)
clockevents_shutdown(bc);
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
int tick_resume_broadcast(void)
{
struct clock_event_device *bc;
unsigned long flags;
int broadcast = 0;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
if (bc) {
clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);
switch (tick_broadcast_device.mode) {
case TICKDEV_MODE_PERIODIC:
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(bc);
broadcast = cpumask_test_cpu(smp_processor_id(),
tick_get_broadcast_mask());
break;
case TICKDEV_MODE_ONESHOT:
broadcast = tick_resume_broadcast_oneshot(bc);
break;
}
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return broadcast;
}
#ifdef CONFIG_TICK_ONESHOT
/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
/*
* Exposed for debugging: see timer_list.c
*/
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
return to_cpumask(tick_broadcast_oneshot_mask);
}
static int tick_broadcast_set_event(ktime_t expires, int force)
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
return tick_dev_program_event(bc, expires, force);
}
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
return 0;
}
/*
* Called from irq_enter() when idle was interrupted to reenable the
* per cpu device.
*/
void tick_check_oneshot_broadcast(int cpu)
{
if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
}
}
/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
ktime_t now, next_event;
int cpu;
raw_spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
cpumask_clear(to_cpumask(tmpmask));
now = ktime_get();
/* Find all expired events */
for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64)
cpumask_set_cpu(cpu, to_cpumask(tmpmask));
else if (td->evtdev->next_event.tv64 < next_event.tv64)
next_event.tv64 = td->evtdev->next_event.tv64;
}
/*
* Wakeup the cpus which have an expired event.
*/
tick_do_broadcast(to_cpumask(tmpmask));
/*
* Two reasons for reprogram:
*
* - The global event did not expire any CPU local
* events. This happens in dyntick mode, as the maximum PIT
* delta is quite small.
*
* - There are pending events on sleeping CPUs which were not
* in the event mask
*/
if (next_event.tv64 != KTIME_MAX) {
/*
* Rearm the broadcast device. If event expired,
* repeat the above
*/
if (tick_broadcast_set_event(next_event, 0))
goto again;
}
raw_spin_unlock(&tick_broadcast_lock);
}
/*
* Powerstate information: The system enters/leaves a state, where
* affected devices might stop
*/
void tick_broadcast_oneshot_control(unsigned long reason)
{
struct clock_event_device *bc, *dev;
struct tick_device *td;
unsigned long flags;
int cpu;
/*
* Periodic mode does not care about the enter/exit of power
* states
*/
if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
return;
/*
* We are called with preemption disabled from the depth of the
* idle code, so we can't be moved away.
*/
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
dev = td->evtdev;
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
return;
bc = tick_broadcast_device.evtdev;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
if (dev->next_event.tv64 < bc->next_event.tv64)
tick_broadcast_set_event(dev->next_event, 1);
}
} else {
if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
cpumask_clear_cpu(cpu,
tick_get_broadcast_oneshot_mask());
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
if (dev->next_event.tv64 != KTIME_MAX)
tick_program_event(dev->next_event, 1);
}
}
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
* Reset the one shot broadcast for a cpu
*
* Called with tick_broadcast_lock held
*/
static void tick_broadcast_clear_oneshot(int cpu)
{
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
ktime_t expires)
{
struct tick_device *td;
int cpu;
for_each_cpu(cpu, mask) {
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev)
td->evtdev->next_event = expires;
}
}
/**
* tick_broadcast_setup_oneshot - setup the broadcast device
*/
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
int cpu = smp_processor_id();
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
bc->event_handler = tick_handle_oneshot_broadcast;
clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
/* Take the do_timer update */
tick_do_timer_cpu = cpu;
/*
* We must be careful here. There might be other CPUs
* waiting for periodic broadcast. We need to set the
* oneshot_mask bits for those and program the
* broadcast device to fire.
*/
cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
cpumask_or(tick_get_broadcast_oneshot_mask(),
tick_get_broadcast_oneshot_mask(),
to_cpumask(tmpmask));
if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
tick_broadcast_init_next_event(to_cpumask(tmpmask),
tick_next_period);
tick_broadcast_set_event(tick_next_period, 1);
} else
bc->next_event.tv64 = KTIME_MAX;
} else {
/*
* The first cpu which switches to oneshot mode sets
* the bit for all other cpus which are in the general
* (periodic) broadcast mask. So the bit is set and
* would prevent the first broadcast enter after this
* to program the bc device.
*/
tick_broadcast_clear_oneshot(cpu);
}
}
/*
* Select oneshot operating mode for the broadcast device
*/
void tick_broadcast_switch_to_oneshot(void)
{
struct clock_event_device *bc;
unsigned long flags;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
* Remove a dead CPU from broadcasting
*/
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
unsigned long flags;
unsigned int cpu = *cpup;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Clear the broadcast mask flag for the dead cpu, but do not
* stop the broadcast device!
*/
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
* Check, whether the broadcast device is in one shot mode
*/
int tick_broadcast_oneshot_active(void)
{
return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/*
* Check whether the broadcast device supports oneshot.
*/
bool tick_broadcast_oneshot_available(void)
{
struct clock_event_device *bc = tick_broadcast_device.evtdev;
return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}
#endif
| gpl-2.0 |
calixtolinuxplatform/linux-3.12.10-ti-calixto | drivers/ata/pata_palmld.c | 3249 | 3180 | /*
* drivers/ata/pata_palmld.c
*
* Driver for IDE channel in Palm LifeDrive
*
* Based on research of:
* Alex Osborne <ato@meshy.org>
*
* Rewrite for mainline:
* Marek Vasut <marek.vasut@gmail.com>
*
* Rewritten version based on pata_ixp4xx_cf.c:
* ixp4xx PATA/Compact Flash driver
* Copyright (C) 2006-07 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <scsi/scsi_host.h>
#include <mach/palmld.h>
#define DRV_NAME "pata_palmld"
static struct gpio palmld_hdd_gpios[] = {
{ GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" },
{ GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" },
};
static struct scsi_host_template palmld_sht = {
ATA_PIO_SHT(DRV_NAME),
};
static struct ata_port_operations palmld_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer_noirq,
.cable_detect = ata_cable_40wire,
};
static int palmld_pata_probe(struct platform_device *pdev)
{
struct ata_host *host;
struct ata_port *ap;
void __iomem *mem;
int ret;
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
ret = -ENOMEM;
goto err1;
}
/* remap drive's physical memory address */
mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
if (!mem) {
ret = -ENOMEM;
goto err1;
}
/* request and activate power GPIO, IRQ GPIO */
ret = gpio_request_array(palmld_hdd_gpios,
ARRAY_SIZE(palmld_hdd_gpios));
if (ret)
goto err1;
/* reset the drive */
gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
msleep(30);
gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1);
msleep(30);
/* setup the ata port */
ap = host->ports[0];
ap->ops = &palmld_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_PIO_POLLING;
/* memory mapping voodoo */
ap->ioaddr.cmd_addr = mem + 0x10;
ap->ioaddr.altstatus_addr = mem + 0xe;
ap->ioaddr.ctl_addr = mem + 0xe;
/* start the port */
ata_sff_std_ports(&ap->ioaddr);
/* activate host */
ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
&palmld_sht);
if (ret)
goto err2;
return ret;
err2:
gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
err1:
return ret;
}
static int palmld_pata_remove(struct platform_device *dev)
{
ata_platform_remove_one(dev);
/* power down the HDD */
gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
return 0;
}
static struct platform_driver palmld_pata_platform_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
.probe = palmld_pata_probe,
.remove = palmld_pata_remove,
};
module_platform_driver(palmld_pata_platform_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PalmLD PATA driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
hafidzduddin/samsung_codina_kernel | drivers/mtd/chips/chipreg.c | 3505 | 2372 | /*
* Registration for chip drivers
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
static DEFINE_SPINLOCK(chip_drvs_lock);
static LIST_HEAD(chip_drvs_list);
void register_mtd_chip_driver(struct mtd_chip_driver *drv)
{
spin_lock(&chip_drvs_lock);
list_add(&drv->list, &chip_drvs_list);
spin_unlock(&chip_drvs_lock);
}
void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
{
spin_lock(&chip_drvs_lock);
list_del(&drv->list);
spin_unlock(&chip_drvs_lock);
}
static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
{
struct list_head *pos;
struct mtd_chip_driver *ret = NULL, *this;
spin_lock(&chip_drvs_lock);
list_for_each(pos, &chip_drvs_list) {
this = list_entry(pos, typeof(*this), list);
if (!strcmp(this->name, name)) {
ret = this;
break;
}
}
if (ret && !try_module_get(ret->module))
ret = NULL;
spin_unlock(&chip_drvs_lock);
return ret;
}
/* Hide all the horrid details, like some silly person taking
get_module_symbol() away from us, from the caller. */
struct mtd_info *do_map_probe(const char *name, struct map_info *map)
{
struct mtd_chip_driver *drv;
struct mtd_info *ret;
drv = get_mtd_chip_driver(name);
if (!drv && !request_module("%s", name))
drv = get_mtd_chip_driver(name);
if (!drv)
return NULL;
ret = drv->probe(map);
/* We decrease the use count here. It may have been a
probe-only module, which is no longer required from this
point, having given us a handle on (and increased the use
count of) the actual driver code.
*/
module_put(drv->module);
return ret;
}
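/*
 * Illustrative usage (a sketch, not part of this file; my_probe() and
 * my_map are hypothetical). A chip probe driver registers itself, and a
 * map driver later probes through it by name; mtd_device_register() is
 * assumed to be available (older kernels used add_mtd_device()):
 *
 *     static struct mtd_chip_driver my_chip_driver = {
 *         .probe  = my_probe,
 *         .name   = "my_probe",
 *         .module = THIS_MODULE,
 *     };
 *
 *     register_mtd_chip_driver(&my_chip_driver);
 *     ...
 *     struct mtd_info *mtd = do_map_probe("my_probe", &my_map);
 *     if (mtd)
 *         mtd_device_register(mtd, NULL, 0);
 */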
/*
* Destroy an MTD device which was created for a map device.
* Make sure the MTD device is already unregistered before calling this
*/
void map_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
if (map->fldrv->destroy)
map->fldrv->destroy(mtd);
module_put(map->fldrv->module);
kfree(mtd);
}
EXPORT_SYMBOL(register_mtd_chip_driver);
EXPORT_SYMBOL(unregister_mtd_chip_driver);
EXPORT_SYMBOL(do_map_probe);
EXPORT_SYMBOL(map_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
| gpl-2.0 |
MassStash/htc_m8whl_kernel_sense | arch/mips/pnx8550/common/int.c | 4785 | 6566 | /*
*
* Copyright (C) 2005 Embedded Alley Solutions, Inc
* Ported to 2.6.
*
* Per Hallsmark, per.hallsmark@mvista.com
* Copyright (C) 2000, 2001 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
*
* Cleaned up and bug fixing: Pete Popov, ppopov@embeddedalley.com
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/random.h>
#include <linux/module.h>
#include <asm/io.h>
#include <int.h>
#include <uart.h>
/* Default priorities for the GIC interrupts. */
/* Interrupt 0 must not be used, so it is always priority 0 (disabled). */
static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
0, 1, 1, 1, 1, 15, 1, 1, 1, 1, // 0 - 9
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10 - 19
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20 - 29
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30 - 39
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40 - 49
1, 1, 1, 1, 1, 1, 1, 1, 2, 1, // 50 - 59
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60 - 69
1 // 70
};
static void hw0_irqdispatch(int irq)
{
/* find out which interrupt */
irq = PNX8550_GIC_VECTOR_0 >> 3;
if (irq == 0) {
printk("hw0_irqdispatch: irq 0, spurious interrupt?\n");
return;
}
do_IRQ(PNX8550_INT_GIC_MIN + irq);
}
static void timer_irqdispatch(int irq)
{
irq = (0x01c0 & read_c0_config7()) >> 6;
if (unlikely(irq == 0)) {
printk("timer_irqdispatch: irq 0, spurious interrupt?\n");
return;
}
if (irq & 0x1)
do_IRQ(PNX8550_INT_TIMER1);
if (irq & 0x2)
do_IRQ(PNX8550_INT_TIMER2);
if (irq & 0x4)
do_IRQ(PNX8550_INT_TIMER3);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP2)
hw0_irqdispatch(2);
else if (pending & STATUSF_IP7) {
if (read_c0_config7() & 0x01c0)
timer_irqdispatch(7);
} else
spurious_interrupt();
}
static inline void modify_cp0_intmask(unsigned clr_mask, unsigned set_mask)
{
unsigned long status = read_c0_status();
status &= ~((clr_mask & 0xFF) << 8);
status |= (set_mask & 0xFF) << 8;
write_c0_status(status);
}
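/*
 * Worked example: the IM bits sit in bits 15:8 of the CP0 Status
 * register, so modify_cp0_intmask(0, 1 << 7) sets Status bit 15 (IM7)
 * and thereby enables the timer interrupt line, while
 * modify_cp0_intmask(1 << 7, 0) clears the same bit again; this is
 * exactly how unmask_irq()/mask_irq() below handle the timer range.
 */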
static inline void mask_gic_int(unsigned int irq_nr)
{
/* interrupt disabled, bit 26(WE_ENABLE)=1 and bit 16(enable)=0 */
PNX8550_GIC_REQ(irq_nr) = 1<<28; /* set priority to 0 */
}
static inline void unmask_gic_int(unsigned int irq_nr)
{
/* set prio mask to lower four bits and enable interrupt */
PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr];
}
static inline void mask_irq(struct irq_data *d)
{
unsigned int irq_nr = d->irq;
if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
modify_cp0_intmask(1 << irq_nr, 0);
} else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
(irq_nr <= PNX8550_INT_GIC_MAX)) {
mask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
} else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
(irq_nr <= PNX8550_INT_TIMER_MAX)) {
modify_cp0_intmask(1 << 7, 0);
} else {
printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
}
}
static inline void unmask_irq(struct irq_data *d)
{
unsigned int irq_nr = d->irq;
if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
modify_cp0_intmask(0, 1 << irq_nr);
} else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
(irq_nr <= PNX8550_INT_GIC_MAX)) {
unmask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
} else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
(irq_nr <= PNX8550_INT_TIMER_MAX)) {
modify_cp0_intmask(0, 1 << 7);
} else {
printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
}
}
int pnx8550_set_gic_priority(int irq, int priority)
{
int gic_irq = irq-PNX8550_INT_GIC_MIN;
int prev_priority = PNX8550_GIC_REQ(gic_irq) & 0xf;
gic_prio[gic_irq] = priority;
PNX8550_GIC_REQ(gic_irq) |= (0x10000000 | gic_prio[gic_irq]);
return prev_priority;
}
static struct irq_chip level_irq_type = {
.name = "PNX Level IRQ",
.irq_mask = mask_irq,
.irq_unmask = unmask_irq,
};
static struct irqaction gic_action = {
.handler = no_action,
.flags = IRQF_NO_THREAD,
.name = "GIC",
};
static struct irqaction timer_action = {
.handler = no_action,
.flags = IRQF_TIMER,
.name = "Timer",
};
void __init arch_init_irq(void)
{
int i;
int configPR;
for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* init of GIC/IPC interrupts */
/* should be done before cp0 since cp0 init enables the GIC int */
for (i = PNX8550_INT_GIC_MIN; i <= PNX8550_INT_GIC_MAX; i++) {
int gic_int_line = i - PNX8550_INT_GIC_MIN;
if (gic_int_line == 0 )
continue; // don't fiddle with int 0
/*
* enable change of TARGET, ENABLE and ACTIVE_LOW bits
* set TARGET 0 to route through hw0 interrupt
* set ACTIVE_LOW 0 active high (correct?)
*
* We really should setup an interrupt description table
* to do this nicely.
* Note, PCI INTA is active low on the bus, but inverted
* in the GIC, so to us it's active high.
*/
PNX8550_GIC_REQ(i - PNX8550_INT_GIC_MIN) = 0x1E000000;
/* mask/priority is still 0 so we will not get any
* interrupts until it is unmasked */
irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
}
/* Priority level 0 */
PNX8550_GIC_PRIMASK_0 = PNX8550_GIC_PRIMASK_1 = 0;
/* Set int vector table address */
PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
/* init of Timer interrupts */
for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* Stop Timer 1-3 */
configPR = read_c0_config7();
configPR |= 0x00000038;
write_c0_config7(configPR);
irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
}
EXPORT_SYMBOL(pnx8550_set_gic_priority);
| gpl-2.0 |
TipsyOs-Devices/android_kernel_samsung_trlte | fs/udf/symlink.c | 6577 | 2621 | /*
* symlink.c
*
* PURPOSE
* Symlink handling routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998-2001 Ben Fennema
* (C) 1999 Stelias Computing Inc
*
* HISTORY
*
* 04/16/99 blf Created.
*
*/
#include "udfdecl.h"
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "udf_i.h"
static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
int fromlen, unsigned char *to)
{
struct pathComponent *pc;
int elen = 0;
unsigned char *p = to;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
if (pc->lengthComponentIdent > 0)
break;
/* Fall through */
case 2:
p = to;
*p++ = '/';
break;
case 3:
memcpy(p, "../", 3);
p += 3;
break;
case 4:
memcpy(p, "./", 2);
p += 2;
/* component is the current directory ("."), so the "./" is a no-op */
break;
case 5:
p += udf_get_filename(sb, pc->componentIdent, p,
pc->lengthComponentIdent);
*p++ = '/';
break;
}
elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
}
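/*
 * Worked example: a symlink stored as the component sequence
 * {type 2} {type 5 "usr"} {type 5 "bin"} is rendered as "/", "usr/" and
 * "bin/" in turn; the trailing '/' is then overwritten with '\0' above,
 * yielding "/usr/bin". A {type 3} component would contribute "../"
 * instead.
 */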
static int udf_symlink_filler(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
int err = -EIO;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
down_read(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
bh = sb_bread(inode->i_sb, pos);
if (!bh)
goto out;
symlink = bh->b_data;
}
udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
brelse(bh);
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
out:
up_read(&iinfo->i_data_sem);
SetPageError(page);
kunmap(page);
unlock_page(page);
return err;
}
/*
* symlinks can't do much...
*/
const struct address_space_operations udf_symlink_aops = {
.readpage = udf_symlink_filler,
};
| gpl-2.0 |
Negamann303/kernel-nk1-negalite-lt02ltespr | net/ax25/ax25_in.c | 7601 | 10789 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/*
* Given a fragment, queue it on the fragment queue and if the fragment
* is complete, send it back to ax25_rx_iframe.
*/
static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb)
{
struct sk_buff *skbn, *skbo;
if (ax25->fragno != 0) {
if (!(*skb->data & AX25_SEG_FIRST)) {
if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) {
/* Enqueue fragment */
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen += skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
/* Last fragment received ? */
if (ax25->fragno == 0) {
skbn = alloc_skb(AX25_MAX_HEADER_LEN +
ax25->fraglen,
GFP_ATOMIC);
if (!skbn) {
skb_queue_purge(&ax25->frag_queue);
return 1;
}
skb_reserve(skbn, AX25_MAX_HEADER_LEN);
skbn->dev = ax25->ax25_dev->dev;
skb_reset_network_header(skbn);
skb_reset_transport_header(skbn);
/* Copy data from the fragments */
while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) {
skb_copy_from_linear_data(skbo,
skb_put(skbn, skbo->len),
skbo->len);
kfree_skb(skbo);
}
ax25->fraglen = 0;
if (ax25_rx_iframe(ax25, skbn) == 0)
kfree_skb(skbn);
}
return 1;
}
}
} else {
/* First fragment received */
if (*skb->data & AX25_SEG_FIRST) {
skb_queue_purge(&ax25->frag_queue);
ax25->fragno = *skb->data & AX25_SEG_REM;
skb_pull(skb, 1); /* skip fragno */
ax25->fraglen = skb->len;
skb_queue_tail(&ax25->frag_queue, skb);
return 1;
}
}
return 0;
}
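/*
 * Worked example: once the PID byte has been stripped, the first data
 * byte of each fragment holds AX25_SEG_FIRST plus the count of
 * fragments still to come (AX25_SEG_REM). A message split in three
 * arrives with header bytes (AX25_SEG_FIRST | 2), then 1, then 0, and
 * reassembly above completes when the remaining count reaches zero.
 */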
/*
* This is where all valid I frames are sent to, to be dispatched to
* whichever protocol requires them.
*/
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
int (*func)(struct sk_buff *, ax25_cb *);
unsigned char pid;
int queued = 0;
if (skb == NULL) return 0;
ax25_start_idletimer(ax25);
pid = *skb->data;
if (pid == AX25_P_IP) {
/* working around a TCP bug to keep additional listeners
* happy. TCP re-uses the buffer and destroys the original
* content.
*/
struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
if (skbn != NULL) {
kfree_skb(skb);
skb = skbn;
}
skb_pull(skb, 1); /* Remove PID */
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->dev = ax25->ax25_dev->dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
return 1;
}
if (pid == AX25_P_SEGMENT) {
skb_pull(skb, 1); /* Remove PID */
return ax25_rx_fragment(ax25, skb);
}
if ((func = ax25_protocol_function(pid)) != NULL) {
skb_pull(skb, 1); /* Remove PID */
return (*func)(skb, ax25);
}
if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
ax25->pidincl) {
if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
queued = 1;
else
ax25->condition |= AX25_COND_OWN_RX_BUSY;
}
}
return queued;
}
/*
* Higher level upcall for a LAPB frame
*/
static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama)
{
int queued = 0;
if (ax25->state == AX25_STATE_0)
return 0;
switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
case AX25_PROTO_STD_SIMPLEX:
case AX25_PROTO_STD_DUPLEX:
queued = ax25_std_frame_in(ax25, skb, type);
break;
#ifdef CONFIG_AX25_DAMA_SLAVE
case AX25_PROTO_DAMA_SLAVE:
if (dama || ax25->ax25_dev->dama.slave)
queued = ax25_ds_frame_in(ax25, skb, type);
else
queued = ax25_std_frame_in(ax25, skb, type);
break;
#endif
}
return queued;
}
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
ax25_address *dev_addr, struct packet_type *ptype)
{
ax25_address src, dest, *next_digi = NULL;
int type = 0, mine = 0, dama;
struct sock *make, *sk;
ax25_digi dp, reverse_dp;
ax25_cb *ax25;
ax25_dev *ax25_dev;
/*
* Process the AX.25/LAPB frame.
*/
skb_reset_transport_header(skb);
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
goto free;
/*
* Parse the address header.
*/
if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
goto free;
/*
* Ours perhaps ?
*/
if (dp.lastrepeat + 1 < dp.ndigi) /* Not yet digipeated completely */
next_digi = &dp.calls[dp.lastrepeat + 1];
/*
* Pull of the AX.25 headers leaving the CTRL/PID bytes
*/
skb_pull(skb, ax25_addr_size(&dp));
/* For our port addresses ? */
if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* Also match on any registered callsign from L3/4 */
if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
mine = 1;
/* UI frame - bypass LAPB processing */
if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
skb_set_transport_header(skb, 2); /* skip control and pid */
ax25_send_to_raw(&dest, skb, skb->data[1]);
if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
goto free;
/* Now we are pointing at the pid byte */
switch (skb->data[1]) {
case AX25_P_IP:
skb_pull(skb,2); /* drop PID/CTRL */
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
break;
case AX25_P_ARP:
skb_pull(skb,2);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
skb->protocol = htons(ETH_P_ARP);
netif_rx(skb);
break;
case AX25_P_TEXT:
/* Now find a suitable dgram socket */
sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
if (sk != NULL) {
bh_lock_sock(sk);
if (atomic_read(&sk->sk_rmem_alloc) >=
sk->sk_rcvbuf) {
kfree_skb(skb);
} else {
/*
* Remove the control and PID.
*/
skb_pull(skb, 2);
if (sock_queue_rcv_skb(sk, skb) != 0)
kfree_skb(skb);
}
bh_unlock_sock(sk);
sock_put(sk);
} else {
kfree_skb(skb);
}
break;
default:
kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */
break;
}
return 0;
}
/*
* Is connected mode supported on this device ?
* If not, should we DM the incoming frame (except DMs) or
* silently ignore them. For now we stay quiet.
*/
if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
goto free;
/* LAPB */
/* AX.25 state 1-4 */
ax25_digi_invert(&dp, &reverse_dp);
if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
/*
* Process the frame. If it is queued up internally it
* returns one otherwise we free it immediately. This
* routine itself wakes the user context layers so we do
* no further work
*/
if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
kfree_skb(skb);
ax25_cb_put(ax25);
return 0;
}
/* AX.25 state 0 (disconnected) */
/* a) received frame is not a SABM(E) */
if ((*skb->data & ~AX25_PF) != AX25_SABM &&
(*skb->data & ~AX25_PF) != AX25_SABME) {
/*
* Never reply to a DM. Also ignore any connects for
* addresses that are not our interfaces and not a socket.
*/
if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
/* b) received SABM(E) */
if (dp.lastrepeat + 1 == dp.ndigi)
sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
else
sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
if (sk != NULL) {
bh_lock_sock(sk);
if (sk_acceptq_is_full(sk) ||
(make = ax25_make_new(sk, ax25_dev)) == NULL) {
if (mine)
ax25_return_dm(dev, &src, &dest, &dp);
kfree_skb(skb);
bh_unlock_sock(sk);
sock_put(sk);
return 0;
}
ax25 = ax25_sk(make);
skb_set_owner_r(skb, make);
skb_queue_head(&sk->sk_receive_queue, skb);
make->sk_state = TCP_ESTABLISHED;
sk->sk_ack_backlog++;
bh_unlock_sock(sk);
} else {
if (!mine)
goto free;
if ((ax25 = ax25_create_cb()) == NULL) {
ax25_return_dm(dev, &src, &dest, &dp);
goto free;
}
ax25_fillin_cb(ax25, ax25_dev);
}
ax25->source_addr = dest;
ax25->dest_addr = src;
/*
* Sort out any digipeated paths.
*/
if (dp.ndigi && !ax25->digipeat &&
(ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
kfree_skb(skb);
ax25_destroy_socket(ax25);
if (sk)
sock_put(sk);
return 0;
}
if (dp.ndigi == 0) {
kfree(ax25->digipeat);
ax25->digipeat = NULL;
} else {
/* Reverse the source SABM's path */
memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
}
if ((*skb->data & ~AX25_PF) == AX25_SABME) {
ax25->modulus = AX25_EMODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW];
} else {
ax25->modulus = AX25_MODULUS;
ax25->window = ax25_dev->values[AX25_VALUES_WINDOW];
}
ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);
#ifdef CONFIG_AX25_DAMA_SLAVE
if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
ax25_dama_on(ax25);
#endif
ax25->state = AX25_STATE_3;
ax25_cb_add(ax25);
ax25_start_heartbeat(ax25);
ax25_start_t3timer(ax25);
ax25_start_idletimer(ax25);
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
sock_put(sk);
} else {
free:
kfree_skb(skb);
}
return 0;
}
/*
* Receive an AX.25 frame via a SLIP interface.
*/
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
skb_orphan(skb);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
return 0;
}
if ((*skb->data & 0x0F) != 0) {
kfree_skb(skb); /* Not a KISS data frame */
return 0;
}
skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */
return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype);
}
| gpl-2.0 |
timemath/hmfs | lib/mpi/generic_mpih-lshift.c | 9905 | 2151 | /* mpihelp-lshift.c - MPI helper functions
* Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left
* and store the USIZE least significant digits of the result at WP.
* Return the bits shifted out from the most significant digit.
*
* Argument constraints:
* 1. 0 < CNT < BITS_PER_MP_LIMB
* 2. If the result is to be written over the input, WP must be >= UP.
*/
mpi_limb_t
mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt)
{
mpi_limb_t high_limb, low_limb;
unsigned sh_1, sh_2;
mpi_size_t i;
mpi_limb_t retval;
sh_1 = cnt;
wp += 1;
sh_2 = BITS_PER_MPI_LIMB - sh_1;
i = usize - 1;
low_limb = up[i];
retval = low_limb >> sh_2;
high_limb = low_limb;
while (--i >= 0) {
low_limb = up[i];
wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
high_limb = low_limb;
}
wp[i] = high_limb << sh_1;
return retval;
}
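/*
 * Worked example (assuming 32-bit limbs): with usize = 1,
 * up[0] = 0x80000001 and cnt = 4, the function stores
 * wp[0] = 0x00000010 and returns 0x8, the four bits shifted out of
 * the most significant limb.
 */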
| gpl-2.0 |
matnyman/xhci | sound/oss/v_midi.c | 12721 | 6457 | /*
* sound/oss/v_midi.c
*
* The low level driver for the virtual (loopback) MIDI ports.
*
*
* Copyright (C) by Hannu Savolainen 1993-1996
*
* USS/Lite for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
* Changes
* Alan Cox Modularisation, changed memory allocations
* Christoph Hellwig Adapted to module_init/module_exit
*
* Status
* Untested
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "v_midi.h"
static vmidi_devc *v_devc[2] = { NULL, NULL};
static int midi1,midi2;
static void *midi_mem = NULL;
/*
 * Each loopback port can be used either for input or output. The mode
 * is established the first time the program calls read or write after
 * open. The current version doesn't support mode changes without
 * closing and reopening the device. Support for this feature may be
 * implemented in a future version of this driver.
 */
static int v_midi_open (int dev, int mode,
void (*input) (int dev, unsigned char data),
void (*output) (int dev)
)
{
vmidi_devc *devc = midi_devs[dev]->devc;
unsigned long flags;
if (devc == NULL)
return -(ENXIO);
spin_lock_irqsave(&devc->lock,flags);
if (devc->opened)
{
spin_unlock_irqrestore(&devc->lock,flags);
return -(EBUSY);
}
devc->opened = 1;
spin_unlock_irqrestore(&devc->lock,flags);
devc->intr_active = 1;
if (mode & OPEN_READ)
{
devc->input_opened = 1;
devc->midi_input_intr = input;
}
return 0;
}
static void v_midi_close (int dev)
{
vmidi_devc *devc = midi_devs[dev]->devc;
unsigned long flags;
if (devc == NULL)
return;
spin_lock_irqsave(&devc->lock,flags);
devc->intr_active = 0;
devc->input_opened = 0;
devc->opened = 0;
spin_unlock_irqrestore(&devc->lock,flags);
}
static int v_midi_out (int dev, unsigned char midi_byte)
{
vmidi_devc *devc = midi_devs[dev]->devc;
vmidi_devc *pdevc;
if (devc == NULL)
return -ENXIO;
pdevc = midi_devs[devc->pair_mididev]->devc;
if (pdevc->input_opened > 0){
if (MIDIbuf_avail(pdevc->my_mididev) > 500)
return 0;
pdevc->midi_input_intr (pdevc->my_mididev, midi_byte);
}
return 1;
}
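/*
 * Illustrative user-space usage (a sketch; the device node names
 * depend on the sound configuration). Bytes written to one loopback
 * port are handed by v_midi_out() above straight to the input handler
 * of the paired port:
 *
 *     fd_out = open("/dev/midi01", O_WRONLY);
 *     fd_in  = open("/dev/midi02", O_RDONLY);
 *     write(fd_out, "\x90\x40\x7f", 3);   note-on byte sequence
 *     read(fd_in, buf, 3);                arrives here
 */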
static inline int v_midi_start_read (int dev)
{
return 0;
}
static int v_midi_end_read (int dev)
{
vmidi_devc *devc = midi_devs[dev]->devc;
if (devc == NULL)
return -ENXIO;
devc->intr_active = 0;
return 0;
}
/* why -EPERM and not -EINVAL?? */
static inline int v_midi_ioctl (int dev, unsigned cmd, void __user *arg)
{
return -EPERM;
}
#define MIDI_SYNTH_NAME "Loopback MIDI"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"
static struct midi_operations v_midi_operations =
{
.owner = THIS_MODULE,
.info = {"Loopback MIDI Port 1", 0, 0, SNDCARD_VMIDI},
.converter = &std_midi_synth,
.in_info = {0},
.open = v_midi_open,
.close = v_midi_close,
.ioctl = v_midi_ioctl,
.outputc = v_midi_out,
.start_read = v_midi_start_read,
.end_read = v_midi_end_read,
};
static struct midi_operations v_midi_operations2 =
{
.owner = THIS_MODULE,
.info = {"Loopback MIDI Port 2", 0, 0, SNDCARD_VMIDI},
.converter = &std_midi_synth,
.in_info = {0},
.open = v_midi_open,
.close = v_midi_close,
.ioctl = v_midi_ioctl,
.outputc = v_midi_out,
.start_read = v_midi_start_read,
.end_read = v_midi_end_read,
};
/*
* We kmalloc just one of these - it makes life simpler and the code
* cleaner and the memory handling far more efficient
*/
struct vmidi_memory
{
/* Must be first */
struct midi_operations m_ops[2];
struct synth_operations s_ops[2];
struct vmidi_devc v_ops[2];
};
static void __init attach_v_midi (struct address_info *hw_config)
{
struct vmidi_memory *m;
/* printk("Attaching v_midi device.....\n"); */
midi1 = sound_alloc_mididev();
if (midi1 == -1)
{
printk(KERN_ERR "v_midi: Too many midi devices detected\n");
return;
}
m = kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL);
if (m == NULL)
{
printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n");
sound_unload_mididev(midi1);
return;
}
midi_mem = m;
midi_devs[midi1] = &m->m_ops[0];
midi2 = sound_alloc_mididev();
if (midi2 == -1)
{
printk (KERN_ERR "v_midi: Too many midi devices detected\n");
kfree(m);
sound_unload_mididev(midi1);
return;
}
midi_devs[midi2] = &m->m_ops[1];
/* printk("VMIDI1: %d VMIDI2: %d\n",midi1,midi2); */
/* for MIDI-1 */
v_devc[0] = &m->v_ops[0];
memcpy ((char *) midi_devs[midi1], (char *) &v_midi_operations,
sizeof (struct midi_operations));
v_devc[0]->my_mididev = midi1;
v_devc[0]->pair_mididev = midi2;
v_devc[0]->opened = v_devc[0]->input_opened = 0;
v_devc[0]->intr_active = 0;
v_devc[0]->midi_input_intr = NULL;
spin_lock_init(&v_devc[0]->lock);
midi_devs[midi1]->devc = v_devc[0];
midi_devs[midi1]->converter = &m->s_ops[0];
std_midi_synth.midi_dev = midi1;
memcpy ((char *) midi_devs[midi1]->converter, (char *) &std_midi_synth,
sizeof (struct synth_operations));
midi_devs[midi1]->converter->id = "V_MIDI 1";
/* for MIDI-2 */
v_devc[1] = &m->v_ops[1];
memcpy ((char *) midi_devs[midi2], (char *) &v_midi_operations2,
sizeof (struct midi_operations));
v_devc[1]->my_mididev = midi2;
v_devc[1]->pair_mididev = midi1;
v_devc[1]->opened = v_devc[1]->input_opened = 0;
v_devc[1]->intr_active = 0;
v_devc[1]->midi_input_intr = NULL;
spin_lock_init(&v_devc[1]->lock);
midi_devs[midi2]->devc = v_devc[1];
midi_devs[midi2]->converter = &m->s_ops[1];
std_midi_synth.midi_dev = midi2;
memcpy ((char *) midi_devs[midi2]->converter, (char *) &std_midi_synth,
sizeof (struct synth_operations));
midi_devs[midi2]->converter->id = "V_MIDI 2";
sequencer_init();
/* printk("Attached v_midi device\n"); */
}
static inline int __init probe_v_midi(struct address_info *hw_config)
{
return(1); /* always OK */
}
static void __exit unload_v_midi(struct address_info *hw_config)
{
sound_unload_mididev(midi1);
sound_unload_mididev(midi2);
kfree(midi_mem);
}
static struct address_info cfg; /* dummy */
static int __init init_vmidi(void)
{
printk("MIDI Loopback device driver\n");
if (!probe_v_midi(&cfg))
return -ENODEV;
attach_v_midi(&cfg);
return 0;
}
static void __exit cleanup_vmidi(void)
{
unload_v_midi(&cfg);
}
module_init(init_vmidi);
module_exit(cleanup_vmidi);
MODULE_LICENSE("GPL");
| gpl-2.0 |
pali/linux-n900 | drivers/video/console/tileblit.c | 13233 | 3928 | /*
* linux/drivers/video/console/tileblit.c -- Tile Blitting Operation
*
* Copyright (C) 2004 Antonino Daplas <adaplas @pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
struct fb_tilearea area;
area.sx = sx;
area.sy = sy;
area.dx = dx;
area.dy = dy;
area.height = height;
area.width = width;
info->tileops->fb_tilecopy(info, &area);
}
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
struct fb_tilerect rect;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
int fgshift = (vc->vc_hi_font_mask) ? 9 : 8;
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
rect.fg = attr_fgcol_ec(fgshift, vc, info);
rect.bg = attr_bgcol_ec(bgshift, vc, info);
rect.sx = sx;
rect.sy = sy;
rect.width = width;
rect.height = height;
rect.rop = ROP_COPY;
info->tileops->fb_tilefill(info, &rect);
}
static void tile_putcs(struct vc_data *vc, struct fb_info *info,
const unsigned short *s, int count, int yy, int xx,
int fg, int bg)
{
struct fb_tileblit blit;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int size = sizeof(u32) * count, i;
blit.sx = xx;
blit.sy = yy;
blit.width = count;
blit.height = 1;
blit.fg = fg;
blit.bg = bg;
blit.length = count;
blit.indices = (u32 *) fb_get_buffer_offset(info, &info->pixmap, size);
for (i = 0; i < count; i++)
blit.indices[i] = (u32)(scr_readw(s++) & charmask);
info->tileops->fb_tileblit(info, &blit);
}
static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
int bottom_only)
{
return;
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int softback_lines, int fg, int bg)
{
struct fb_tilecursor cursor;
int use_sw = (vc->vc_cursor_type & 0x10);
cursor.sx = vc->vc_x;
cursor.sy = vc->vc_y;
cursor.mode = (mode == CM_ERASE || use_sw) ? 0 : 1;
cursor.fg = fg;
cursor.bg = bg;
switch (vc->vc_cursor_type & 0x0f) {
case CUR_NONE:
cursor.shape = FB_TILE_CURSOR_NONE;
break;
case CUR_UNDERLINE:
cursor.shape = FB_TILE_CURSOR_UNDERLINE;
break;
case CUR_LOWER_THIRD:
cursor.shape = FB_TILE_CURSOR_LOWER_THIRD;
break;
case CUR_LOWER_HALF:
cursor.shape = FB_TILE_CURSOR_LOWER_HALF;
break;
case CUR_TWO_THIRDS:
cursor.shape = FB_TILE_CURSOR_TWO_THIRDS;
break;
case CUR_BLOCK:
default:
cursor.shape = FB_TILE_CURSOR_BLOCK;
break;
}
info->tileops->fb_tilecursor(info, &cursor);
}
static int tile_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int err;
err = fb_pan_display(info, &ops->var);
ops->var.xoffset = info->var.xoffset;
ops->var.yoffset = info->var.yoffset;
ops->var.vmode = info->var.vmode;
return err;
}
void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
{
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
ops->cursor = tile_cursor;
ops->update_start = tile_update_start;
if (ops->p) {
map.width = vc->vc_font.width;
map.height = vc->vc_font.height;
map.depth = 1;
map.length = (ops->p->userfont) ?
FNTCHARCNT(ops->p->fontdata) : 256;
map.data = ops->p->fontdata;
info->tileops->fb_settile(info, &map);
}
}
EXPORT_SYMBOL(fbcon_set_tileops);
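/*
 * Driver-side hookup (a sketch with hypothetical my_* names): a
 * tile-blitting capable driver advertises its operations and the
 * FBINFO_MISC_TILEBLITTING flag, after which fbcon routes console
 * drawing through the functions above:
 *
 *     static struct fb_tile_ops my_tile_ops = {
 *         .fb_settile     = my_settile,
 *         .fb_tilecopy    = my_tilecopy,
 *         .fb_tilefill    = my_tilefill,
 *         .fb_tileblit    = my_tileblit,
 *         .fb_tilecursor  = my_tilecursor,
 *         .fb_get_tilemax = my_get_tilemax,
 *     };
 *
 *     info->tileops = &my_tile_ops;
 *     info->flags |= FBINFO_MISC_TILEBLITTING;
 */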
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Tile Blitting Operation");
MODULE_LICENSE("GPL");
| gpl-2.0 |
coolbho3k/galaxysii_oc | drivers/acpi/acpica/exstore.c | 946 | 15287 | /******************************************************************************
*
* Module Name: exstore - AML Interpreter object store support
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2010, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exstore")
/* Local prototypes */
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
union acpi_operand_object *index_desc,
struct acpi_walk_state *walk_state);
/*******************************************************************************
*
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
* *dest_desc - Where to store it. Must be an NS node
* or a union acpi_operand_object of type
* Reference;
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Store the value described by source_desc into the location
* described by dest_desc. Called by various interpreter
* functions to store the result of an operation into
* the destination operand -- not just simply the actual "Store"
* ASL operator.
*
******************************************************************************/
acpi_status
acpi_ex_store(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object *ref_desc = dest_desc;
ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
/* Validate parameters */
if (!source_desc || !dest_desc) {
ACPI_ERROR((AE_INFO, "Null parameter"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
/* dest_desc can be either a namespace node or an ACPI object */
if (ACPI_GET_DESCRIPTOR_TYPE(dest_desc) == ACPI_DESC_TYPE_NAMED) {
/*
* Dest is a namespace node,
* Storing an object into a Named node.
*/
status = acpi_ex_store_object_to_node(source_desc,
(struct
acpi_namespace_node *)
dest_desc, walk_state,
ACPI_IMPLICIT_CONVERSION);
return_ACPI_STATUS(status);
}
/* Destination object must be a Reference or a Constant object */
switch (dest_desc->common.type) {
case ACPI_TYPE_LOCAL_REFERENCE:
break;
case ACPI_TYPE_INTEGER:
/* Allow stores to Constants -- a Noop as per ACPI spec */
if (dest_desc->common.flags & AOPOBJ_AML_CONSTANT) {
return_ACPI_STATUS(AE_OK);
}
/*lint -fallthrough */
default:
/* Destination is not a Reference object */
ACPI_ERROR((AE_INFO,
"Target is not a Reference or Constant object - %s [%p]",
acpi_ut_get_object_type_name(dest_desc),
dest_desc));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/*
* Examine the Reference class. These cases are handled:
*
* 1) Store to Name (Change the object associated with a name)
* 2) Store to an indexed area of a Buffer or Package
* 3) Store to a Method Local or Arg
* 4) Store to the debug object
*/
switch (ref_desc->reference.class) {
case ACPI_REFCLASS_REFOF:
/* Storing an object into a Name "container" */
status = acpi_ex_store_object_to_node(source_desc,
ref_desc->reference.
object, walk_state,
ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_REFCLASS_INDEX:
/* Storing to an Index (pointer into a packager or buffer) */
status =
acpi_ex_store_object_to_index(source_desc, ref_desc,
walk_state);
break;
case ACPI_REFCLASS_LOCAL:
case ACPI_REFCLASS_ARG:
/* Store to a method local/arg */
status =
acpi_ds_store_object_to_local(ref_desc->reference.class,
ref_desc->reference.value,
source_desc, walk_state);
break;
case ACPI_REFCLASS_DEBUG:
/*
* Storing to the Debug object causes the value stored to be
* displayed and otherwise has no effect -- see ACPI Specification
*/
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** Write to Debug Object: Object %p %s ****:\n\n",
source_desc,
acpi_ut_get_object_type_name(source_desc)));
ACPI_DEBUG_OBJECT(source_desc, 0, 0);
break;
default:
ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
ref_desc->reference.class));
ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
status = AE_AML_INTERNAL;
break;
}
return_ACPI_STATUS(status);
}
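/*
 * Illustrative ASL fragments (a sketch) for the reference classes
 * handled above. A store through a reference obtained with RefOf lands
 * in the Name "container" case, while:
 *
 *     Store (0x56, Index (BUF0, 2))    stores through an Index
 *     Store (Arg0, Local3)             stores to a method Local
 *     Store (Local0, Debug)            only displays the value
 */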
/*******************************************************************************
*
* FUNCTION: acpi_ex_store_object_to_index
*
* PARAMETERS: *source_desc - Value to be stored
* *index_desc - Index reference designating the element to receive the value
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Store the object to indexed Buffer or Package element
*
******************************************************************************/
static acpi_status
acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
union acpi_operand_object *index_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
union acpi_operand_object *new_desc;
u8 value = 0;
u32 i;
ACPI_FUNCTION_TRACE(ex_store_object_to_index);
/*
* Destination must be a reference pointer, and
* must point to either a buffer or a package
*/
switch (index_desc->reference.target_type) {
case ACPI_TYPE_PACKAGE:
/*
* Storing to a package element. Copy the object and replace
* any existing object with the new object. No implicit
* conversion is performed.
*
* The object at *(index_desc->Reference.Where) is the
* element within the package that is to be modified.
* The parent package object is at index_desc->Reference.Object
*/
obj_desc = *(index_desc->reference.where);
if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE &&
source_desc->reference.class == ACPI_REFCLASS_TABLE) {
/* This is a DDBHandle, just add a reference to it */
acpi_ut_add_reference(source_desc);
new_desc = source_desc;
} else {
/* Normal object, copy it */
status =
acpi_ut_copy_iobject_to_iobject(source_desc,
&new_desc,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
if (obj_desc) {
/* Decrement reference count by the ref count of the parent package */
for (i = 0; i < ((union acpi_operand_object *)
index_desc->reference.object)->common.
reference_count; i++) {
acpi_ut_remove_reference(obj_desc);
}
}
*(index_desc->reference.where) = new_desc;
/* Increment ref count by the ref count of the parent package-1 */
for (i = 1; i < ((union acpi_operand_object *)
index_desc->reference.object)->common.
reference_count; i++) {
acpi_ut_add_reference(new_desc);
}
break;
case ACPI_TYPE_BUFFER_FIELD:
/*
* Store into a Buffer or String (not actually a real buffer_field)
* at a location defined by an Index.
*
* The first 8-bit element of the source object is written to the
* 8-bit Buffer location defined by the Index destination object,
* according to the ACPI 2.0 specification.
*/
/*
* Make sure the target is a Buffer or String. An error should
* not happen here, since the reference_object was constructed
* by the INDEX_OP code.
*/
obj_desc = index_desc->reference.object;
if ((obj_desc->common.type != ACPI_TYPE_BUFFER) &&
(obj_desc->common.type != ACPI_TYPE_STRING)) {
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/*
* The assignment of the individual elements will be slightly
* different for each source type.
*/
switch (source_desc->common.type) {
case ACPI_TYPE_INTEGER:
/* Use the least-significant byte of the integer */
value = (u8) (source_desc->integer.value);
break;
case ACPI_TYPE_BUFFER:
case ACPI_TYPE_STRING:
/* Note: Takes advantage of common string/buffer fields */
value = source_desc->buffer.pointer[0];
break;
default:
/* All other types are invalid */
ACPI_ERROR((AE_INFO,
"Source must be Integer/Buffer/String type, not %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/* Store the source value into the target buffer byte */
obj_desc->buffer.pointer[index_desc->reference.value] = value;
break;
default:
ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
status = AE_AML_OPERAND_TYPE;
break;
}
return_ACPI_STATUS(status);
}
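/*
 * Worked example: with Name (BUF0, Buffer () {0x11, 0x22, 0x33}), the
 * ASL statement Store (0x1234, Index (BUF0, 1)) writes only the least
 * significant byte of the integer, leaving BUF0 as {0x11, 0x34, 0x33}.
 * A Package destination instead receives a copy of the whole source
 * object, with no implicit conversion.
 */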
/*******************************************************************************
*
* FUNCTION: acpi_ex_store_object_to_node
*
* PARAMETERS: source_desc - Value to be stored
* Node - Named object to receive the value
* walk_state - Current walk state
* implicit_conversion - Perform implicit conversion (yes/no)
*
* RETURN: Status
*
* DESCRIPTION: Store the object to the named object.
*
* The Assignment of an object to a named object is handled here
* The value passed in will replace the current value (if any)
* with the input value.
*
* When storing into an object the data is converted to the
* target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
* Assumes parameters are already validated.
*
******************************************************************************/
acpi_status
acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
struct acpi_namespace_node *node,
struct acpi_walk_state *walk_state,
u8 implicit_conversion)
{
acpi_status status = AE_OK;
union acpi_operand_object *target_desc;
union acpi_operand_object *new_desc;
acpi_object_type target_type;
ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
/* Get current type of the node, and object attached to Node */
target_type = acpi_ns_get_type(node);
target_desc = acpi_ns_get_attached_object(node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Storing %p(%s) into node %p(%s)\n",
source_desc,
acpi_ut_get_object_type_name(source_desc), node,
acpi_ut_get_type_name(target_type)));
/*
* Resolve the source object to an actual value
* (If it is a reference object)
*/
status = acpi_ex_resolve_object(&source_desc, target_type, walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* If no implicit conversion, drop into the default case below */
if ((!implicit_conversion) ||
((walk_state->opcode == AML_COPY_OP) &&
(target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
(target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
(target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
/*
* Force execution of default (no implicit conversion). Note:
* copy_object does not perform an implicit conversion, as per the ACPI
* spec -- except in case of region/bank/index fields -- because these
* objects must retain their original type permanently.
*/
target_type = ACPI_TYPE_ANY;
}
/* Do the actual store operation */
switch (target_type) {
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/* For fields, copy the source data to the target field. */
status = acpi_ex_write_data_to_field(source_desc, target_desc,
&walk_state->result_obj);
break;
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* These target types are all of type Integer/String/Buffer, and
* therefore support implicit conversion before the store.
*
* Copy and/or convert the source object to a new target object
*/
status =
acpi_ex_store_object_to_object(source_desc, target_desc,
&new_desc, walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (new_desc != target_desc) {
/*
* Store the new new_desc as the new value of the Name, and set
* the Name's type to that of the value being stored in it.
* source_desc reference count is incremented by attach_object.
*
* Note: This may change the type of the node if an explicit store
* has been performed such that the node/object type has been
* changed.
*/
status =
acpi_ns_attach_object(node, new_desc,
new_desc->common.type);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Store %s into %s via Convert/Attach\n",
acpi_ut_get_object_type_name
(source_desc),
acpi_ut_get_object_type_name
(new_desc)));
}
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Storing %s (%p) directly into node (%p) with no implicit conversion\n",
acpi_ut_get_object_type_name(source_desc),
source_desc, node));
/* No conversions for all other types. Just attach the source object */
status = acpi_ns_attach_object(node, source_desc,
source_desc->common.type);
break;
}
return_ACPI_STATUS(status);
}
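/*
 * Worked example: given Name (INT1, 0), the ASL statement
 * Store ("ABCD", INT1) implicitly converts the source to the target
 * type, so INT1 ends up holding the Integer 0xABCD and keeps its type.
 * A copy_object of the same string would instead replace the object,
 * retyping INT1 to a String.
 */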
| gpl-2.0 |
joaquimorg/android_kernel_huawei_u8510 | drivers/acpi/acpica/tbxface.c | 946 | 21929 | /******************************************************************************
*
* Module Name: tbxface - Public interfaces to the ACPI subsystem
* ACPI table oriented interfaces
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2010, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "actables.h"
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxface")
/* Local prototypes */
static acpi_status acpi_tb_load_namespace(void);
static int no_auto_ssdt;
/*******************************************************************************
*
* FUNCTION: acpi_allocate_root_table
*
* PARAMETERS: initial_table_count - Size of initial_table_array, in number of
* struct acpi_table_desc structures
*
* RETURN: Status
*
* DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
* acpi_initialize_tables.
*
******************************************************************************/
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
return (acpi_tb_resize_root_table_list());
}
/*******************************************************************************
*
* FUNCTION: acpi_initialize_tables
*
* PARAMETERS: initial_table_array - Pointer to an array of pre-allocated
* struct acpi_table_desc structures. If NULL, the
* array is dynamically allocated.
* initial_table_count - Size of initial_table_array, in number of
* struct acpi_table_desc structures
* allow_resize - Flag to tell Table Manager if resize of
* pre-allocated array is allowed. Ignored
* if initial_table_array is NULL.
*
* RETURN: Status
*
* DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
*
* NOTE: Allows static allocation of the initial table array in order
* to avoid the use of dynamic memory in confined environments
* such as the kernel boot sequence where it may not be available.
*
* If the host OS memory managers are initialized, use NULL for
* initial_table_array, and the table will be dynamically allocated.
*
******************************************************************************/
acpi_status __init
acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
u32 initial_table_count, u8 allow_resize)
{
acpi_physical_address rsdp_address;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_initialize_tables);
/*
* Set up the Root Table Array
* Allocate the table array if requested
*/
if (!initial_table_array) {
status = acpi_allocate_root_table(initial_table_count);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} else {
/* Root Table Array has been statically allocated by the host */
ACPI_MEMSET(initial_table_array, 0,
(acpi_size) initial_table_count *
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
acpi_gbl_root_table_list.max_table_count = initial_table_count;
acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
if (allow_resize) {
acpi_gbl_root_table_list.flags |=
ACPI_ROOT_ALLOW_RESIZE;
}
}
/* Get the address of the RSDP */
rsdp_address = acpi_os_get_root_pointer();
if (!rsdp_address) {
return_ACPI_STATUS(AE_NOT_FOUND);
}
/*
* Get the root table (RSDT or XSDT) and extract all entries to the local
* Root Table Array. This array contains the information of the RSDT/XSDT
* in a common, more usable format.
*/
status = acpi_tb_parse_root_table(rsdp_address);
return_ACPI_STATUS(status);
}
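/*
 * Illustrative early-boot usage (a sketch; the array size is the
 * host's choice): a static scratch array avoids any dynamic memory
 * use, and the host may later switch to an allocated array with
 * acpi_reallocate_root_table():
 *
 *     static struct acpi_table_desc initial_tables[128] __initdata;
 *
 *     status = acpi_initialize_tables(initial_tables, 128, TRUE);
 */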
/*******************************************************************************
*
* FUNCTION: acpi_reallocate_root_table
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
* root list from the previously provided scratch area. Should
* be called once dynamic memory allocation is available in the
* kernel
*
******************************************************************************/
acpi_status acpi_reallocate_root_table(void)
{
struct acpi_table_desc *tables;
acpi_size new_size;
acpi_size current_size;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
/*
* Only reallocate the root table if the host provided a static buffer
* for the table array in the call to acpi_initialize_tables.
*/
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
return_ACPI_STATUS(AE_SUPPORT);
}
/*
* Get the current size of the root table and add the default
* increment to create the new table size.
*/
current_size = (acpi_size)
acpi_gbl_root_table_list.current_table_count *
sizeof(struct acpi_table_desc);
new_size = current_size +
(ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
/* Create new array and copy the old array */
tables = ACPI_ALLOCATE_ZEROED(new_size);
if (!tables) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
/*
* Update the root table descriptor. The new size will be the current
* number of tables plus the increment, independent of the reserved
* size of the original table list.
*/
acpi_gbl_root_table_list.tables = tables;
acpi_gbl_root_table_list.max_table_count =
acpi_gbl_root_table_list.current_table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT;
acpi_gbl_root_table_list.flags =
ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_load_table
*
* PARAMETERS: table_ptr - pointer to a buffer containing the entire
* table to be loaded
*
* RETURN: Status
*
* DESCRIPTION: This function is called to load a table from the caller's
* buffer. The buffer must contain an entire ACPI Table including
* a valid header. The header fields will be verified, and if it
* is determined that the table is invalid, the call will fail.
*
******************************************************************************/
acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
{
acpi_status status;
u32 table_index;
struct acpi_table_desc table_desc;
if (!table_ptr)
return AE_BAD_PARAMETER;
ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
table_desc.pointer = table_ptr;
table_desc.length = table_ptr->length;
table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
/*
* Install the new table into the local data structures
*/
status = acpi_tb_add_table(&table_desc, &table_index);
if (ACPI_FAILURE(status)) {
return status;
}
status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
return status;
}
ACPI_EXPORT_SYMBOL(acpi_load_table)
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
*
* PARAMETERS: Signature - ACPI signature of needed table
* Instance - Which instance (for SSDTs)
* out_table_header - The pointer to the table header to fill
*
* RETURN: Status and pointer to mapped table header
*
* DESCRIPTION: Finds an ACPI table header.
*
* NOTE: Caller is responsible in unmapping the header with
* acpi_os_unmap_memory
*
******************************************************************************/
acpi_status
acpi_get_table_header(char *signature,
u32 instance, struct acpi_table_header *out_table_header)
{
u32 i;
u32 j;
struct acpi_table_header *header;
/* Parameter validation */
if (!signature || !out_table_header) {
return (AE_BAD_PARAMETER);
}
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
continue;
}
if (++j < instance) {
continue;
}
if (!acpi_gbl_root_table_list.tables[i].pointer) {
if ((acpi_gbl_root_table_list.tables[i].flags &
ACPI_TABLE_ORIGIN_MASK) ==
ACPI_TABLE_ORIGIN_MAPPED) {
header =
acpi_os_map_memory(acpi_gbl_root_table_list.
tables[i].address,
sizeof(struct
acpi_table_header));
if (!header) {
return AE_NO_MEMORY;
}
ACPI_MEMCPY(out_table_header, header,
sizeof(struct acpi_table_header));
acpi_os_unmap_memory(header,
sizeof(struct
acpi_table_header));
} else {
return AE_NOT_FOUND;
}
} else {
ACPI_MEMCPY(out_table_header,
acpi_gbl_root_table_list.tables[i].pointer,
sizeof(struct acpi_table_header));
}
return (AE_OK);
}
return (AE_NOT_FOUND);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/*******************************************************************************
*
* FUNCTION: acpi_unload_table_id
*
* PARAMETERS: id - Owner ID of the table to be removed.
*
* RETURN: Status
*
* DESCRIPTION: This routine is used to force the unload of a table (by id)
*
******************************************************************************/
acpi_status acpi_unload_table_id(acpi_owner_id id)
{
int i;
acpi_status status = AE_NOT_EXIST;
ACPI_FUNCTION_TRACE(acpi_unload_table_id);
/* Find table in the global table list */
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
continue;
}
/*
* Delete all namespace objects owned by this table. Note that these
* objects can appear anywhere in the namespace by virtue of the AML
* "Scope" operator. Thus, we need to track ownership by an ID, not
* simply a position within the hierarchy
*/
acpi_tb_delete_namespace_by_owner(i);
status = acpi_tb_release_owner_id(i);
acpi_tb_set_table_loaded_flag(i, FALSE);
break;
}
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
/*******************************************************************************
*
* FUNCTION: acpi_get_table_with_size
*
* PARAMETERS: Signature - ACPI signature of needed table
* Instance - Which instance (for SSDTs)
* out_table - Where the pointer to the table is returned
* tbl_size - Where the length of the table is returned
*
* RETURN: Status and pointer to table
*
* DESCRIPTION: Finds and verifies an ACPI table.
*
******************************************************************************/
acpi_status
acpi_get_table_with_size(char *signature,
u32 instance, struct acpi_table_header **out_table,
acpi_size *tbl_size)
{
u32 i;
u32 j;
acpi_status status;
/* Parameter validation */
if (!signature || !out_table) {
return (AE_BAD_PARAMETER);
}
/* Walk the root table list */
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
if (!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
continue;
}
if (++j < instance) {
continue;
}
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
if (ACPI_SUCCESS(status)) {
*out_table = acpi_gbl_root_table_list.tables[i].pointer;
*tbl_size = acpi_gbl_root_table_list.tables[i].length;
}
if (!acpi_gbl_permanent_mmap) {
acpi_gbl_root_table_list.tables[i].pointer = NULL;
}
return (status);
}
return (AE_NOT_FOUND);
}
acpi_status
acpi_get_table(char *signature,
u32 instance, struct acpi_table_header **out_table)
{
acpi_size tbl_size;
return acpi_get_table_with_size(signature,
instance, out_table, &tbl_size);
}
ACPI_EXPORT_SYMBOL(acpi_get_table)
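/*
* Illustrative usage sketch (not part of ACPICA; the variable name is
* hypothetical). Instance is 1-based, which matters mainly for SSDTs:
*
*	struct acpi_table_header *fadt;
*	if (ACPI_SUCCESS(acpi_get_table(ACPI_SIG_FADT, 1, &fadt)))
*		... fadt points at the verified, mapped table ...
*/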
/*******************************************************************************
*
* FUNCTION: acpi_get_table_by_index
*
* PARAMETERS: table_index - Table index
* Table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to the table
*
* DESCRIPTION: Obtain a table by an index into the global table list.
*
******************************************************************************/
acpi_status
acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
/* Parameter validation */
if (!table) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Validate index */
if (table_index >= acpi_gbl_root_table_list.current_table_count) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
/* Table is not mapped, map it */
status =
acpi_tb_verify_table(&acpi_gbl_root_table_list.
tables[table_index]);
if (ACPI_FAILURE(status)) {
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
}
*table = acpi_gbl_root_table_list.tables[table_index].pointer;
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
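/*
* Illustrative usage sketch (not part of ACPICA; names are hypothetical):
* iterating over every installed table by index. The call returns
* AE_BAD_PARAMETER once the index runs past the table list, which
* terminates the loop:
*
*	struct acpi_table_header *t;
*	u32 idx;
*	for (idx = 0; ACPI_SUCCESS(acpi_get_table_by_index(idx, &t)); idx++)
*		... t points at the mapped table ...
*/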
/*******************************************************************************
*
* FUNCTION: acpi_tb_load_namespace
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
* the RSDT/XSDT.
*
******************************************************************************/
static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
* Load the namespace. The DSDT is required, but any SSDT and
* PSDT tables are optional. Verify the DSDT.
*/
if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
ACPI_SIG_DSDT)
||
ACPI_FAILURE(acpi_tb_verify_table
(&acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT]))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
}
/*
* Save the DSDT pointer for simple access. This is the mapped memory
* address. We must take care here because the address of the .Tables
* array can change dynamically as tables are loaded at run-time. Note:
* .Pointer field is not validated until after call to acpi_tb_verify_table.
*/
acpi_gbl_DSDT =
acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
/*
* Optionally copy the entire DSDT to local memory (instead of simply
* mapping it.) There are some BIOSs that corrupt or replace the original
* DSDT, creating the need for this option. Default is FALSE, do not copy
* the DSDT.
*/
if (acpi_gbl_copy_dsdt_locally) {
new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
if (new_dsdt) {
acpi_gbl_DSDT = new_dsdt;
}
}
/*
* Save the original DSDT header for detection of table corruption
* and/or replacement of the DSDT from outside the OS.
*/
ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
sizeof(struct acpi_table_header));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
&&
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.tables[i].
signature), ACPI_SIG_PSDT))
||
ACPI_FAILURE(acpi_tb_verify_table
(&acpi_gbl_root_table_list.tables[i]))) {
continue;
}
if (no_auto_ssdt) {
printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
continue;
}
/* Ignore errors while loading tables, get as many as possible */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
(void)acpi_ns_load_table(i, acpi_gbl_root_node);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
}
ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_load_tables
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
*
******************************************************************************/
acpi_status acpi_load_tables(void)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_load_tables);
/* Load the namespace from the tables */
status = acpi_tb_load_namespace();
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While loading namespace from ACPI tables"));
}
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_load_tables)
/*******************************************************************************
*
* FUNCTION: acpi_install_table_handler
*
* PARAMETERS: Handler - Table event handler
* Context - Value passed to the handler on each event
*
* RETURN: Status
*
* DESCRIPTION: Install table event handler
*
******************************************************************************/
acpi_status
acpi_install_table_handler(acpi_tbl_handler handler, void *context)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_install_table_handler);
if (!handler) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Don't allow more than one handler */
if (acpi_gbl_table_handler) {
status = AE_ALREADY_EXISTS;
goto cleanup;
}
/* Install the handler */
acpi_gbl_table_handler = handler;
acpi_gbl_table_handler_context = context;
cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
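/*
* Illustrative sketch (not part of ACPICA) of a handler suitable for
* acpi_install_table_handler(); the function name is hypothetical. It is
* invoked on table events with the context supplied at install time:
*
*	static acpi_status my_table_handler(u32 event, void *table,
*					    void *context)
*	{
*		return AE_OK;
*	}
*
*	status = acpi_install_table_handler(my_table_handler, NULL);
*/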
/*******************************************************************************
*
* FUNCTION: acpi_remove_table_handler
*
* PARAMETERS: Handler - Table event handler that was installed
* previously.
*
* RETURN: Status
*
* DESCRIPTION: Remove table event handler
*
******************************************************************************/
acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Make sure that the installed handler is the same */
if (!handler || handler != acpi_gbl_table_handler) {
status = AE_BAD_PARAMETER;
goto cleanup;
}
/* Remove the handler */
acpi_gbl_table_handler = NULL;
cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
static int __init acpi_no_auto_ssdt_setup(char *s)
{
printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
no_auto_ssdt = 1;
return 1;
}
__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
| gpl-2.0 |
hggh/linux | arch/x86/kernel/jump_label.c | 1458 | 4027 | /*
* jump label x86 support
*
* Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
*
*/
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#ifdef HAVE_JUMP_LABEL
union jump_code_union {
char code[JUMP_LABEL_NOP_SIZE];
struct {
char jump;
int offset;
} __attribute__((packed));
};
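/*
* Layout note (summary, not authoritative): this union overlays the 5-byte
* instruction at a jump label site. As a jump it is a near relative jump:
* opcode 0xe9 followed by a little-endian 32-bit displacement measured from
* the end of the instruction. For example, e9 10 00 00 00 at address A
* jumps to A + 5 + 0x10. The other form is a 5-byte NOP of the same size.
*/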
static void bug_at(unsigned char *ip, int line)
{
/*
* The location is not an op that we were expecting.
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
BUG();
}
static void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
void *(*poker)(void *, const void *, size_t),
int init)
{
union jump_code_union code;
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
if (type == JUMP_LABEL_ENABLE) {
if (init) {
/*
* Jump label is enabled for the first time.
* So we expect a default_nop...
*/
if (unlikely(memcmp((void *)entry->code, default_nop, 5)
!= 0))
bug_at((void *)entry->code, __LINE__);
} else {
/*
* ...otherwise expect an ideal_nop. Otherwise
* something went horribly wrong.
*/
if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
!= 0))
bug_at((void *)entry->code, __LINE__);
}
code.jump = 0xe9;
code.offset = entry->target -
(entry->code + JUMP_LABEL_NOP_SIZE);
} else {
/*
* We are disabling this jump label. If it is not what
* we think it is, then something must have gone wrong.
* If this is the first initialization call, then we
* are converting the default nop to the ideal nop.
*/
if (init) {
if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
bug_at((void *)entry->code, __LINE__);
} else {
code.jump = 0xe9;
code.offset = entry->target -
(entry->code + JUMP_LABEL_NOP_SIZE);
if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
bug_at((void *)entry->code, __LINE__);
}
memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
}
/*
* Make text_poke_bp() the default fallback poker.
*
* While the change is being made, ignore whether this is a
* nop -> jump or a jump -> nop transition, and always assume
* the nop is the 'currently valid' instruction.
*/
if (poker)
(*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
else
text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
(void *)entry->code + JUMP_LABEL_NOP_SIZE);
}
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
get_online_cpus();
mutex_lock(&text_mutex);
__jump_label_transform(entry, type, NULL, 0);
mutex_unlock(&text_mutex);
put_online_cpus();
}
static enum {
JL_STATE_START,
JL_STATE_NO_UPDATE,
JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
/*
* This function is called at boot up and when modules are
* first loaded. Check if the default nop, the one that is
* inserted at compile time, is the ideal nop. If it is, then
* we do not need to update the nop, and we can leave it as is.
* If it is not, then we need to update the nop to the ideal nop.
*/
if (jlstate == JL_STATE_START) {
const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
if (memcmp(ideal_nop, default_nop, 5) != 0)
jlstate = JL_STATE_UPDATE;
else
jlstate = JL_STATE_NO_UPDATE;
}
if (jlstate == JL_STATE_UPDATE)
__jump_label_transform(entry, type, text_poke_early, 1);
}
#endif
| gpl-2.0 |
gianogli/boeffla-kernel-cm-s3-7 | drivers/i2c/busses/i2c-sis5595.c | 1714 | 11883 | /*
Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Note: we assume there can only be one SIS5595 with one SMBus interface */
/*
Note: all have mfr. ID 0x1039.
SUPPORTED       PCI ID
5595            0008
Note: these chips contain a 0008 device which is incompatible with the
5595. We recognize these by the presence of the listed
"blacklist" PCI ID and refuse to load.
NOT SUPPORTED   PCI ID    BLACKLIST PCI ID
540             0008      0540
550             0008      0550
5513            0008      5511
5581            0008      5597
5582            0008      5597
5597            0008      5597
5598            0008      5597/5598
630             0008      0630
645             0008      0645
646             0008      0646
648             0008      0648
650             0008      0650
651             0008      0651
730             0008      0730
735             0008      0735
745             0008      0745
746             0008      0746
*/
/* TO DO:
* Add Block Transfers (ugly, but supported by the adapter)
* Add adapter resets
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>
static int blacklist[] = {
PCI_DEVICE_ID_SI_540,
PCI_DEVICE_ID_SI_550,
PCI_DEVICE_ID_SI_630,
PCI_DEVICE_ID_SI_645,
PCI_DEVICE_ID_SI_646,
PCI_DEVICE_ID_SI_648,
PCI_DEVICE_ID_SI_650,
PCI_DEVICE_ID_SI_651,
PCI_DEVICE_ID_SI_730,
PCI_DEVICE_ID_SI_735,
PCI_DEVICE_ID_SI_745,
PCI_DEVICE_ID_SI_746,
PCI_DEVICE_ID_SI_5511, /* 5513 chip has the 0008 device but that ID
shows up in other chips so we use the 5511
ID for recognition */
PCI_DEVICE_ID_SI_5597,
PCI_DEVICE_ID_SI_5598,
0, /* terminates the list */
};
/* Length of ISA address segment */
#define SIS5595_EXTENT 8
/* SIS5595 SMBus registers */
#define SMB_STS_LO 0x00
#define SMB_STS_HI 0x01
#define SMB_CTL_LO 0x02
#define SMB_CTL_HI 0x03
#define SMB_ADDR 0x04
#define SMB_CMD 0x05
#define SMB_PCNT 0x06
#define SMB_CNT 0x07
#define SMB_BYTE 0x08
#define SMB_DEV 0x10
#define SMB_DB0 0x11
#define SMB_DB1 0x12
#define SMB_HAA 0x13
/* PCI Address Constants */
#define SMB_INDEX 0x38
#define SMB_DAT 0x39
#define SIS5595_ENABLE_REG 0x40
#define ACPI_BASE 0x90
/* Other settings */
#define MAX_TIMEOUT 500
/* SIS5595 constants */
#define SIS5595_QUICK 0x00
#define SIS5595_BYTE 0x02
#define SIS5595_BYTE_DATA 0x04
#define SIS5595_WORD_DATA 0x06
#define SIS5595_PROC_CALL 0x08
#define SIS5595_BLOCK_DATA 0x0A
/* insmod parameters */
/* If force_addr is set to anything different from 0, we forcibly enable
the device at the given address. */
static u16 force_addr;
module_param(force_addr, ushort, 0);
MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller");
static struct pci_driver sis5595_driver;
static unsigned short sis5595_base;
static struct pci_dev *sis5595_pdev;
static u8 sis5595_read(u8 reg)
{
outb(reg, sis5595_base + SMB_INDEX);
return inb(sis5595_base + SMB_DAT);
}
static void sis5595_write(u8 reg, u8 data)
{
outb(reg, sis5595_base + SMB_INDEX);
outb(data, sis5595_base + SMB_DAT);
}
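/*
* The SMBus registers are reached through an index/data pair: the register
* offset is first written to SMB_INDEX, then the value moves through
* SMB_DAT. For example (illustrative), reading the low status byte is:
*
*	outb(SMB_STS_LO, sis5595_base + SMB_INDEX);
*	status = inb(sis5595_base + SMB_DAT);
*/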
static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
{
u16 a;
u8 val;
int *i;
int retval;
/* Look for imposters */
for (i = blacklist; *i != 0; i++) {
struct pci_dev *dev;
dev = pci_get_device(PCI_VENDOR_ID_SI, *i, NULL);
if (dev) {
dev_err(&SIS5595_dev->dev, "Looked for SIS5595 but found unsupported device %.4x\n", *i);
pci_dev_put(dev);
return -ENODEV;
}
}
/* Determine the address of the SMBus areas */
pci_read_config_word(SIS5595_dev, ACPI_BASE, &sis5595_base);
if (sis5595_base == 0 && force_addr == 0) {
dev_err(&SIS5595_dev->dev, "ACPI base address uninitialized - upgrade BIOS or use force_addr=0xaddr\n");
return -ENODEV;
}
if (force_addr)
sis5595_base = force_addr & ~(SIS5595_EXTENT - 1);
dev_dbg(&SIS5595_dev->dev, "ACPI Base address: %04x\n", sis5595_base);
/* NB: We grab just the two SMBus registers here, but this may still
* interfere with ACPI :-( */
retval = acpi_check_region(sis5595_base + SMB_INDEX, 2,
sis5595_driver.name);
if (retval)
return retval;
if (!request_region(sis5595_base + SMB_INDEX, 2,
sis5595_driver.name)) {
dev_err(&SIS5595_dev->dev, "SMBus registers 0x%04x-0x%04x already in use!\n",
sis5595_base + SMB_INDEX, sis5595_base + SMB_INDEX + 1);
return -ENODEV;
}
if (force_addr) {
dev_info(&SIS5595_dev->dev, "forcing ISA address 0x%04X\n", sis5595_base);
if (pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base)
!= PCIBIOS_SUCCESSFUL)
goto error;
if (pci_read_config_word(SIS5595_dev, ACPI_BASE, &a)
!= PCIBIOS_SUCCESSFUL)
goto error;
if ((a & ~(SIS5595_EXTENT - 1)) != sis5595_base) {
/* doesn't work for some chips! */
dev_err(&SIS5595_dev->dev, "force address failed - not supported?\n");
goto error;
}
}
if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
!= PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
dev_info(&SIS5595_dev->dev, "enabling ACPI\n");
if (pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80)
!= PCIBIOS_SUCCESSFUL)
goto error;
if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
!= PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
/* doesn't work for some chips? */
dev_err(&SIS5595_dev->dev, "ACPI enable failed - not supported?\n");
goto error;
}
}
/* Everything is happy */
return 0;
error:
release_region(sis5595_base + SMB_INDEX, 2);
return -ENODEV;
}
static int sis5595_transaction(struct i2c_adapter *adap)
{
int temp;
int result = 0;
int timeout = 0;
/* Make sure the SMBus host is ready to start transmitting */
temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8);
if (temp != 0x00) {
dev_dbg(&adap->dev, "SMBus busy (%04x). Resetting...\n", temp);
sis5595_write(SMB_STS_LO, temp & 0xff);
sis5595_write(SMB_STS_HI, temp >> 8);
if ((temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8)) != 0x00) {
dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
} else {
dev_dbg(&adap->dev, "Successful!\n");
}
}
/* start the transaction by setting bit 4 */
sis5595_write(SMB_CTL_LO, sis5595_read(SMB_CTL_LO) | 0x10);
/* We will always wait for a fraction of a second! */
do {
msleep(1);
temp = sis5595_read(SMB_STS_LO);
} while (!(temp & 0x40) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout > MAX_TIMEOUT) {
dev_dbg(&adap->dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x10) {
dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
result = -ENXIO;
}
if (temp & 0x20) {
dev_err(&adap->dev, "Bus collision! SMBus may be locked until "
"next hard reset (or not...)\n");
/* Clock stops and slave is stuck in mid-transmission */
result = -EIO;
}
temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8);
if (temp != 0x00) {
sis5595_write(SMB_STS_LO, temp & 0xff);
sis5595_write(SMB_STS_HI, temp >> 8);
}
temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8);
if (temp != 0x00)
dev_dbg(&adap->dev, "Failed reset at end of transaction (%02x)\n", temp);
return result;
}
/* Return negative errno on error. */
static s32 sis5595_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
int status;
switch (size) {
case I2C_SMBUS_QUICK:
sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
size = SIS5595_QUICK;
break;
case I2C_SMBUS_BYTE:
sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
if (read_write == I2C_SMBUS_WRITE)
sis5595_write(SMB_CMD, command);
size = SIS5595_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
sis5595_write(SMB_CMD, command);
if (read_write == I2C_SMBUS_WRITE)
sis5595_write(SMB_BYTE, data->byte);
size = SIS5595_BYTE_DATA;
break;
case I2C_SMBUS_PROC_CALL:
case I2C_SMBUS_WORD_DATA:
sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
sis5595_write(SMB_CMD, command);
if (read_write == I2C_SMBUS_WRITE) {
sis5595_write(SMB_BYTE, data->word & 0xff);
sis5595_write(SMB_BYTE + 1,
(data->word & 0xff00) >> 8);
}
size = (size == I2C_SMBUS_PROC_CALL) ? SIS5595_PROC_CALL : SIS5595_WORD_DATA;
break;
default:
dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
sis5595_write(SMB_CTL_LO, ((size & 0x0E)));
status = sis5595_transaction(adap);
if (status)
return status;
if ((size != SIS5595_PROC_CALL) &&
((read_write == I2C_SMBUS_WRITE) || (size == SIS5595_QUICK)))
return 0;
switch (size) {
case SIS5595_BYTE:
case SIS5595_BYTE_DATA:
data->byte = sis5595_read(SMB_BYTE);
break;
case SIS5595_WORD_DATA:
case SIS5595_PROC_CALL:
data->word = sis5595_read(SMB_BYTE) + (sis5595_read(SMB_BYTE + 1) << 8);
break;
}
return 0;
}
static u32 sis5595_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_PROC_CALL;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = sis5595_access,
.functionality = sis5595_func,
};
static struct i2c_adapter sis5595_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
};
static const struct pci_device_id sis5595_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
MODULE_DEVICE_TABLE (pci, sis5595_ids);
static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int err;
if (sis5595_setup(dev)) {
dev_err(&dev->dev, "SIS5595 not detected, module not inserted.\n");
return -ENODEV;
}
/* set up the sysfs linkage to our parent device */
sis5595_adapter.dev.parent = &dev->dev;
snprintf(sis5595_adapter.name, sizeof(sis5595_adapter.name),
"SMBus SIS5595 adapter at %04x", sis5595_base + SMB_INDEX);
err = i2c_add_adapter(&sis5595_adapter);
if (err) {
release_region(sis5595_base + SMB_INDEX, 2);
return err;
}
/* Always return failure here. This is to allow other drivers to bind
* to this pci device. We don't really want to have control over the
* pci device, we only wanted to read a few register values from it.
*/
sis5595_pdev = pci_dev_get(dev);
return -ENODEV;
}
static struct pci_driver sis5595_driver = {
.name = "sis5595_smbus",
.id_table = sis5595_ids,
.probe = sis5595_probe,
};
static int __init i2c_sis5595_init(void)
{
return pci_register_driver(&sis5595_driver);
}
static void __exit i2c_sis5595_exit(void)
{
pci_unregister_driver(&sis5595_driver);
if (sis5595_pdev) {
i2c_del_adapter(&sis5595_adapter);
release_region(sis5595_base + SMB_INDEX, 2);
pci_dev_put(sis5595_pdev);
sis5595_pdev = NULL;
}
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
MODULE_DESCRIPTION("SIS5595 SMBus driver");
MODULE_LICENSE("GPL");
module_init(i2c_sis5595_init);
module_exit(i2c_sis5595_exit);
| gpl-2.0 |
ls2uper/linux | drivers/media/rc/ttusbir.c | 1714 | 10523 | /*
* TechnoTrend USB IR Receiver
*
* Copyright (C) 2012 Sean Young <sean@mess.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/slab.h>
#include <linux/leds.h>
#include <media/rc-core.h>
#define DRIVER_NAME "ttusbir"
#define DRIVER_DESC "TechnoTrend USB IR Receiver"
/*
* The Windows driver uses 8 URBs, the original lirc driver has a
* configurable amount (2 default, 4 max). This device generates about 125
* messages per second (!), whether IR is idle or not.
*/
#define NUM_URBS 4
#define NS_PER_BYTE 62500
#define NS_PER_BIT (NS_PER_BYTE/8)
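/*
* Timing arithmetic (derived from the values above, on the assumption that
* the 16-byte isochronous packets arrive once per 1ms USB frame): 16
* bytes/ms is one byte per 62.5us, i.e. 62500ns, and with 8 samples per
* byte each sample (bit) covers 62500 / 8 ns.
*/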
struct ttusbir {
struct rc_dev *rc;
struct device *dev;
struct usb_device *udev;
struct urb *urb[NUM_URBS];
struct led_classdev led;
struct urb *bulk_urb;
uint8_t bulk_buffer[5];
int bulk_out_endp, iso_in_endp;
bool led_on, is_led_on;
atomic_t led_complete;
char phys[64];
};
static enum led_brightness ttusbir_brightness_get(struct led_classdev *led_dev)
{
struct ttusbir *tt = container_of(led_dev, struct ttusbir, led);
return tt->led_on ? LED_FULL : LED_OFF;
}
static void ttusbir_set_led(struct ttusbir *tt)
{
int ret;
smp_mb();
if (tt->led_on != tt->is_led_on && tt->udev &&
atomic_add_unless(&tt->led_complete, 1, 1)) {
tt->bulk_buffer[4] = tt->is_led_on = tt->led_on;
ret = usb_submit_urb(tt->bulk_urb, GFP_ATOMIC);
if (ret) {
dev_warn(tt->dev, "failed to submit bulk urb: %d\n",
ret);
atomic_dec(&tt->led_complete);
}
}
}
static void ttusbir_brightness_set(struct led_classdev *led_dev, enum
led_brightness brightness)
{
struct ttusbir *tt = container_of(led_dev, struct ttusbir, led);
tt->led_on = brightness != LED_OFF;
ttusbir_set_led(tt);
}
/*
* The urb cannot be reused until the urb completes
*/
static void ttusbir_bulk_complete(struct urb *urb)
{
struct ttusbir *tt = urb->context;
atomic_dec(&tt->led_complete);
switch (urb->status) {
case 0:
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_dbg(tt->dev, "Error: urb status = %d\n", urb->status);
break;
}
ttusbir_set_led(tt);
}
/*
* The data is one bit per sample, a set bit signifying silence and samples
* being MSB first. Bit 0 can contain garbage, so take it to be whatever
* bit 1 is; that way we don't see unexpected edges.
*/
static void ttusbir_process_ir_data(struct ttusbir *tt, uint8_t *buf)
{
struct ir_raw_event rawir;
unsigned i, v, b;
bool event = false;
init_ir_raw_event(&rawir);
for (i = 0; i < 128; i++) {
v = buf[i] & 0xfe;
switch (v) {
case 0xfe:
rawir.pulse = false;
rawir.duration = NS_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
case 0:
rawir.pulse = true;
rawir.duration = NS_PER_BYTE;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
default:
/* one edge per byte */
if (v & 2) {
b = ffz(v | 1);
rawir.pulse = true;
} else {
b = ffs(v) - 1;
rawir.pulse = false;
}
rawir.duration = NS_PER_BIT * (8 - b);
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
rawir.pulse = !rawir.pulse;
rawir.duration = NS_PER_BIT * b;
if (ir_raw_event_store_with_filter(tt->rc, &rawir))
event = true;
break;
}
}
/* don't wakeup when there's nothing to do */
if (event)
ir_raw_event_handle(tt->rc);
}
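/*
* Worked example (illustrative): a sample byte of 0xf0 masks to v == 0xf0.
* Bit 1 is clear, so b = ffs(0xf0) - 1 = 4, and the byte is reported as a
* space of (8 - 4) = 4 bit-times (the four leading silence samples, MSB
* first) followed by a pulse of 4 bit-times.
*/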
static void ttusbir_urb_complete(struct urb *urb)
{
struct ttusbir *tt = urb->context;
int rc;
switch (urb->status) {
case 0:
ttusbir_process_ir_data(tt, urb->transfer_buffer);
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
case -EPIPE:
default:
dev_dbg(tt->dev, "Error: urb status = %d\n", urb->status);
break;
}
rc = usb_submit_urb(urb, GFP_ATOMIC);
if (rc && rc != -ENODEV)
dev_warn(tt->dev, "failed to resubmit urb: %d\n", rc);
}
static int ttusbir_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct ttusbir *tt;
struct usb_interface_descriptor *idesc;
struct usb_endpoint_descriptor *desc;
struct rc_dev *rc;
int i, j, ret;
int altsetting = -1;
tt = kzalloc(sizeof(*tt), GFP_KERNEL);
rc = rc_allocate_device();
if (!tt || !rc) {
ret = -ENOMEM;
goto out;
}
/* find the correct alt setting */
for (i = 0; i < intf->num_altsetting && altsetting == -1; i++) {
int max_packet, bulk_out_endp = -1, iso_in_endp = -1;
idesc = &intf->altsetting[i].desc;
for (j = 0; j < idesc->bNumEndpoints; j++) {
desc = &intf->altsetting[i].endpoint[j].desc;
max_packet = le16_to_cpu(desc->wMaxPacketSize);
if (usb_endpoint_dir_in(desc) &&
usb_endpoint_xfer_isoc(desc) &&
max_packet == 0x10)
iso_in_endp = j;
else if (usb_endpoint_dir_out(desc) &&
usb_endpoint_xfer_bulk(desc) &&
max_packet == 0x20)
bulk_out_endp = j;
if (bulk_out_endp != -1 && iso_in_endp != -1) {
tt->bulk_out_endp = bulk_out_endp;
tt->iso_in_endp = iso_in_endp;
altsetting = i;
break;
}
}
}
if (altsetting == -1) {
dev_err(&intf->dev, "cannot find expected altsetting\n");
ret = -ENODEV;
goto out;
}
tt->dev = &intf->dev;
tt->udev = interface_to_usbdev(intf);
tt->rc = rc;
ret = usb_set_interface(tt->udev, 0, altsetting);
if (ret)
goto out;
for (i = 0; i < NUM_URBS; i++) {
struct urb *urb = usb_alloc_urb(8, GFP_KERNEL);
void *buffer;
if (!urb) {
ret = -ENOMEM;
goto out;
}
urb->dev = tt->udev;
urb->context = tt;
urb->pipe = usb_rcvisocpipe(tt->udev, tt->iso_in_endp);
urb->interval = 1;
buffer = usb_alloc_coherent(tt->udev, 128, GFP_KERNEL,
&urb->transfer_dma);
if (!buffer) {
usb_free_urb(urb);
ret = -ENOMEM;
goto out;
}
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP | URB_ISO_ASAP;
urb->transfer_buffer = buffer;
urb->complete = ttusbir_urb_complete;
urb->number_of_packets = 8;
urb->transfer_buffer_length = 128;
for (j = 0; j < 8; j++) {
urb->iso_frame_desc[j].offset = j * 16;
urb->iso_frame_desc[j].length = 16;
}
tt->urb[i] = urb;
}
tt->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tt->bulk_urb) {
ret = -ENOMEM;
goto out;
}
tt->bulk_buffer[0] = 0xaa;
tt->bulk_buffer[1] = 0x01;
tt->bulk_buffer[2] = 0x05;
tt->bulk_buffer[3] = 0x01;
usb_fill_bulk_urb(tt->bulk_urb, tt->udev, usb_sndbulkpipe(tt->udev,
tt->bulk_out_endp), tt->bulk_buffer, sizeof(tt->bulk_buffer),
ttusbir_bulk_complete, tt);
tt->led.name = "ttusbir:green:power";
tt->led.default_trigger = "rc-feedback";
tt->led.brightness_set = ttusbir_brightness_set;
tt->led.brightness_get = ttusbir_brightness_get;
tt->is_led_on = tt->led_on = true;
atomic_set(&tt->led_complete, 0);
ret = led_classdev_register(&intf->dev, &tt->led);
if (ret)
goto out;
usb_make_path(tt->udev, tt->phys, sizeof(tt->phys));
rc->input_name = DRIVER_DESC;
rc->input_phys = tt->phys;
usb_to_input_id(tt->udev, &rc->input_id);
rc->dev.parent = &intf->dev;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protocols = RC_BIT_ALL;
rc->priv = tt;
rc->driver_name = DRIVER_NAME;
rc->map_name = RC_MAP_TT_1500;
rc->timeout = MS_TO_NS(100);
/*
* The precision is NS_PER_BIT, but since every 8th bit can be
* overwritten with garbage the accuracy is at best 2 * NS_PER_BIT.
*/
rc->rx_resolution = NS_PER_BIT;
ret = rc_register_device(rc);
if (ret) {
dev_err(&intf->dev, "failed to register rc device %d\n", ret);
goto out2;
}
usb_set_intfdata(intf, tt);
for (i = 0; i < NUM_URBS; i++) {
ret = usb_submit_urb(tt->urb[i], GFP_KERNEL);
if (ret) {
dev_err(tt->dev, "failed to submit urb %d\n", ret);
goto out3;
}
}
return 0;
out3:
rc_unregister_device(rc);
rc = NULL;
out2:
led_classdev_unregister(&tt->led);
out:
if (tt) {
for (i = 0; i < NUM_URBS && tt->urb[i]; i++) {
struct urb *urb = tt->urb[i];
usb_kill_urb(urb);
usb_free_coherent(tt->udev, 128, urb->transfer_buffer,
urb->transfer_dma);
usb_free_urb(urb);
}
usb_kill_urb(tt->bulk_urb);
usb_free_urb(tt->bulk_urb);
kfree(tt);
}
rc_free_device(rc);
return ret;
}
static void ttusbir_disconnect(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
struct usb_device *udev = tt->udev;
int i;
tt->udev = NULL;
rc_unregister_device(tt->rc);
led_classdev_unregister(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
usb_kill_urb(tt->urb[i]);
usb_free_coherent(udev, 128, tt->urb[i]->transfer_buffer,
tt->urb[i]->transfer_dma);
usb_free_urb(tt->urb[i]);
}
usb_kill_urb(tt->bulk_urb);
usb_free_urb(tt->bulk_urb);
usb_set_intfdata(intf, NULL);
kfree(tt);
}
static int ttusbir_suspend(struct usb_interface *intf, pm_message_t message)
{
struct ttusbir *tt = usb_get_intfdata(intf);
int i;
for (i = 0; i < NUM_URBS; i++)
usb_kill_urb(tt->urb[i]);
led_classdev_suspend(&tt->led);
usb_kill_urb(tt->bulk_urb);
return 0;
}
static int ttusbir_resume(struct usb_interface *intf)
{
struct ttusbir *tt = usb_get_intfdata(intf);
int i, rc;
tt->is_led_on = true;
led_classdev_resume(&tt->led);
for (i = 0; i < NUM_URBS; i++) {
rc = usb_submit_urb(tt->urb[i], GFP_KERNEL);
if (rc) {
dev_warn(tt->dev, "failed to submit urb: %d\n", rc);
break;
}
}
return rc;
}
static const struct usb_device_id ttusbir_table[] = {
{ USB_DEVICE(0x0b48, 0x2003) },
{ }
};
static struct usb_driver ttusbir_driver = {
.name = DRIVER_NAME,
.id_table = ttusbir_table,
.probe = ttusbir_probe,
.suspend = ttusbir_suspend,
.resume = ttusbir_resume,
.reset_resume = ttusbir_resume,
.disconnect = ttusbir_disconnect,
};
module_usb_driver(ttusbir_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, ttusbir_table);
| gpl-2.0 |
frankoid/kernel-enrc2b | net/mac80211/wep.c | 2994 | 9035 | /*
* Software WEP encryption implementation
* Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright 2003, Instant802 Networks, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/random.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "wep.h"
int ieee80211_wep_init(struct ieee80211_local *local)
{
/* start WEP IV from a random value */
get_random_bytes(&local->wep_iv, WEP_IV_LEN);
local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(local->wep_tx_tfm)) {
local->wep_rx_tfm = ERR_PTR(-EINVAL);
return PTR_ERR(local->wep_tx_tfm);
}
local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(local->wep_rx_tfm)) {
crypto_free_cipher(local->wep_tx_tfm);
local->wep_tx_tfm = ERR_PTR(-EINVAL);
return PTR_ERR(local->wep_rx_tfm);
}
return 0;
}
void ieee80211_wep_free(struct ieee80211_local *local)
{
if (!IS_ERR(local->wep_tx_tfm))
crypto_free_cipher(local->wep_tx_tfm);
if (!IS_ERR(local->wep_rx_tfm))
crypto_free_cipher(local->wep_rx_tfm);
}
static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
{
/*
* Fluhrer, Mantin, and Shamir have reported weaknesses in the
* key scheduling algorithm of RC4. At least IVs (KeyByte + 3,
* 0xff, N) can be used to speed up attacks, so avoid using them.
*/
if ((iv & 0xff00) == 0xff00) {
u8 B = (iv >> 16) & 0xff;
if (B >= 3 && B < 3 + keylen)
return true;
}
return false;
}
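/*
* Worked example (illustrative): with a 40-bit (5-byte) WEP key, any IV
* whose middle byte is 0xff and whose first byte B is in [3, 8) is weak;
* e.g. iv == 0x03ff42 has B == 3 and corresponds to key byte B - 3 == 0 in
* the FMS attack, so the transmit path steps over it.
*/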
static void ieee80211_wep_get_iv(struct ieee80211_local *local,
int keylen, int keyidx, u8 *iv)
{
local->wep_iv++;
if (ieee80211_wep_weak_iv(local->wep_iv, keylen))
local->wep_iv += 0x0100;
if (!iv)
return;
*iv++ = (local->wep_iv >> 16) & 0xff;
*iv++ = (local->wep_iv >> 8) & 0xff;
*iv++ = local->wep_iv & 0xff;
*iv++ = keyidx << 6;
}
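/*
* Packing note (summary): the three IV bytes go on the air most-significant
* byte first and the fourth byte carries the key index in its top two bits
* (keyidx << 6). For example, wep_iv == 0x123456 with key index 1 yields
* the bytes 12 34 56 40 after the 802.11 header.
*/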
static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
struct sk_buff *skb,
int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
u8 *newhdr;
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
skb_headroom(skb) < WEP_IV_LEN))
return NULL;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_key *key)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
skb_pull(skb, WEP_IV_LEN);
}
/* Perform WEP encryption using given key. data buffer must have tailroom
* for 4-byte ICV. data_len must not include this ICV. Note: this function
* does _not_ add IV. data = RC4(data | CRC32(data)) */
int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len)
{
__le32 icv;
int i;
if (IS_ERR(tfm))
return -1;
icv = cpu_to_le32(~crc32_le(~0, data, data_len));
put_unaligned(icv, (__le32 *)(data + data_len));
crypto_cipher_setkey(tfm, rc4key, klen);
for (i = 0; i < data_len + WEP_ICV_LEN; i++)
crypto_cipher_encrypt_one(tfm, data + i, data + i);
return 0;
}
/* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the
* beginning of the buffer and 4 bytes of extra space (ICV) at the end of the
* buffer will be added. Both IV and ICV will be transmitted, so the
* payload length increases by 8 bytes.
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
int ieee80211_wep_encrypt(struct ieee80211_local *local,
struct sk_buff *skb,
const u8 *key, int keylen, int keyidx)
{
u8 *iv;
size_t len;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
if (!iv)
return -1;
len = skb->len - (iv + WEP_IV_LEN - skb->data);
/* Prepend 24-bit IV to RC4 key */
memcpy(rc4key, iv, 3);
/* Copy rest of the WEP key (the secret part) */
memcpy(rc4key + 3, key, keylen);
/* Add room for ICV */
skb_put(skb, WEP_ICV_LEN);
return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
iv + WEP_IV_LEN, len);
}
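/*
* Worked size example (illustrative): with a 104-bit (13-byte) key and a
* 100-byte payload, the WEP payload on the air is 4 (IV + key index) +
* 100 (RC4 data) + 4 (ICV) = 108 bytes, and the RC4 key handed to the
* cipher is 3 IV bytes + 13 secret bytes = 16 bytes.
*/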
/* Perform WEP decryption using given key. data buffer includes encrypted
* payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
* Return 0 on success and -1 on ICV mismatch. */
int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len)
{
__le32 crc;
int i;
if (IS_ERR(tfm))
return -1;
crypto_cipher_setkey(tfm, rc4key, klen);
for (i = 0; i < data_len + WEP_ICV_LEN; i++)
crypto_cipher_decrypt_one(tfm, data + i, data + i);
crc = cpu_to_le32(~crc32_le(~0, data, data_len));
if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
/* ICV mismatch */
return -1;
return 0;
}
/* Perform WEP decryption on given skb. Buffer includes whole WEP part of
* the frame: IV (4 bytes), encrypted payload (including SNAP header),
* ICV (4 bytes). skb->len includes both IV and ICV.
*
* Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
* failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload
* is moved to the beginning of the skb and skb length will be reduced.
*/
static int ieee80211_wep_decrypt(struct ieee80211_local *local,
struct sk_buff *skb,
struct ieee80211_key *key)
{
u32 klen;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
u8 keyidx;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
size_t len;
int ret = 0;
if (!ieee80211_has_protected(hdr->frame_control))
return -1;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
return -1;
len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
keyidx = skb->data[hdrlen + 3] >> 6;
if (!key || keyidx != key->conf.keyidx)
return -1;
klen = 3 + key->conf.keylen;
/* Prepend 24-bit IV to RC4 key */
memcpy(rc4key, skb->data + hdrlen, 3);
/* Copy rest of the WEP key (the secret part) */
memcpy(rc4key + 3, key->conf.key, key->conf.keylen);
if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
skb->data + hdrlen + WEP_IV_LEN,
len))
ret = -1;
/* Trim ICV */
skb_trim(skb, skb->len - WEP_ICV_LEN);
/* Remove IV */
memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
skb_pull(skb, WEP_IV_LEN);
return ret;
}
bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
u8 *ivpos;
u32 iv;
if (!ieee80211_has_protected(hdr->frame_control))
return false;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
ivpos = skb->data + hdrlen;
iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
return ieee80211_wep_weak_iv(iv, key->conf.keylen);
}
ieee80211_rx_result
ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control))
return RX_CONTINUE;
if (!(status->flag & RX_FLAG_DECRYPTED)) {
if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
return RX_DROP_UNUSABLE;
} else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
/* remove ICV */
skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
}
return RX_CONTINUE;
}
static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!info->control.hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
} else if (info->control.hw_key->flags &
IEEE80211_KEY_FLAG_GENERATE_IV) {
if (!ieee80211_wep_add_iv(tx->local, skb,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
}
return 0;
}
ieee80211_tx_result
ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
ieee80211_tx_set_protected(tx);
skb = tx->skb;
do {
if (wep_encrypt_skb(tx, skb) < 0) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
return TX_DROP;
}
} while ((skb = skb->next));
return TX_CONTINUE;
}
| gpl-2.0 |
TimesysGit/advantech-linux | sound/pci/ice1712/hoontech.c | 3250 | 10990 | /*
* ALSA driver for ICEnsemble ICE1712 (Envy24)
*
* Lowlevel functions for Hoontech STDSP24
*
* Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include "ice1712.h"
#include "hoontech.h"
/* Hoontech-specific setting */
struct hoontech_spec {
unsigned char boxbits[4];
unsigned int config;
unsigned short boxconfig[4];
};
static void snd_ice1712_stdsp24_gpio_write(struct snd_ice1712 *ice, unsigned char byte)
{
byte |= ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
byte &= ~ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
byte |= ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
}
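/*
* Each write above strobes the STDSP24 clock line around the data byte:
* the byte goes out with the clock bit high, then low, then high again,
* with ~100us settling delays, so the external logic sees a clean clock
* edge on which to latch the data.
*/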
static void snd_ice1712_stdsp24_darear(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_0_DAREAR(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
mutex_unlock(&ice->gpio_mutex);
}
static void snd_ice1712_stdsp24_mute(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_MUTE(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static void snd_ice1712_stdsp24_insel(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_INSEL(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static void snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, int box, int chn, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
/* select box */
ICE1712_STDSP24_0_BOX(spec->boxbits, box);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
/* prepare for write */
if (chn == 3)
ICE1712_STDSP24_2_CHN4(spec->boxbits, 0);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
udelay(100);
if (chn == 3) {
ICE1712_STDSP24_2_CHN4(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
} else {
switch (chn) {
case 0: ICE1712_STDSP24_1_CHN1(spec->boxbits, 0); break;
case 1: ICE1712_STDSP24_1_CHN2(spec->boxbits, 0); break;
case 2: ICE1712_STDSP24_1_CHN3(spec->boxbits, 0); break;
}
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
}
udelay(100);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
udelay(100);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mutex_unlock(&ice->gpio_mutex);
}
static void snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int box, int master)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
/* select box */
ICE1712_STDSP24_0_BOX(spec->boxbits, box);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, master);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
udelay(100);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mdelay(10);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mutex_unlock(&ice->gpio_mutex);
}
static void snd_ice1712_stdsp24_midi2(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_MIDI2(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static int snd_ice1712_hoontech_init(struct snd_ice1712 *ice)
{
struct hoontech_spec *spec;
int box, chn;
ice->num_total_dacs = 8;
ice->num_total_adcs = 8;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
ice->spec = spec;
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 0);
ICE1712_STDSP24_CLOCK(spec->boxbits, 0, 1);
ICE1712_STDSP24_0_BOX(spec->boxbits, 0);
ICE1712_STDSP24_0_DAREAR(spec->boxbits, 0);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 1);
ICE1712_STDSP24_CLOCK(spec->boxbits, 1, 1);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 2);
ICE1712_STDSP24_CLOCK(spec->boxbits, 2, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, 0);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 3);
ICE1712_STDSP24_CLOCK(spec->boxbits, 3, 1);
ICE1712_STDSP24_3_MIDI2(spec->boxbits, 0);
ICE1712_STDSP24_3_MUTE(spec->boxbits, 1);
ICE1712_STDSP24_3_INSEL(spec->boxbits, 0);
/* let's go - activate only functions in first box */
spec->config = 0;
/* ICE1712_STDSP24_MUTE |
ICE1712_STDSP24_INSEL |
ICE1712_STDSP24_DAREAR; */
/* These boxconfigs have caused problems in the past.
* The code is not optimal, but should now enable a working config to
* be achieved.
* ** MIDI IN can only be configured on one box **
* ICE1712_STDSP24_BOX_MIDI1 needs to be set for that box.
* Tests on an ADAC2000 box suggest the box config flags do not
* work as would be expected, and the inputs are crossed.
* Setting ICE1712_STDSP24_BOX_MIDI1 and ICE1712_STDSP24_BOX_MIDI2
* on the same box connects MIDI-In to both 401 uarts; both outputs
* are then active on all boxes.
* The default config here sets up everything on the first box.
* Alan Horstmann 5.2.2008
*/
spec->boxconfig[0] = ICE1712_STDSP24_BOX_CHN1 |
ICE1712_STDSP24_BOX_CHN2 |
ICE1712_STDSP24_BOX_CHN3 |
ICE1712_STDSP24_BOX_CHN4 |
ICE1712_STDSP24_BOX_MIDI1 |
ICE1712_STDSP24_BOX_MIDI2;
spec->boxconfig[1] =
spec->boxconfig[2] =
spec->boxconfig[3] = 0;
snd_ice1712_stdsp24_darear(ice,
(spec->config & ICE1712_STDSP24_DAREAR) ? 1 : 0);
snd_ice1712_stdsp24_mute(ice,
(spec->config & ICE1712_STDSP24_MUTE) ? 1 : 0);
snd_ice1712_stdsp24_insel(ice,
(spec->config & ICE1712_STDSP24_INSEL) ? 1 : 0);
for (box = 0; box < 4; box++) {
if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI2)
snd_ice1712_stdsp24_midi2(ice, 1);
for (chn = 0; chn < 4; chn++)
snd_ice1712_stdsp24_box_channel(ice, box, chn,
(spec->boxconfig[box] & (1 << chn)) ? 1 : 0);
if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI1)
snd_ice1712_stdsp24_box_midi(ice, box, 1);
}
return 0;
}
/*
* AK4524 access
*/
/* start callback for STDSP24 with modified hardware */
static void stdsp24_ak4524_lock(struct snd_akm4xxx *ak, int chip)
{
struct snd_ice1712 *ice = ak->private_data[0];
unsigned char tmp;
snd_ice1712_save_gpio_status(ice);
tmp = ICE1712_STDSP24_SERIAL_DATA |
ICE1712_STDSP24_SERIAL_CLOCK |
ICE1712_STDSP24_AK4524_CS;
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DIRECTION,
ice->gpio.direction | tmp);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, ~tmp);
}
static int snd_ice1712_value_init(struct snd_ice1712 *ice)
{
/* Hoontech STDSP24 with modified hardware */
static struct snd_akm4xxx akm_stdsp24_mv = {
.num_adcs = 2,
.num_dacs = 2,
.type = SND_AK4524,
.ops = {
.lock = stdsp24_ak4524_lock
}
};
static struct snd_ak4xxx_private akm_stdsp24_mv_priv = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_STDSP24_SERIAL_DATA,
.clk_mask = ICE1712_STDSP24_SERIAL_CLOCK,
.cs_mask = ICE1712_STDSP24_AK4524_CS,
.cs_addr = ICE1712_STDSP24_AK4524_CS,
.cs_none = 0,
.add_flags = 0,
};
int err;
struct snd_akm4xxx *ak;
/* set the analog DACs */
ice->num_total_dacs = 2;
/* set the analog ADCs */
ice->num_total_adcs = 2;
/* analog section */
ak = ice->akm = kmalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
if (! ak)
return -ENOMEM;
ice->akm_codecs = 1;
err = snd_ice1712_akm4xxx_init(ak, &akm_stdsp24_mv, &akm_stdsp24_mv_priv, ice);
if (err < 0)
return err;
/* ak4524 controls */
err = snd_ice1712_akm4xxx_build_controls(ice);
if (err < 0)
return err;
return 0;
}
static int snd_ice1712_ez8_init(struct snd_ice1712 *ice)
{
ice->gpio.write_mask = ice->eeprom.gpiomask;
ice->gpio.direction = ice->eeprom.gpiodir;
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, ice->eeprom.gpiomask);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DIRECTION, ice->eeprom.gpiodir);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, ice->eeprom.gpiostate);
return 0;
}
/* entry point */
struct snd_ice1712_card_info snd_ice1712_hoontech_cards[] = {
{
.subvendor = ICE1712_SUBDEVICE_STDSP24,
.name = "Hoontech SoundTrack Audio DSP24",
.model = "dsp24",
.chip_init = snd_ice1712_hoontech_init,
.mpu401_1_name = "MIDI-1 Hoontech/STA DSP24",
.mpu401_2_name = "MIDI-2 Hoontech/STA DSP24",
},
{
.subvendor = ICE1712_SUBDEVICE_STDSP24_VALUE, /* a dummy id */
.name = "Hoontech SoundTrack Audio DSP24 Value",
.model = "dsp24_value",
.chip_init = snd_ice1712_value_init,
},
{
.subvendor = ICE1712_SUBDEVICE_STDSP24_MEDIA7_1,
.name = "Hoontech STA DSP24 Media 7.1",
.model = "dsp24_71",
.chip_init = snd_ice1712_hoontech_init,
},
{
.subvendor = ICE1712_SUBDEVICE_EVENT_EZ8, /* a dummy id */
.name = "Event Electronics EZ8",
.model = "ez8",
.chip_init = snd_ice1712_ez8_init,
},
{ } /* terminator */
};
| gpl-2.0 |
cneira/ebpf-backports | linux-3.10.0-514.21.1.el7.x86_64/drivers/platform/x86/xo1-rfkill.c | 3250 | 1743 | /*
* Support for rfkill through the OLPC XO-1 laptop embedded controller
*
* Copyright (C) 2010 One Laptop per Child
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/olpc-ec.h>
static bool card_blocked;
static int rfkill_set_block(void *data, bool blocked)
{
unsigned char cmd;
int r;
if (blocked == card_blocked)
return 0;
if (blocked)
cmd = EC_WLAN_ENTER_RESET;
else
cmd = EC_WLAN_LEAVE_RESET;
r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
if (r == 0)
card_blocked = blocked;
return r;
}
static const struct rfkill_ops rfkill_ops = {
.set_block = rfkill_set_block,
};
static int xo1_rfkill_probe(struct platform_device *pdev)
{
struct rfkill *rfk;
int r;
rfk = rfkill_alloc(pdev->name, &pdev->dev, RFKILL_TYPE_WLAN,
&rfkill_ops, NULL);
if (!rfk)
return -ENOMEM;
r = rfkill_register(rfk);
if (r) {
rfkill_destroy(rfk);
return r;
}
platform_set_drvdata(pdev, rfk);
return 0;
}
static int xo1_rfkill_remove(struct platform_device *pdev)
{
struct rfkill *rfk = platform_get_drvdata(pdev);
rfkill_unregister(rfk);
rfkill_destroy(rfk);
return 0;
}
static struct platform_driver xo1_rfkill_driver = {
.driver = {
.name = "xo1-rfkill",
.owner = THIS_MODULE,
},
.probe = xo1_rfkill_probe,
.remove = xo1_rfkill_remove,
};
module_platform_driver(xo1_rfkill_driver);
MODULE_AUTHOR("Daniel Drake <dsd@laptop.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:xo1-rfkill");
| gpl-2.0 |
SimpleAOSP-Kernel/kernel_flo | drivers/net/vmxnet3/vmxnet3_drv.c | 4018 | 88753 | /*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
*
*/
#include <linux/module.h>
#include <net/ip6_checksum.h>
#include "vmxnet3_int.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
* PCI Device ID Table
* Last entry must be all 0s
*/
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static atomic_t devices_found;
#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
/*
* Enable/Disable the given intr
*/
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
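/*
 * Note: VMXNET3_REG_IMR is a per-vector interrupt mask register, with each
 * vector's register 8 bytes apart in BAR0. Writing 0 unmasks (enables) the
 * vector; writing 1 masks (disables) it, as the two helpers above show.
 */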
/*
* Enable/Disable all intrs used by the device
*/
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_enable_intr(adapter, i);
adapter->shared->devRead.intrConf.intrCtrl &=
cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
adapter->shared->devRead.intrConf.intrCtrl |=
cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
return tq->stopped;
}
static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/*
* Check the link state. This may start or stop the tx queue.
*/
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
int i;
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
adapter->netdev->name, adapter->link_speed);
if (!netif_carrier_ok(adapter->netdev))
netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_start(&adapter->tx_queue[i],
adapter);
}
} else {
printk(KERN_INFO "%s: NIC Link is Down\n",
adapter->netdev->name);
if (netif_carrier_ok(adapter->netdev))
netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
}
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
int i;
unsigned long flags;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
vmxnet3_ack_events(adapter, events);
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
vmxnet3_check_link(adapter, true);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
dev_err(&adapter->netdev->dev,
"%s: tq[%d] error 0x%x\n",
adapter->netdev->name, i, le32_to_cpu(
adapter->tqd_start[i].status.error));
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rqd_start[i].status.stopped)
dev_err(&adapter->netdev->dev,
"%s: rq[%d] error 0x%x\n",
adapter->netdev->name, i,
adapter->rqd_start[i].status.error);
schedule_work(&adapter->work);
}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
* The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to read from and write to the shared ABI correctly.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big-endian architectures. Then, before reading them
 * in the driver, the complete double word is translated using le32_to_cpu.
 * Similarly, after the driver writes into the bitfields, cpu_to_le32 is used
 * to translate the double words back into the required format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the functions below.
*/
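/*
 * Illustrative sketch (not part of the driver): for a field occupying
 * VMXNET3_TXD_GEN_SIZE bits at VMXNET3_TXD_GEN_SHIFT within the double word
 * at index VMXNET3_TXD_GEN_DWORD_SHIFT, a big-endian reader first converts
 * the whole 32-bit word with le32_to_cpu() and then masks and shifts, e.g.
 *
 *	gen = get_bitfield32(((const __le32 *)txdesc) +
 *			     VMXNET3_TXD_GEN_DWORD_SHIFT,
 *			     VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE);
 *
 * which is exactly what the VMXNET3_TXDESC_GET_GEN() macro below expands to.
 */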
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
struct Vmxnet3_RxDesc *dstDesc)
{
u32 *src = (u32 *)srcDesc + 2;
u32 *dst = (u32 *)dstDesc + 2;
dstDesc->addr = le64_to_cpu(srcDesc->addr);
*dst = le32_to_cpu(*src);
dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
struct Vmxnet3_TxDesc *dstDesc)
{
int i;
u32 *src = (u32 *)(srcDesc + 1);
u32 *dst = (u32 *)(dstDesc + 1);
/* Working backwards so that the gen bit is set at the end. */
for (i = 2; i > 0; i--) {
src--;
dst--;
*dst = cpu_to_le32(*src);
}
}
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
struct Vmxnet3_RxCompDesc *dstDesc)
{
int i = 0;
u32 *src = (u32 *)srcDesc;
u32 *dst = (u32 *)dstDesc;
for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
*dst = le32_to_cpu(*src);
src++;
dst++;
}
}
/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
u32 temp = le32_to_cpu(*bitfield);
u32 mask = ((1 << size) - 1) << pos;
temp &= mask;
temp >>= pos;
return temp;
}
#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD
# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
(dstrcd) = (tmp); \
vmxnet3_RxCompToCPU((rcd), (tmp)); \
} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
(dstrxd) = (tmp); \
vmxnet3_RxDescToCPU((rxd), (tmp)); \
} while (0)
#else
# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
#endif /* __BIG_ENDIAN_BITFIELD */
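/*
 * On little-endian builds the accessors above reduce to plain bitfield
 * reads, and vmxnet3_getRxComp()/vmxnet3_getRxDesc() simply alias the
 * descriptor in the shared ring rather than copying it into the temporary.
 */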
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
if (tbi->map_type == VMXNET3_MAP_SINGLE)
pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else if (tbi->map_type == VMXNET3_MAP_PAGE)
pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
struct sk_buff *skb;
int entries = 0;
/* no out of order completion */
BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
skb = tq->buf_info[eop_idx].skb;
BUG_ON(skb == NULL);
tq->buf_info[eop_idx].skb = NULL;
VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
while (tq->tx_ring.next2comp != eop_idx) {
vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
pdev);
		/* update next2comp w/o tx_lock. Since we are marking more,
		 * not fewer, tx ring entries available, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
*/
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
entries++;
}
dev_kfree_skb_any(skb);
return entries;
}
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int completed = 0;
union Vmxnet3_GenericDesc *gdesc;
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
&gdesc->tcd), tq, adapter->pdev,
adapter);
vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
}
if (completed) {
spin_lock(&tq->tx_lock);
if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
netif_carrier_ok(adapter->netdev))) {
vmxnet3_tq_wake(tq, adapter);
}
spin_unlock(&tq->tx_lock);
}
return completed;
}
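/*
 * Note on the wake-up in vmxnet3_tq_tx_complete() above: the queue is only
 * restarted once the number of free descriptors exceeds
 * VMXNET3_WAKE_QUEUE_THRESHOLD(tq), which provides hysteresis so a nearly
 * full ring does not bounce between stopped and started on every completion.
 */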
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int i;
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
struct vmxnet3_tx_buf_info *tbi;
tbi = tq->buf_info + tq->tx_ring.next2comp;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
dev_kfree_skb_any(tbi->skb);
tbi->skb = NULL;
}
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
}
/* sanity check, verify all buffers are indeed unmapped and freed */
for (i = 0; i < tq->tx_ring.size; i++) {
BUG_ON(tq->buf_info[i].skb != NULL ||
tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
}
tq->tx_ring.gen = VMXNET3_INIT_GEN;
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
if (tq->tx_ring.base) {
pci_free_consistent(adapter->pdev, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc),
tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
pci_free_consistent(adapter->pdev, tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc),
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->comp_ring.base) {
pci_free_consistent(adapter->pdev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
kfree(tq->buf_info);
tq->buf_info = NULL;
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int i;
/* reset the tx ring contents to 0 and reset the tx ring states */
memset(tq->tx_ring.base, 0, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc));
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->tx_ring.gen = VMXNET3_INIT_GEN;
memset(tq->data_ring.base, 0, tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc));
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
tq->comp_ring.next2proc = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset the bookkeeping data */
memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
for (i = 0; i < tq->tx_ring.size; i++)
tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
* sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA);
if (!tq->tx_ring.base) {
printk(KERN_ERR "%s: failed to allocate tx ring\n",
adapter->netdev->name);
goto err;
}
tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
tq->data_ring.size *
sizeof(struct Vmxnet3_TxDataDesc),
&tq->data_ring.basePA);
if (!tq->data_ring.base) {
printk(KERN_ERR "%s: failed to allocate data ring\n",
adapter->netdev->name);
goto err;
}
tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA);
if (!tq->comp_ring.base) {
printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
adapter->netdev->name);
goto err;
}
tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
GFP_KERNEL);
if (!tq->buf_info)
goto err;
return 0;
err:
vmxnet3_tq_destroy(tq, adapter);
return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
 * are allocated or allocation fails.
*/
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
int num_to_alloc, struct vmxnet3_adapter *adapter)
{
int num_allocated = 0;
struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
rbi = rbi_base + ring->next2fill;
gd = ring->base + ring->next2fill;
if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
rbi->skb = dev_alloc_skb(rbi->len +
NET_IP_ALIGN);
if (unlikely(rbi->skb == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->skb->dev = adapter->netdev;
skb_reserve(rbi->skb, NET_IP_ALIGN);
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
} else {
/* rx buffer skipped by the device */
}
val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
} else {
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
rbi->len != PAGE_SIZE);
if (rbi->page == NULL) {
rbi->page = alloc_page(GFP_ATOMIC);
if (unlikely(rbi->page == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->dma_addr = pci_map_page(adapter->pdev,
rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
} else {
/* rx buffers skipped by the device */
}
val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
}
BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
		/* Fill the last buffer but don't mark it ready, or else the
* device will think that the queue is full */
if (num_allocated == num_to_alloc)
break;
gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
rq->uncommitted[ring_idx] += num_allocated;
dev_dbg(&adapter->netdev->dev,
"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
"%u, uncommitted %u\n", num_allocated, ring->next2fill,
ring->next2comp, rq->uncommitted[ring_idx]);
/* so that the device can distinguish a full ring and an empty ring */
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
return num_allocated;
}
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi)
{
struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
skb_shinfo(skb)->nr_frags;
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
__skb_frag_set_page(frag, rbi->page);
frag->page_offset = 0;
skb_frag_size_set(frag, rcd->len);
skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
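/*
 * Summary of the gen-bit handshake used by vmxnet3_map_pkt() and
 * vmxnet3_tq_xmit(): the device only consumes a descriptor whose gen bit
 * matches the ring's current generation. The SOP descriptor is therefore
 * written with the *previous* generation while the rest of the chain gets
 * the current one; vmxnet3_tq_xmit() flips the SOP gen bit last, publishing
 * the entire multi-descriptor packet to the device in a single step.
 */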
static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
struct vmxnet3_adapter *adapter)
{
u32 dw2, len;
unsigned long buf_offset;
int i;
union Vmxnet3_GenericDesc *gdesc;
struct vmxnet3_tx_buf_info *tbi = NULL;
BUG_ON(ctx->copy_size > skb_headlen(skb));
/* use the previous gen bit for the SOP desc */
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
gdesc = ctx->sop_txd; /* both loops below can be skipped */
/* no need to map the buffer if headers are copied */
if (ctx->copy_size) {
ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
tq->tx_ring.next2fill *
sizeof(struct Vmxnet3_TxDataDesc));
ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
ctx->sop_txd->dword[3] = 0;
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill,
le64_to_cpu(ctx->sop_txd->txd.addr),
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
/* use the right gen for non-SOP desc */
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
}
/* linear part can use multiple tx desc if it's big */
len = skb_headlen(skb) - ctx->copy_size;
buf_offset = ctx->copy_size;
while (len) {
u32 buf_size;
if (len < VMXNET3_MAX_TX_BUF_SIZE) {
buf_size = len;
dw2 |= len;
} else {
buf_size = VMXNET3_MAX_TX_BUF_SIZE;
/* spec says that for TxDesc.len, 0 == 2^14 */
}
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
tbi->dma_addr = pci_map_single(adapter->pdev,
skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE);
tbi->len = buf_size;
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
len -= buf_size;
buf_offset += buf_size;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
0, skb_frag_size(frag),
DMA_TO_DEVICE);
tbi->len = skb_frag_size(frag);
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
gdesc->dword[3] = 0;
dev_dbg(&adapter->netdev->dev,
"txd[%u]: 0x%llu %u %u\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
}
ctx->eop_txd = gdesc;
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
* parse and copy relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
*
* Returns:
 *    -1:  an error occurred during parsing
* 0: protocol headers parsed, but too big to be copied
* 1: protocol headers parsed and copied
*
* Other effects:
* 1. related *ctx fields are updated.
* 2. ctx->copy_size is # of bytes copied
* 3. the portion copied is guaranteed to be in the linear part
*
*/
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_TxDataDesc *tdd;
if (ctx->mss) { /* TSO */
ctx->eth_ip_hdr_size = skb_transport_offset(skb);
ctx->l4_hdr_size = tcp_hdrlen(skb);
ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
if (ctx->ipv4) {
const struct iphdr *iph = ip_hdr(skb);
if (iph->protocol == IPPROTO_TCP)
ctx->l4_hdr_size = tcp_hdrlen(skb);
else if (iph->protocol == IPPROTO_UDP)
ctx->l4_hdr_size = sizeof(struct udphdr);
else
ctx->l4_hdr_size = 0;
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
}
ctx->copy_size = min(ctx->eth_ip_hdr_size +
ctx->l4_hdr_size, skb->len);
} else {
ctx->eth_ip_hdr_size = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
, skb_headlen(skb));
}
/* make sure headers are accessible directly */
if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
goto err;
}
if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
tq->stats.oversized_hdr++;
ctx->copy_size = 0;
return 0;
}
tdd = tq->data_ring.base + tq->tx_ring.next2fill;
memcpy(tdd->data, skb->data, ctx->copy_size);
dev_dbg(&adapter->netdev->dev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
return 1;
err:
return -1;
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
struct tcphdr *tcph = tcp_hdr(skb);
if (ctx->ipv4) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else {
struct ipv6hdr *iph = ipv6_hdr(skb);
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
IPPROTO_TCP, 0);
}
}
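/*
 * Note: with TSO the device computes the final TCP checksum for each
 * segment, but it expects the TCP checksum field to be pre-seeded with the
 * pseudo-header sum computed over a zero length, which is what the
 * csum_tcpudp_magic()/csum_ipv6_magic() calls above provide.
 */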
/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
* NETDEV_TX_OK: error occurred, the pkt is dropped
* NETDEV_TX_BUSY: tx ring is full, queue is stopped
*
* Side-effects:
* 1. tx ring may be changed
* 2. tq stats may be updated accordingly
* 3. shared->txNumDeferred may be updated
*/
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
int ret;
u32 count;
unsigned long flags;
struct vmxnet3_tx_ctx ctx;
union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
/* Use temporary descriptor to avoid touching bits multiple times */
union Vmxnet3_GenericDesc tempTxDesc;
#endif
/* conservatively estimate # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
if (skb_header_cloned(skb)) {
if (unlikely(pskb_expand_head(skb, 0, 0,
GFP_ATOMIC) != 0)) {
tq->stats.drop_tso++;
goto drop_pkt;
}
tq->stats.copy_skb_header++;
}
vmxnet3_prepare_tso(skb, &ctx);
} else {
if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
/* non-tso pkts must not use more than
* VMXNET3_MAX_TXD_PER_PKT entries
*/
if (skb_linearize(skb) != 0) {
tq->stats.drop_too_many_frags++;
goto drop_pkt;
}
tq->stats.linearized++;
/* recalculate the # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
}
}
spin_lock_irqsave(&tq->tx_lock, flags);
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
dev_dbg(&adapter->netdev->dev,
"tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
vmxnet3_tq_stop(tq, adapter);
spin_unlock_irqrestore(&tq->tx_lock, flags);
return NETDEV_TX_BUSY;
}
ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
if (ctx.mss) {
if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
VMXNET3_MAX_TX_BUF_SIZE)) {
goto hdr_too_big;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (unlikely(ctx.eth_ip_hdr_size +
skb->csum_offset >
VMXNET3_MAX_CSUM_OFFSET)) {
goto hdr_too_big;
}
}
}
} else {
tq->stats.drop_hdr_inspect_err++;
goto unlock_drop_pkt;
}
/* fill tx descs related to addr & len */
vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
/* setup the EOP desc */
ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
gdesc = &tempTxDesc;
gdesc->dword[2] = ctx.sop_txd->dword[2];
gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
gdesc = ctx.sop_txd;
#endif
if (ctx.mss) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
gdesc->txd.om = VMXNET3_OM_TSO;
gdesc->txd.msscof = ctx.mss;
le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gdesc->txd.hlen = ctx.eth_ip_hdr_size;
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.msscof = ctx.eth_ip_hdr_size +
skb->csum_offset;
} else {
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
}
le32_add_cpu(&tq->shared->txNumDeferred, 1);
}
if (vlan_tx_tag_present(skb)) {
gdesc->txd.ti = 1;
gdesc->txd.tci = vlan_tx_tag_get(skb);
}
/* finally flips the GEN bit of the SOP desc. */
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
/* Finished updating in bitfields of Tx Desc, so write them in original
* place.
*/
vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
(struct Vmxnet3_TxDesc *)ctx.sop_txd);
gdesc = ctx.sop_txd;
#endif
dev_dbg(&adapter->netdev->dev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
spin_unlock_irqrestore(&tq->tx_lock, flags);
if (le32_to_cpu(tq->shared->txNumDeferred) >=
le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_TXPROD + tq->qid * 8,
tq->tx_ring.next2fill);
}
return NETDEV_TX_OK;
hdr_too_big:
tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
return vmxnet3_tq_xmit(skb,
&adapter->tx_queue[skb->queue_mapping],
adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
struct sk_buff *skb,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
/* typical case: TCP/UDP over IP and both csums are correct */
if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
VMXNET3_RCD_CSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
skb->csum = htons(gdesc->rcd.csum);
skb->ip_summed = CHECKSUM_PARTIAL;
} else {
skb_checksum_none_assert(skb);
}
}
} else {
skb_checksum_none_assert(skb);
}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
rq->stats.drop_err++;
if (!rcd->fcs)
rq->stats.drop_fcs++;
rq->stats.drop_total++;
/*
* We do not unmap and chain the rx buffer to the skb.
* We basically pretend this buffer is not used and will be recycled
* by vmxnet3_rq_alloc_rx_buf()
*/
/*
* ctx->skb may be NULL if this is the first and the only one
* desc for the pkt
*/
if (ctx->skb)
dev_kfree_skb_irq(ctx->skb);
ctx->skb = NULL;
}
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
static const u32 rxprod_reg[2] = {
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
u32 num_rxd = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxCmdDesc;
struct Vmxnet3_RxCompDesc rxComp;
#endif
vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
struct sk_buff *skb, *new_skb = NULL;
struct page *new_page = NULL;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
struct vmxnet3_cmd_ring *ring = NULL;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
num_rxd++;
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
BUG_ON(rxd->addr != rbi->dma_addr ||
rxd->len != rbi->len);
if (unlikely(rcd->eop && rcd->err)) {
vmxnet3_rx_error(rq, rcd, ctx, adapter);
goto rcd_done;
}
if (rcd->sop) { /* first buf of the pkt */
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
rcd->rqID != rq->qid);
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
dev_dbg(&adapter->netdev->dev,
"rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
}
skip_page_frags = false;
ctx->skb = rbi->skb;
new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
if (new_skb == NULL) {
				/* Skb allocation failed: do not hand this skb
				 * over to the stack. Reuse it. Drop the existing pkt.
*/
rq->stats.rx_buf_alloc_failure++;
ctx->skb = NULL;
rq->stats.drop_total++;
skip_page_frags = true;
goto rcd_done;
}
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
skb_put(ctx->skb, rcd->len);
/* Immediate refill */
new_skb->dev = adapter->netdev;
skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
} else {
BUG_ON(ctx->skb == NULL && !skip_page_frags);
			/* a non-SOP buffer must be type 1 in most cases */
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
/* If an sop buffer was dropped, skip all
* following non-sop fragments. They will be reused.
*/
if (skip_page_frags)
goto rcd_done;
new_page = alloc_page(GFP_ATOMIC);
if (unlikely(new_page == NULL)) {
/* Replacement page frag could not be allocated.
* Reuse this page. Drop the pkt and free the
* skb which contained this page as a frag. Skip
* processing all the following non-sop frags.
*/
rq->stats.rx_buf_alloc_failure++;
dev_kfree_skb(ctx->skb);
ctx->skb = NULL;
skip_page_frags = true;
goto rcd_done;
}
if (rcd->len) {
pci_unmap_page(adapter->pdev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
vmxnet3_append_frag(ctx->skb, rcd, rbi);
}
/* Immediate refill */
rbi->page = new_page;
rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
}
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
if (unlikely(rcd->ts))
__vlan_hwaccel_put_tag(skb, rcd->tci);
if (adapter->netdev->features & NETIF_F_LRO)
netif_receive_skb(skb);
else
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
}
rcd_done:
/* device may have skipped some rx descs */
ring->next2comp = idx;
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
while (num_to_alloc) {
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
&rxCmdDesc);
BUG_ON(!rxd->addr);
/* Recv desc is ready to be used by the device */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
num_to_alloc--;
}
/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
vmxnet3_getRxComp(rcd,
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
return num_rxd;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxDesc;
#endif
vmxnet3_getRxDesc(rxd,
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rq->buf_info[ring_idx][i].skb) {
pci_unmap_single(adapter->pdev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq->buf_info[ring_idx][i].skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rq->buf_info[ring_idx][i].page) {
pci_unmap_page(adapter->pdev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
put_page(rq->buf_info[ring_idx][i].page);
rq->buf_info[ring_idx][i].page = NULL;
}
}
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq->rx_ring[ring_idx].next2fill =
rq->rx_ring[ring_idx].next2comp = 0;
rq->uncommitted[ring_idx] = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}
void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i;
int j;
/* all rx buffers must have already been freed */
for (i = 0; i < 2; i++) {
if (rq->buf_info[i]) {
for (j = 0; j < rq->rx_ring[i].size; j++)
BUG_ON(rq->buf_info[i][j].page != NULL);
}
}
kfree(rq->buf_info[0]);
for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) {
pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
* sizeof(struct Vmxnet3_RxDesc),
rq->rx_ring[i].base,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
rq->buf_info[i] = NULL;
}
if (rq->comp_ring.base) {
pci_free_consistent(adapter->pdev, rq->comp_ring.size *
sizeof(struct Vmxnet3_RxCompDesc),
rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL;
}
}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i;
/* initialize buf_info */
for (i = 0; i < rq->rx_ring[0].size; i++) {
/* 1st buf for a pkt is skbuff */
if (i % adapter->rx_buf_per_pkt == 0) {
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[0][i].len = PAGE_SIZE;
}
}
for (i = 0; i < rq->rx_ring[1].size; i++) {
rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[1][i].len = PAGE_SIZE;
}
/* reset internal state and allocate buffers for both rings */
for (i = 0; i < 2; i++) {
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
rq->uncommitted[i] = 0;
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
}
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
		/* the 1st ring must have at least 1 rx buffer */
return -ENOMEM;
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
sizeof(struct Vmxnet3_RxCompDesc));
rq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset rxctx */
rq->rx_ctx.skb = NULL;
/* stats are not reset */
return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
dev_err(&adapter->netdev->dev, "%s: failed to "
"initialize rx queue%i\n",
adapter->netdev->name, i);
break;
}
}
return err;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
size_t sz;
struct vmxnet3_rx_buf_info *bi;
for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
&rq->rx_ring[i].basePA);
if (!rq->rx_ring[i].base) {
printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
adapter->netdev->name, i);
goto err;
}
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
&rq->comp_ring.basePA);
if (!rq->comp_ring.base) {
printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
adapter->netdev->name);
goto err;
}
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
bi = kzalloc(sz, GFP_KERNEL);
if (!bi)
goto err;
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
return 0;
err:
vmxnet3_rq_destroy(rq, adapter);
return -ENOMEM;
}
static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
dev_err(&adapter->netdev->dev,
"%s: failed to create rx queue%i\n",
adapter->netdev->name, i);
goto err_out;
}
}
return err;
err_out:
vmxnet3_rq_destroy_all(adapter);
return err;
}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
int rcd_done = 0, i;
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
adapter, budget);
return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
struct vmxnet3_rx_queue *rx_queue = container_of(napi,
struct vmxnet3_rx_queue, napi);
int rxd_done;
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
}
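/*
 * Standard NAPI contract, as followed above: consuming fewer descriptors
 * than the budget means the queue is drained, so polling stops
 * (napi_complete) and interrupts are re-armed; returning the full budget
 * keeps the device on the poll list.
 */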
/*
* NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of NAPI credits consumed (# of rx descriptors processed)
*/
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
struct vmxnet3_rx_queue *rq = container_of(napi,
struct vmxnet3_rx_queue, napi);
struct vmxnet3_adapter *adapter = rq->adapter;
int rxd_done;
/* When sharing interrupt with corresponding tx queue, process
* tx completions in that queue as well
*/
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
struct vmxnet3_tx_queue *tq =
&adapter->tx_queue[rq - adapter->rx_queue];
vmxnet3_tq_tx_complete(tq, adapter);
}
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
napi_complete(napi);
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
}
#ifdef CONFIG_PCI_MSI
/*
* Handle completion interrupts on tx queues
* Returns whether or not the intr is handled
*/
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
struct vmxnet3_tx_queue *tq = data;
struct vmxnet3_adapter *adapter = tq->adapter;
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
	/* Handle the case where only one irq is allocated for all tx queues */
if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
vmxnet3_tq_tx_complete(txq, adapter);
}
} else {
vmxnet3_tq_tx_complete(tq, adapter);
}
vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
return IRQ_HANDLED;
}
/*
* Handle completion interrupts on rx queues. Returns whether or not the
* intr is handled
*/
static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
struct vmxnet3_rx_queue *rq = data;
struct vmxnet3_adapter *adapter = rq->adapter;
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
napi_schedule(&rq->napi);
return IRQ_HANDLED;
}
/*
*----------------------------------------------------------------------------
*
* vmxnet3_msix_event --
*
* vmxnet3 msix event intr handler
*
* Result:
* whether or not the intr is handled
*
*----------------------------------------------------------------------------
*/
static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
struct net_device *dev = data;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
if (adapter->shared->ecr)
vmxnet3_process_events(adapter);
vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
if (adapter->intr.type == VMXNET3_IT_INTX) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
return IRQ_NONE;
}
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_all_intrs(adapter);
napi_schedule(&adapter->rx_queue[0].napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_all_intrs(adapter);
vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
vmxnet3_enable_all_intrs(adapter);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
struct vmxnet3_intr *intr = &adapter->intr;
int err = 0, i;
int vector = 0;
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
for (i = 0; i < adapter->num_tx_queues; i++) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
adapter->netdev->name, vector);
err = request_irq(
intr->msix_entries[vector].vector,
vmxnet3_msix_tx, 0,
adapter->tx_queue[i].name,
&adapter->tx_queue[i]);
} else {
sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
adapter->netdev->name, vector);
}
if (err) {
dev_err(&adapter->netdev->dev,
"Failed to request irq for MSIX, %s, "
"error %d\n",
adapter->tx_queue[i].name, err);
return err;
}
/* Handle the case where only 1 MSIx was allocated for
* all tx queues */
if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
for (; i < adapter->num_tx_queues; i++)
adapter->tx_queue[i].comp_ring.intr_idx
= vector;
vector++;
break;
} else {
adapter->tx_queue[i].comp_ring.intr_idx
= vector++;
}
}
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
vector = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
adapter->netdev->name, vector);
else
sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
adapter->netdev->name, vector);
err = request_irq(intr->msix_entries[vector].vector,
vmxnet3_msix_rx, 0,
adapter->rx_queue[i].name,
&(adapter->rx_queue[i]));
if (err) {
printk(KERN_ERR "Failed to request irq for MSIX"
", %s, error %d\n",
adapter->rx_queue[i].name, err);
return err;
}
adapter->rx_queue[i].comp_ring.intr_idx = vector++;
}
sprintf(intr->event_msi_vector_name, "%s-event-%d",
adapter->netdev->name, vector);
err = request_irq(intr->msix_entries[vector].vector,
vmxnet3_msix_event, 0,
intr->event_msi_vector_name, adapter->netdev);
intr->event_intr_idx = vector;
} else if (intr->type == VMXNET3_IT_MSI) {
adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
} else {
#endif
adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
#ifdef CONFIG_PCI_MSI
}
#endif
intr->num_intrs = vector + 1;
if (err) {
printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
":%d\n", adapter->netdev->name, intr->type, err);
} else {
/* Number of rx queues will not change after this */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq->qid = i;
rq->qid2 = i + adapter->num_rx_queues;
}
/* init our intr settings */
for (i = 0; i < intr->num_intrs; i++)
intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
if (adapter->intr.type != VMXNET3_IT_MSIX) {
adapter->intr.event_intr_idx = 0;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_queue[i].comp_ring.intr_idx = 0;
adapter->rx_queue[0].comp_ring.intr_idx = 0;
}
printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
"allocated\n", adapter->netdev->name, intr->type,
intr->mask_mode, intr->num_intrs);
}
return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
struct vmxnet3_intr *intr = &adapter->intr;
BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
switch (intr->type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
int i, vector = 0;
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
for (i = 0; i < adapter->num_tx_queues; i++) {
free_irq(intr->msix_entries[vector++].vector,
&(adapter->tx_queue[i]));
if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
break;
}
}
for (i = 0; i < adapter->num_rx_queues; i++) {
free_irq(intr->msix_entries[vector++].vector,
&(adapter->rx_queue[i]));
}
free_irq(intr->msix_entries[vector].vector,
adapter->netdev);
BUG_ON(vector >= intr->num_intrs);
break;
}
#endif
case VMXNET3_IT_MSI:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
case VMXNET3_IT_INTX:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
default:
BUG_ON(true);
}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
u16 vid;
/* allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!(netdev->flags & IFF_PROMISC)) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
unsigned long flags;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
set_bit(vid, adapter->active_vlans);
return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!(netdev->flags & IFF_PROMISC)) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
unsigned long flags;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
clear_bit(vid, adapter->active_vlans);
return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
struct netdev_hw_addr *ha;
int i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
return buf;
}
static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
new_mode |= VMXNET3_RXM_PROMISC;
} else {
vmxnet3_restore_vlan(adapter);
}
if (netdev->flags & IFF_BROADCAST)
new_mode |= VMXNET3_RXM_BCAST;
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
printk(KERN_INFO "%s: failed to copy mcast list"
", setting ALL_MULTI\n", netdev->name);
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
}
spin_lock_irqsave(&adapter->cmd_lock, flags);
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RX_MODE);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
/*
* Set up driver_shared based on settings in adapter.
*/
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
int i;
memset(shared, 0, sizeof(*shared));
/* driver settings */
shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
devRead->misc.driverInfo.version = cpu_to_le32(
VMXNET3_DRIVER_VERSION_NUM);
devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
*((u32 *)&devRead->misc.driverInfo.gos));
devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */
if (adapter->netdev->features & NETIF_F_RXCSUM)
devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->netdev->features & NETIF_F_LRO) {
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
/* tx queue settings */
devRead->misc.numTxQueues = adapter->num_tx_queues;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
tqc = &adapter->tqd_start[i].conf;
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(
sizeof(struct vmxnet3_tx_buf_info) *
tqc->txRingSize);
tqc->intrIdx = tq->comp_ring.intr_idx;
}
/* rx queue settings */
devRead->misc.numRxQueues = adapter->num_rx_queues;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rqc = &adapter->rqd_start[i].conf;
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
rqc->ddPA = cpu_to_le64(virt_to_phys(
rq->buf_info));
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
rqc->ddLen = cpu_to_le32(
sizeof(struct vmxnet3_rx_buf_info) *
(rqc->rxRingSize[0] +
rqc->rxRingSize[1]));
rqc->intrIdx = rq->comp_ring.intr_idx;
}
#ifdef VMXNET3_RSS
memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
UPT1_RSS_HASH_TYPE_IPV4 |
UPT1_RSS_HASH_TYPE_TCP_IPV6 |
UPT1_RSS_HASH_TYPE_IPV6;
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf);
devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
}
#endif /* VMXNET3_RSS */
/* intr settings */
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
VMXNET3_IMM_AUTO;
devRead->intrConf.numIntrs = adapter->intr.num_intrs;
for (i = 0; i < adapter->intr.num_intrs; i++)
devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
/* the rest are already zeroed */
}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
int err, i;
u32 ret;
unsigned long flags;
dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
adapter->skb_buf_size, adapter->rx_buf_per_pkt,
adapter->tx_queue[0].tx_ring.size,
adapter->rx_queue[0].rx_ring[0].size,
adapter->rx_queue[0].rx_ring[1].size);
vmxnet3_tq_init_all(adapter);
err = vmxnet3_rq_init_all(adapter);
if (err) {
printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
adapter->netdev->name, err);
goto rq_err;
}
err = vmxnet3_request_irqs(adapter);
if (err) {
printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
adapter->netdev->name, err);
goto irq_err;
}
vmxnet3_setup_driver_shared(adapter);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
adapter->shared_pa));
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
printk(KERN_ERR "Failed to activate dev %s: error %u\n",
adapter->netdev->name, ret);
err = -EINVAL;
goto activate_err;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
adapter->rx_queue[i].rx_ring[0].next2fill);
VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
(i * VMXNET3_REG_ALIGN)),
adapter->rx_queue[i].rx_ring[1].next2fill);
}
	/* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
/*
* Check link state when first activating device. It will start the
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
activate_err:
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
/* free up buffers we allocated */
vmxnet3_rq_cleanup_all(adapter);
return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
int i;
unsigned long flags;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_disable(&adapter->rx_queue[i].napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
vmxnet3_tq_cleanup_all(adapter);
vmxnet3_rq_cleanup_all(adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
u32 tmp;
tmp = *(u32 *)mac;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
tmp = (mac[5] << 8) | mac[4];
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
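/*
* Layout note (a sketch of the register mapping, assuming a little-endian
* host): the unaligned u32 load above puts mac[0] in the least significant
* byte, so bytes 0-3 land in MACL and bytes 4-5 in MACH. For the address
* 00:0c:29:ab:cd:ef this writes:
*
*	MACL = 0xab290c00	(mac[3] mac[2] mac[1] mac[0])
*	MACH = 0x0000efcd	(mac[5] mac[4])
*/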
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
}
/* ==================== initialization and cleanup routines ============ */
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
{
int err;
unsigned long mmio_start, mmio_len;
struct pci_dev *pdev = adapter->pdev;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
pci_name(pdev), err);
return err;
}
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
printk(KERN_ERR "pci_set_consistent_dma_mask failed "
"for adapter %s\n", pci_name(pdev));
err = -EIO;
goto err_set_mask;
}
*dma64 = true;
} else {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_ERR "pci_set_dma_mask failed for adapter "
"%s\n", pci_name(pdev));
err = -EIO;
goto err_set_mask;
}
*dma64 = false;
}
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
printk(KERN_ERR "Failed to request region for adapter %s: "
"error %d\n", pci_name(pdev), err);
goto err_set_mask;
}
pci_set_master(pdev);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr0) {
printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
pci_name(pdev));
err = -EIO;
goto err_ioremap;
}
mmio_start = pci_resource_start(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr1) {
printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
pci_name(pdev));
err = -EIO;
goto err_bar1;
}
return 0;
err_bar1:
iounmap(adapter->hw_addr0);
err_ioremap:
pci_release_selected_regions(pdev, (1 << 2) - 1);
err_set_mask:
pci_disable_device(pdev);
return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
BUG_ON(!adapter->pdev);
iounmap(adapter->hw_addr0);
iounmap(adapter->hw_addr1);
pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
pci_disable_device(adapter->pdev);
}
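/*
* The BAR mask used above and in vmxnet3_alloc_pci_resources(),
* (1 << 2) - 1 == 0x3, claims only BARs 0 and 1. A hypothetical named
* constant (not in the driver) would make the intent explicit:
*
*	#define VMXNET3_BAR_MASK	(BIT(0) | BIT(1))	// BAR0 and BAR1
*
*	pci_request_selected_regions(pdev, VMXNET3_BAR_MASK,
*				     vmxnet3_driver_name);
*/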
static void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz, i, ring0_size, ring1_size, comp_size;
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
adapter->skb_buf_size = adapter->netdev->mtu +
VMXNET3_MAX_ETH_HDR_SIZE;
if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
adapter->rx_buf_per_pkt = 1;
} else {
adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
VMXNET3_MAX_ETH_HDR_SIZE;
adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
}
/*
* for simplicity, force the ring0 size to be a multiple of
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
ring0_size = adapter->rx_queue[0].rx_ring[0].size;
ring0_size = (ring0_size + sz - 1) / sz * sz;
ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
rq = &adapter->rx_queue[i];
rq->rx_ring[0].size = ring0_size;
rq->rx_ring[1].size = ring1_size;
rq->comp_ring.size = comp_size;
}
}
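/*
* Worked example of the sizing above: (n + sz - 1) / sz * sz is the usual
* integer round-up-to-a-multiple idiom. Assuming rx_buf_per_pkt == 3 and,
* purely for illustration, VMXNET3_RING_SIZE_ALIGN == 32, sz == 96; a
* requested ring0 size of 256 becomes (256 + 95) / 96 * 96 == 288, capped
* at the largest multiple of 96 not exceeding VMXNET3_RX_RING_MAX_SIZE.
*/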
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size)
{
int err = 0, i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
tq->tx_ring.size = tx_ring_size;
tq->data_ring.size = tx_ring_size;
tq->comp_ring.size = tx_ring_size;
tq->shared = &adapter->tqd_start[i].ctrl;
tq->stopped = true;
tq->adapter = adapter;
tq->qid = i;
err = vmxnet3_tq_create(tq, adapter);
/*
* Too late to change num_tx_queues. We cannot make do with
* fewer queues than we asked for.
*/
if (err)
goto queue_err;
}
adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
/* qid and qid2 for rx queues will be assigned later when num
* of rx queues is finalized after allocating intrs */
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
printk(KERN_ERR "Could not allocate any rx"
"queues. Aborting.\n");
goto queue_err;
} else {
printk(KERN_INFO "Number of rx queues changed "
"to : %d.\n", i);
adapter->num_rx_queues = i;
err = 0;
break;
}
}
}
return err;
queue_err:
vmxnet3_tq_destroy_all(adapter);
return err;
}
static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
int err, i;
adapter = netdev_priv(netdev);
for (i = 0; i < adapter->num_tx_queues; i++)
spin_lock_init(&adapter->tx_queue[i].tx_lock);
err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE,
VMXNET3_DEF_RX_RING_SIZE);
if (err)
goto queue_err;
err = vmxnet3_activate_dev(adapter);
if (err)
goto activate_err;
return 0;
activate_err:
vmxnet3_rq_destroy_all(adapter);
vmxnet3_tq_destroy_all(adapter);
queue_err:
return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
msleep(1);
vmxnet3_quiesce_dev(adapter);
vmxnet3_rq_destroy_all(adapter);
vmxnet3_tq_destroy_all(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
int i;
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
*/
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
dev_close(adapter->netdev);
}
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
return -EINVAL;
netdev->mtu = new_mtu;
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
msleep(1);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
printk(KERN_ERR "%s: failed to re-create rx queues,"
" error %d. Closing it.\n", netdev->name, err);
goto out;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
printk(KERN_ERR "%s: failed to re-activate, error %d. "
"Closing it\n", netdev->name, err);
goto out;
}
}
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
if (err)
vmxnet3_force_close(adapter);
return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
struct net_device *netdev = adapter->netdev;
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO;
if (dma64)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
netdev_info(adapter->netdev,
"features: sg csum vlan jf tso tsoIPv6 lro%s\n",
dma64 ? " highDMA" : "");
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
u32 tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
*(u32 *)mac = tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
mac[4] = tmp & 0xff;
mac[5] = (tmp >> 8) & 0xff;
}
#ifdef CONFIG_PCI_MSI
/*
* Enable MSI-X vectors.
* Returns:
* 0 when the requested number of vectors was enabled,
* VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
* required could be enabled, or
* otherwise the number of vectors that could be enabled (a value smaller
* than VMXNET3_LINUX_MIN_MSIX_VECT).
*/
static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
int vectors)
{
int err = 0, vector_threshold;
vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
vectors);
if (!err) {
adapter->intr.num_intrs = vectors;
return 0;
} else if (err < 0) {
netdev_err(adapter->netdev,
"Failed to enable MSI-X, error: %d\n", err);
vectors = 0;
} else if (err < vector_threshold) {
break;
} else {
/* If the requested number of MSI-X vectors cannot be
* enabled, retry with the minimum number required.
*/
netdev_err(adapter->netdev,
"Failed to enable %d MSI-X, trying %d instead\n",
vectors, vector_threshold);
vectors = vector_threshold;
}
}
netdev_info(adapter->netdev,
"Number of MSI-X interrupts that can be allocated is below the minimum threshold required.\n");
return err;
}
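/*
* Background on the retry loop above: the pci_enable_msix() API used here
* returns 0 on success, a negative errno on hard failure, or a positive
* count of vectors the platform could actually provide, which is why the
* loop retries with the advertised count until it drops below
* vector_threshold. A minimal caller sketch (hypothetical) mirroring
* vmxnet3_alloc_intr_resources():
*
*	err = vmxnet3_acquire_msix_vectors(adapter, nvec);
*	if (err == 0)
*		;	// got all nvec vectors
*	else if (err == VMXNET3_LINUX_MIN_MSIX_VECT)
*		;	// fell back to the minimum vector set
*	else
*		;	// give up on MSI-X; fall back to MSI/INTx
*/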
#endif /* CONFIG_PCI_MSI */
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
u32 cfg;
unsigned long flags;
/* intr settings */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
if (adapter->intr.type == VMXNET3_IT_AUTO) {
adapter->intr.type = VMXNET3_IT_MSIX;
}
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
int vector, err = 0;
adapter->intr.num_intrs = (adapter->share_intr ==
VMXNET3_INTR_TXSHARE) ? 1 :
adapter->num_tx_queues;
adapter->intr.num_intrs += (adapter->share_intr ==
VMXNET3_INTR_BUDDYSHARE) ? 0 :
adapter->num_rx_queues;
adapter->intr.num_intrs += 1; /* for link event */
adapter->intr.num_intrs = (adapter->intr.num_intrs >
VMXNET3_LINUX_MIN_MSIX_VECT
? adapter->intr.num_intrs :
VMXNET3_LINUX_MIN_MSIX_VECT);
for (vector = 0; vector < adapter->intr.num_intrs; vector++)
adapter->intr.msix_entries[vector].entry = vector;
err = vmxnet3_acquire_msix_vectors(adapter,
adapter->intr.num_intrs);
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
printk(KERN_ERR "Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
adapter->intr.num_intrs =
VMXNET3_LINUX_MIN_MSIX_VECT;
}
return;
}
if (!err)
return;
/* If we cannot allocate MSIx vectors use only one rx queue */
netdev_info(adapter->netdev,
"Failed to enable MSI-X, error %d. Limiting #rx queues to 1, trying MSI.\n",
err);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
int err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
}
}
#endif /* CONFIG_PCI_MSI */
adapter->num_rx_queues = 1;
printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
adapter->intr.num_intrs = 1;
}
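/*
* Vector budgeting example for the MSI-X path above, with 4 tx and 4 rx
* queues:
*	VMXNET3_INTR_DONTSHARE:  4 tx + 4 rx + 1 link event = 9 vectors
*	VMXNET3_INTR_TXSHARE:    1 shared tx + 4 rx + 1 link event = 6
*	VMXNET3_INTR_BUDDYSHARE: 4 tx/rx pairs + 1 link event = 5
* The total is then raised to VMXNET3_LINUX_MIN_MSIX_VECT if smaller.
*/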
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
if (adapter->intr.type == VMXNET3_IT_MSIX)
pci_disable_msix(adapter->pdev);
else if (adapter->intr.type == VMXNET3_IT_MSI)
pci_disable_msi(adapter->pdev);
else
BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
schedule_work(&adapter->work);
netif_wake_queue(adapter->netdev);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
struct vmxnet3_adapter *adapter;
adapter = container_of(data, struct vmxnet3_adapter, work);
/* if another thread is resetting the device, no need to proceed */
if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
return;
/* if the device is closed, we must leave it alone */
rtnl_lock();
if (netif_running(adapter->netdev)) {
printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
vmxnet3_activate_dev(adapter);
} else {
printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
}
rtnl_unlock();
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
static int __devinit
vmxnet3_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct net_device_ops vmxnet3_netdev_ops = {
.ndo_open = vmxnet3_open,
.ndo_stop = vmxnet3_close,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
.ndo_set_features = vmxnet3_set_features,
.ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
.ndo_set_rx_mode = vmxnet3_set_mc,
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vmxnet3_netpoll,
#endif
};
int err;
bool dma64 = false; /* silence a spurious "may be used uninitialized" gcc warning */
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
int size;
int num_tx_queues;
int num_rx_queues;
if (!pci_msi_enabled())
enable_mq = 0;
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
(int)num_online_cpus());
else
#endif
num_rx_queues = 1;
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
(int)num_online_cpus());
else
num_tx_queues = 1;
num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
num_tx_queues, num_rx_queues);
if (!netdev)
return -ENOMEM;
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
spin_lock_init(&adapter->cmd_lock);
adapter->shared = pci_alloc_consistent(adapter->pdev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa);
if (!adapter->shared) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
pci_name(pdev));
err = -ENOMEM;
goto err_alloc_shared;
}
adapter->num_rx_queues = num_rx_queues;
adapter->num_tx_queues = num_tx_queues;
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
&adapter->queue_desc_pa);
if (!adapter->tqd_start) {
printk(KERN_ERR "Failed to allocate memory for %s\n",
pci_name(pdev));
err = -ENOMEM;
goto err_alloc_queue_desc;
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
if (adapter->pm_conf == NULL) {
err = -ENOMEM;
goto err_alloc_pm;
}
#ifdef VMXNET3_RSS
adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
if (adapter->rss_conf == NULL) {
err = -ENOMEM;
goto err_alloc_rss;
}
#endif /* VMXNET3_RSS */
err = vmxnet3_alloc_pci_resources(adapter, &dma64);
if (err < 0)
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
} else {
printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
" %s\n", ver, pci_name(pdev));
err = -EBUSY;
goto err_ver;
}
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
} else {
printk(KERN_ERR "Incompatible upt version (0x%x) for "
"adapter %s\n", ver, pci_name(pdev));
err = -EBUSY;
goto err_ver;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
adapter->share_intr = irq_share_mode;
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
adapter->num_tx_queues != adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
vmxnet3_alloc_intr_resources(adapter);
#ifdef VMXNET3_RSS
if (adapter->num_rx_queues > 1 &&
adapter->intr.type == VMXNET3_IT_MSIX) {
adapter->rss = true;
printk(KERN_INFO "RSS is enabled.\n");
} else {
adapter->rss = false;
}
#endif
vmxnet3_read_mac_addr(adapter, mac);
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
if (adapter->intr.type == VMXNET3_IT_MSIX) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
netif_napi_add(adapter->netdev,
&adapter->rx_queue[i].napi,
vmxnet3_poll_rx_only, 64);
}
} else {
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
vmxnet3_poll, 64);
}
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
err = register_netdev(netdev);
if (err) {
printk(KERN_ERR "Failed to register adapter %s\n",
pci_name(pdev));
goto err_register;
}
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
vmxnet3_check_link(adapter, false);
atomic_inc(&devices_found);
return 0;
err_register:
vmxnet3_free_intr_resources(adapter);
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
kfree(adapter->rss_conf);
err_alloc_rss:
#endif
kfree(adapter->pm_conf);
err_alloc_pm:
pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
adapter->queue_desc_pa);
err_alloc_queue_desc:
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
err_alloc_shared:
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
static void __devexit
vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int size = 0;
int num_rx_queues;
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
(int)num_online_cpus());
else
#endif
num_rx_queues = 1;
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
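/*
* num_rx_queues is recomputed here exactly as in vmxnet3_probe_device()
* because the queue descriptor area was sized with the original count;
* adapter->num_rx_queues may since have been reduced to 1 by the MSI-X
* fallback in vmxnet3_alloc_intr_resources().
*/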
cancel_work_sync(&adapter->work);
unregister_netdev(netdev);
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
kfree(adapter->rss_conf);
#endif
kfree(adapter->pm_conf);
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
adapter->queue_desc_pa);
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
free_netdev(netdev);
}
#ifdef CONFIG_PM
static int
vmxnet3_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_PMConf *pmConf;
struct ethhdr *ehdr;
struct arphdr *ahdr;
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
unsigned long flags;
int i = 0;
if (!netif_running(netdev))
return 0;
for (i = 0; i < adapter->num_rx_queues; i++)
napi_disable(&adapter->rx_queue[i].napi);
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
memset(pmConf, 0, sizeof(*pmConf));
if (adapter->wol & WAKE_UCAST) {
pmConf->filters[i].patternSize = ETH_ALEN;
pmConf->filters[i].maskSize = 1;
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
if (adapter->wol & WAKE_ARP) {
in_dev = in_dev_get(netdev);
if (!in_dev)
goto skip_arp;
ifa = (struct in_ifaddr *)in_dev->ifa_list;
if (!ifa)
goto skip_arp;
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
sizeof(struct arphdr) + /* ARP header */
2 * ETH_ALEN + /* 2 Ethernet addresses */
2 * sizeof(u32); /* 2 IPv4 addresses */
pmConf->filters[i].maskSize =
(pmConf->filters[i].patternSize - 1) / 8 + 1;
/* ETH_P_ARP in Ethernet header. */
ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
ehdr->h_proto = htons(ETH_P_ARP);
/* ARPOP_REQUEST in ARP header. */
ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
ahdr->ar_op = htons(ARPOP_REQUEST);
arpreq = (u8 *)(ahdr + 1);
/* The Unicast IPv4 address in 'tip' field. */
arpreq += 2 * ETH_ALEN + sizeof(u32);
*(u32 *)arpreq = ifa->ifa_address;
/* The mask for the relevant bits. */
pmConf->filters[i].mask[0] = 0x00;
pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
pmConf->filters[i].mask[3] = 0x00;
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
adapter->wol);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
return 0;
}
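/*
* How the ARP wake-up mask above selects bytes: each mask byte covers
* eight pattern bytes, bit i of mask[j] matching pattern byte j * 8 + i.
* With a 14-byte Ethernet header followed by an 8-byte ARP header:
*	mask[1] = 0x30 -> bytes 12-13, the EtherType (ETH_P_ARP)
*	mask[2] = 0x30 -> bytes 20-21, the ARP opcode (ARPOP_REQUEST)
*	mask[4] = 0xC0, mask[5] = 0x03 -> bytes 38-41, the target IPv4
* All other pattern bytes are ignored by the device.
*/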
static int
vmxnet3_resume(struct device *device)
{
int err, i = 0;
unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_PMConf *pmConf;
if (!netif_running(netdev))
return 0;
/* Destroy wake-up filters. */
pmConf = adapter->pm_conf;
memset(pmConf, 0, sizeof(*pmConf));
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device_mem(pdev);
if (err != 0)
return err;
pci_enable_wake(pdev, PCI_D0, 0);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_alloc_intr_resources(adapter);
vmxnet3_request_irqs(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
return 0;
}
static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
};
#endif
static struct pci_driver vmxnet3_driver = {
.name = vmxnet3_driver_name,
.id_table = vmxnet3_pciid_table,
.probe = vmxnet3_probe_device,
.remove = __devexit_p(vmxnet3_remove_device),
#ifdef CONFIG_PM
.driver.pm = &vmxnet3_pm_ops,
#endif
};
static int __init
vmxnet3_init_module(void)
{
printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
VMXNET3_DRIVER_VERSION_REPORT);
return pci_register_driver(&vmxnet3_driver);
}
module_init(vmxnet3_init_module);
static void
vmxnet3_exit_module(void)
{
pci_unregister_driver(&vmxnet3_driver);
}
module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
| gpl-2.0 |
madmaze/Meson-3-Kernel | drivers/misc/sgi-gru/gruprocfs.c | 4274 | 10144 | /*
* SN Platform GRU Driver
*
* PROC INTERFACES
*
* This file supports the /proc interfaces for the GRU driver
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
{
unsigned long val = atomic_long_read(v);
seq_printf(s, "%16lu %s\n", val, id);
}
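/*
* The printstat() macro above relies on the preprocessor's stringizing
* operator so every counter prints under its own field name; e.g.
*
*	printstat(s, vdata_alloc);
*
* expands to
*
*	printstat_val(s, &gru_stats.vdata_alloc, "vdata_alloc");
*/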
static int statistics_show(struct seq_file *s, void *p)
{
printstat(s, vdata_alloc);
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
printstat(s, gms_alloc);
printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
printstat(s, free_context);
printstat(s, load_user_context);
printstat(s, load_kernel_context);
printstat(s, lock_kernel_context);
printstat(s, unlock_kernel_context);
printstat(s, steal_user_context);
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
printstat(s, intr_cbr);
printstat(s, intr_tfh);
printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
printstat(s, check_context_retarget_intr);
printstat(s, check_context_unload);
printstat(s, tlb_dropin);
printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
printstat(s, tlb_dropin_fail_range_active);
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
printstat(s, mesq_send_failed);
printstat(s, mesq_noop);
printstat(s, mesq_send_unexpected_error);
printstat(s, mesq_send_lb_overflow);
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
printstat(s, mesq_qf_unexpected_error);
printstat(s, mesq_noop_unexpected_error);
printstat(s, mesq_noop_lb_overflow);
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
printstat(s, mesq_noop_page_overflow);
return 0;
}
static ssize_t statistics_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *data)
{
memset(&gru_stats, 0, sizeof(gru_stats));
return count;
}
static int mcs_statistics_show(struct seq_file *s, void *p)
{
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
max = mcs_op_statistics[op].max;
seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
count ? total / count : 0, max);
}
return 0;
}
static ssize_t mcs_statistics_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *data)
{
memset(mcs_op_statistics, 0, sizeof(mcs_op_statistics));
return count;
}
static int options_show(struct seq_file *s, void *p)
{
seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
static ssize_t options_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *data)
{
char buf[20];
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strict_strtoul(buf, 0, &gru_options))
return -EINVAL;
return count;
}
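/*
* Usage example (not part of the driver): strict_strtoul() is called with
* base 0, so the write handler accepts decimal, octal, or hex input.
* Given the proc layout created in gru_proc_init() below, enabling both
* trace (bit 0) and statistics (bit 1) would look like:
*
*	echo 0x3 > /proc/sgi_uv/gru/debug_options
*/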
static int cch_seq_show(struct seq_file *file, void *data)
{
long gid = *(long *)data;
int i;
struct gru_state *gru = GID_TO_GRU(gid);
struct gru_thread_state *ts;
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
"ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
GRU_OPT_MISS_MASK]);
}
return 0;
}
static int gru_seq_show(struct seq_file *file, void *data)
{
long gid = *(long *)data, ctxfree, cbrfree, dsrfree;
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
"ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
"busy", "busy", "free", "free", "free");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
cbrfree = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
dsrfree = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
seq_printf(file, " %5d%5d%7ld%6ld%6ld%8ld%6ld%6ld\n",
gru->gs_gid, gru->gs_blade_id, GRU_NUM_CCH - ctxfree,
GRU_NUM_CBE - cbrfree, GRU_NUM_DSR_BYTES - dsrfree,
ctxfree, cbrfree, dsrfree);
}
return 0;
}
static void seq_stop(struct seq_file *file, void *data)
{
}
static void *seq_start(struct seq_file *file, loff_t *gid)
{
if (*gid < gru_max_gids)
return gid;
return NULL;
}
static void *seq_next(struct seq_file *file, void *data, loff_t *gid)
{
(*gid)++;
if (*gid < gru_max_gids)
return gid;
return NULL;
}
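/*
* Iterator sketch: the seq_file core drives the callbacks above roughly as
*
*	p = start(file, &pos);
*	while (p) {
*		show(file, p);
*		p = next(file, p, &pos);
*	}
*	stop(file, p);
*
* Here the position *is* the GRU id, so start/next simply hand back the
* loff_t pointer itself until *gid reaches gru_max_gids.
*/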
static const struct seq_operations cch_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = cch_seq_show
};
static const struct seq_operations gru_seq_ops = {
.start = seq_start,
.next = seq_next,
.stop = seq_stop,
.show = gru_seq_show
};
static int statistics_open(struct inode *inode, struct file *file)
{
return single_open(file, statistics_show, NULL);
}
static int mcs_statistics_open(struct inode *inode, struct file *file)
{
return single_open(file, mcs_statistics_show, NULL);
}
static int options_open(struct inode *inode, struct file *file)
{
return single_open(file, options_show, NULL);
}
static int cch_open(struct inode *inode, struct file *file)
{
return seq_open(file, &cch_seq_ops);
}
static int gru_open(struct inode *inode, struct file *file)
{
return seq_open(file, &gru_seq_ops);
}
/* *INDENT-OFF* */
static const struct file_operations statistics_fops = {
.open = statistics_open,
.read = seq_read,
.write = statistics_write,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations mcs_statistics_fops = {
.open = mcs_statistics_open,
.read = seq_read,
.write = mcs_statistics_write,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations options_fops = {
.open = options_open,
.read = seq_read,
.write = options_write,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations cch_fops = {
.open = cch_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations gru_fops = {
.open = gru_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_entry {
char *name;
int mode;
const struct file_operations *fops;
struct proc_dir_entry *entry;
} proc_files[] = {
{"statistics", 0644, &statistics_fops},
{"mcs_statistics", 0644, &mcs_statistics_fops},
{"debug_options", 0644, &options_fops},
{"cch_status", 0444, &cch_fops},
{"gru_status", 0444, &gru_fops},
{NULL}
};
/* *INDENT-ON* */
static struct proc_dir_entry *proc_gru __read_mostly;
static int create_proc_file(struct proc_entry *p)
{
p->entry = proc_create(p->name, p->mode, proc_gru, p->fops);
if (!p->entry)
return -1;
return 0;
}
static void delete_proc_files(void)
{
struct proc_entry *p;
if (proc_gru) {
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
remove_proc_entry("gru", proc_gru->parent);
}
}
int gru_proc_init(void)
{
struct proc_entry *p;
proc_gru = proc_mkdir("sgi_uv/gru", NULL);
for (p = proc_files; p->name; p++)
if (create_proc_file(p))
goto err;
return 0;
err:
delete_proc_files();
return -1;
}
void gru_proc_exit(void)
{
delete_proc_files();
}
| gpl-2.0 |
TheEdge-/Leaping_kernel | drivers/usb/host/ohci-hcd.c | 4530 | 34280 | /*
* Open Host Controller Interface (OHCI) driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
*
* [ Initialisation is based on Linus' ]
* [ uhci code and gregs ohci fragments ]
* [ (C) Copyright 1999 Linus Torvalds ]
* [ (C) Copyright 1999 Gregory P. Smith]
*
*
* OHCI is the main "non-Intel/VIA" standard for USB 1.1 host controller
* interfaces (though some non-x86 Intel chips use it). It supports
* smarter hardware than UHCI. A download link for the spec is
* available through the http://www.usb.org website.
*
* This file is licenced under the GPL.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#define DRIVER_AUTHOR "Roman Weissgaerber, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
/*-------------------------------------------------------------------------*/
#undef OHCI_VERBOSE_DEBUG /* not always helpful */
/* For initializing controller (mask in an HCFS mode too) */
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT \
(OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE \
| OHCI_INTR_RD | OHCI_INTR_WDH)
#ifdef __hppa__
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#define IR_DISABLE
#endif
#ifdef CONFIG_ARCH_OMAP
/* OMAP doesn't support IR (no SMM; not needed) */
#define IR_DISABLE
#endif
/*-------------------------------------------------------------------------*/
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#include "ohci.h"
#include "pci-quirks.h"
static void ohci_dump (struct ohci_hcd *ohci, int verbose);
static int ohci_init (struct ohci_hcd *ohci);
static void ohci_stop (struct usb_hcd *hcd);
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
static int ohci_restart (struct ohci_hcd *ohci);
#endif
#ifdef CONFIG_PCI
static void sb800_prefetch(struct ohci_hcd *ohci, int on);
#else
static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
{
return;
}
#endif
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
#include "ohci-q.c"
/*
* On architectures with edge-triggered interrupts we must never return
* IRQ_NONE.
*/
#if defined(CONFIG_SA1111) /* ... or other edge-triggered systems */
#define IRQ_NOTMINE IRQ_HANDLED
#else
#define IRQ_NOTMINE IRQ_NONE
#endif
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
/*-------------------------------------------------------------------------*/
/*
* queue up an urb for anything except the root hub
*/
static int ohci_urb_enqueue (
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags
) {
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct ed *ed;
urb_priv_t *urb_priv;
unsigned int pipe = urb->pipe;
int i, size = 0;
unsigned long flags;
int retval = 0;
#ifdef OHCI_VERBOSE_DEBUG
urb_print(urb, "SUB", usb_pipein(pipe), -EINPROGRESS);
#endif
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
/* for the private part of the URB we need the number of TDs (size) */
switch (ed->type) {
case PIPE_CONTROL:
/* td_submit_urb() doesn't yet handle these */
if (urb->transfer_buffer_length > 4096)
return -EMSGSIZE;
/* 1 TD for setup, 1 for ACK, plus ... */
size = 2;
/* FALLTHROUGH */
// case PIPE_INTERRUPT:
// case PIPE_BULK:
default:
/* one TD for every 4096 Bytes (can be up to 8K) */
size += urb->transfer_buffer_length / 4096;
/* ... and for any remaining bytes ... */
if ((urb->transfer_buffer_length % 4096) != 0)
size++;
/* ... and maybe a zero length packet to wrap it up */
if (size == 0)
size++;
else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
&& (urb->transfer_buffer_length
% usb_maxpacket (urb->dev, pipe,
usb_pipeout (pipe))) == 0)
size++;
break;
case PIPE_ISOCHRONOUS: /* number of packets from URB */
size = urb->number_of_packets;
break;
}
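/* Worked example of the TD count above: a 10000-byte bulk transfer needs
* 10000 / 4096 == 2 full TDs plus one for the 1808-byte remainder, so
* size == 3 -- plus one more TD only when URB_ZERO_PACKET is set and the
* length is an exact multiple of the endpoint's maxpacket.
*/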
/* allocate the private part of the URB */
urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
mem_flags);
if (!urb_priv)
return -ENOMEM;
INIT_LIST_HEAD (&urb_priv->pending);
urb_priv->length = size;
urb_priv->ed = ed;
/* allocate the TDs (deferring hash chain updates) */
for (i = 0; i < size; i++) {
urb_priv->td [i] = td_alloc (ohci, mem_flags);
if (!urb_priv->td [i]) {
urb_priv->length = i;
urb_free_priv (ohci, urb_priv);
return -ENOMEM;
}
}
spin_lock_irqsave (&ohci->lock, flags);
/* don't submit to a dead HC */
if (!HCD_HW_ACCESSIBLE(hcd)) {
retval = -ENODEV;
goto fail;
}
if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
retval = usb_hcd_link_urb_to_ep(hcd, urb);
if (retval)
goto fail;
/* schedule the ed if needed */
if (ed->state == ED_IDLE) {
retval = ed_schedule (ohci, ed);
if (retval < 0) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
goto fail;
}
if (ed->type == PIPE_ISOCHRONOUS) {
u16 frame = ohci_frame_no(ohci);
/* delay a few frames before the first TD */
frame += max_t (u16, 8, ed->interval);
frame &= ~(ed->interval - 1);
frame |= ed->branch;
urb->start_frame = frame;
/* yes, only URB_ISO_ASAP is supported, and
* urb->start_frame is never used as input.
*/
}
} else if (ed->type == PIPE_ISOCHRONOUS)
urb->start_frame = ed->last_iso + ed->interval;
/* fill the TDs and link them to the ed; and
* enable that part of the schedule, if needed
* and update count of queued periodic urbs
*/
urb->hcpriv = urb_priv;
td_submit_urb (ohci, urb);
fail:
if (retval)
urb_free_priv (ohci, urb_priv);
spin_unlock_irqrestore (&ohci->lock, flags);
return retval;
}
/*
* decouple the URB from the HC queues (TDs, urb_priv).
* reporting is always done
* asynchronously, and we might be dealing with an urb that's
* partially transferred, or an ED with other urbs being unlinked.
*/
static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
int rc;
#ifdef OHCI_VERBOSE_DEBUG
urb_print(urb, "UNLINK", 1, status);
#endif
spin_lock_irqsave (&ohci->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
* handed to us, flag it for unlink and giveback, and force
* some upcoming INTR_SF to call finish_unlinks()
*/
urb_priv = urb->hcpriv;
if (urb_priv) {
if (urb_priv->ed->state == ED_OPER)
start_ed_unlink (ohci, urb_priv->ed);
}
} else {
/*
* with HC dead, we won't respect hc queue pointers
* any more ... just clean up every urb's memory.
*/
if (urb->hcpriv)
finish_urb(ohci, urb, status);
}
spin_unlock_irqrestore (&ohci->lock, flags);
return rc;
}
/*-------------------------------------------------------------------------*/
/* frees config/altsetting state for endpoints,
* including ED memory, dummy TD, and bulk/intr data toggle
*/
static void
ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
struct ed *ed = ep->hcpriv;
unsigned limit = 1000;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
if (!ed)
return;
rescan:
spin_lock_irqsave (&ohci->lock, flags);
if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
ohci->eds_scheduled--;
finish_unlinks (ohci, 0);
}
switch (ed->state) {
case ED_UNLINK: /* wait for hw to finish? */
/* major IRQ delivery trouble loses INTR_SF too... */
if (limit-- == 0) {
ohci_warn(ohci, "ED unlink timeout\n");
if (quirk_zfmicro(ohci)) {
ohci_warn(ohci, "Attempting ZF TD recovery\n");
ohci->ed_to_check = ed;
ohci->zf_delay = 2;
}
goto sanitize;
}
spin_unlock_irqrestore (&ohci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case ED_IDLE: /* fully unlinked */
if (list_empty (&ed->td_list)) {
td_free (ohci, ed->dummy);
ed_free (ohci, ed);
break;
}
/* else FALL THROUGH */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. can't recover; must leak ed.
*/
ohci_err (ohci, "leak ed %p (#%02x) state %d%s\n",
ed, ep->desc.bEndpointAddress, ed->state,
list_empty (&ed->td_list) ? "" : " (has tds)");
td_free (ohci, ed->dummy);
break;
}
ep->hcpriv = NULL;
spin_unlock_irqrestore (&ohci->lock, flags);
}
static int ohci_get_frame (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
return ohci_frame_no(ohci);
}
static void ohci_usb_reset (struct ohci_hcd *ohci)
{
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
static void
ohci_shutdown (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci;
ohci = hcd_to_ohci (hcd);
ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable);
/* Software reset, after which the controller goes into SUSPEND */
ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus);
ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */
udelay(10);
ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval);
}
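/* Returns true when the ZF Micro "lost TD" condition holds: an IN endpoint
* whose hardware head and tail pointers match (so the HC believes its TD
* queue is empty) while the HCD still has TDs on its software td_list.
*/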
static int check_ed(struct ohci_hcd *ohci, struct ed *ed)
{
return (hc32_to_cpu(ohci, ed->hwINFO) & ED_IN) != 0
&& (hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK)
== (hc32_to_cpu(ohci, ed->hwTailP) & TD_MASK)
&& !list_empty(&ed->td_list);
}
/* ZF Micro watchdog timer callback. The ZF Micro chipset sometimes completes
* an interrupt TD but neglects to add it to the donelist. On systems with
* this chipset, we need to periodically check the state of the queues to look
* for such "lost" TDs.
*/
static void unlink_watchdog_func(unsigned long _ohci)
{
unsigned long flags;
unsigned max;
unsigned seen_count = 0;
unsigned i;
struct ed **seen = NULL;
struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
spin_lock_irqsave(&ohci->lock, flags);
max = ohci->eds_scheduled;
if (!max)
goto done;
if (ohci->ed_to_check)
goto out;
seen = kcalloc(max, sizeof *seen, GFP_ATOMIC);
if (!seen)
goto out;
for (i = 0; i < NUM_INTS; i++) {
struct ed *ed = ohci->periodic[i];
while (ed) {
unsigned temp;
/* scan this branch of the periodic schedule tree */
for (temp = 0; temp < seen_count; temp++) {
if (seen[temp] == ed) {
/* we've checked it and what's after */
ed = NULL;
break;
}
}
if (!ed)
break;
seen[seen_count++] = ed;
if (!check_ed(ohci, ed)) {
ed = ed->ed_next;
continue;
}
/* HC's TD list is empty, but HCD sees at least one
* TD that's not been sent through the donelist.
*/
ohci->ed_to_check = ed;
ohci->zf_delay = 2;
/* The HC may wait until the next frame to report the
* TD as done through the donelist and INTR_WDH. (We
* just *assume* it's not a multi-TD interrupt URB;
* those could defer the IRQ more than one frame, using
* DI...) Check again after the next INTR_SF.
*/
ohci_writel(ohci, OHCI_INTR_SF,
&ohci->regs->intrstatus);
ohci_writel(ohci, OHCI_INTR_SF,
&ohci->regs->intrenable);
/* flush those writes */
(void) ohci_readl(ohci, &ohci->regs->control);
goto out;
}
}
out:
kfree(seen);
if (ohci->eds_scheduled)
mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
done:
spin_unlock_irqrestore(&ohci->lock, flags);
}
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
/* init memory, and kick BIOS/SMM off */
static int ohci_init (struct ohci_hcd *ohci)
{
int ret;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
* was never needed for most non-PCI systems ... remove the code?
*/
#ifndef IR_DISABLE
/* SMM owns the HC? not for long! */
if (!no_handshake && ohci_readl (ohci,
&ohci->regs->control) & OHCI_CTRL_IR) {
u32 temp;
ohci_dbg (ohci, "USB HC TakeOver from BIOS/SMM\n");
/* this timeout is arbitrary. we make it long, so systems
* depending on usb keyboards may be usable even if the
* BIOS/SMM code seems pretty broken.
*/
temp = 500; /* arbitrary: five seconds */
ohci_writel (ohci, OHCI_INTR_OC, &ohci->regs->intrenable);
ohci_writel (ohci, OHCI_OCR, &ohci->regs->cmdstatus);
while (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_IR) {
msleep (10);
if (--temp == 0) {
ohci_err (ohci, "USB HC takeover failed!"
" (BIOS/SMM bug)\n");
return -EBUSY;
}
}
ohci_usb_reset (ohci);
}
#endif
/* Disable HC interrupts */
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
/* flush the writes, and save key bits like RWC */
if (ohci_readl (ohci, &ohci->regs->control) & OHCI_CTRL_RWC)
ohci->hc_control |= OHCI_CTRL_RWC;
/* Read the number of ports unless overridden */
if (ohci->num_ports == 0)
ohci->num_ports = roothub_a(ohci) & RH_A_NDP;
if (ohci->hcca)
return 0;
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof *ohci->hcca, &ohci->hcca_dma, 0);
if (!ohci->hcca)
return -ENOMEM;
if ((ret = ohci_mem_init (ohci)) < 0)
ohci_stop (hcd);
else {
create_debug_files (ohci);
}
return ret;
}
/*-------------------------------------------------------------------------*/
/* Start an OHCI controller, set the BUS operational
* resets USB and controller
* enable interrupts
*/
static int ohci_run (struct ohci_hcd *ohci)
{
u32 mask, val;
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
val = ohci_readl (ohci, &ohci->regs->fminterval);
ohci->fminterval = val & 0x3fff;
if (ohci->fminterval != FI)
ohci_dbg (ohci, "fminterval delta %d\n",
ohci->fminterval - FI);
ohci->fminterval |= FSMP (ohci->fminterval) << 16;
/* also: power/overcurrent flags in roothub.a */
}
/* Reset USB nearly "by the book". RemoteWakeupConnected has
* to be checked in case boot firmware (BIOS/SMM/...) has set up
* wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
* If the bus glue detected wakeup capability then it should
* already be enabled; if so we'll just enable it again.
*/
if ((ohci->hc_control & OHCI_CTRL_RWC) != 0)
device_set_wakeup_capable(hcd->self.controller, 1);
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
val = 0;
break;
case OHCI_USB_SUSPEND:
case OHCI_USB_RESUME:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESUME;
val = 10 /* msec wait */;
break;
// case OHCI_USB_RESET:
default:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_USB_RESET;
val = 50 /* msec wait */;
break;
}
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush the writes
(void) ohci_readl (ohci, &ohci->regs->control);
msleep(val);
memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
/* 2msec timelimit here means no irqs/preempt */
spin_lock_irq (&ohci->lock);
retry:
/* HC Reset requires max 10 us delay */
ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
val = 30; /* ... allow extra time */
while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
if (--val == 0) {
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "USB HC reset timed out!\n");
return -1;
}
udelay (1);
}
/* now we're in the SUSPEND state ... must go OPERATIONAL
* within 2msec else HC enters RESUME
*
* ... but some hardware won't init fmInterval "by the book"
* (SiS, OPTi ...), so reset again instead. SiS doesn't need
* this if we write fmInterval after we're OPERATIONAL.
* Unclear about ALi, ServerWorks, and others ... this could
* easily be a longstanding bug in chip init on Linux.
*/
if (ohci->flags & OHCI_QUIRK_INITRESET) {
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
}
/* Tell the controller where the control and bulk lists are
* The lists are empty now. */
ohci_writel (ohci, 0, &ohci->regs->ed_controlhead);
ohci_writel (ohci, 0, &ohci->regs->ed_bulkhead);
/* a reset clears this */
ohci_writel (ohci, (u32) ohci->hcca_dma, &ohci->regs->hcca);
periodic_reinit (ohci);
/* some OHCI implementations are finicky about how they init.
* bogus values here mean not even enumeration could work.
*/
if ((ohci_readl (ohci, &ohci->regs->fminterval) & 0x3fff0000) == 0
|| !ohci_readl (ohci, &ohci->regs->periodicstart)) {
if (!(ohci->flags & OHCI_QUIRK_INITRESET)) {
ohci->flags |= OHCI_QUIRK_INITRESET;
ohci_dbg (ohci, "enabling initreset quirk\n");
goto retry;
}
spin_unlock_irq (&ohci->lock);
ohci_err (ohci, "init err (%08x %04x)\n",
ohci_readl (ohci, &ohci->regs->fminterval),
ohci_readl (ohci, &ohci->regs->periodicstart));
return -EOVERFLOW;
}
/* use rhsc irqs after khubd is fully initialized */
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
hcd->uses_new_polling = 1;
/* start controller operations */
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
/* Choose the interrupts we care about now, others later on demand */
mask = OHCI_INTR_INIT;
ohci_writel (ohci, ~0, &ohci->regs->intrstatus);
ohci_writel (ohci, mask, &ohci->regs->intrenable);
/* handle root hub init quirks ... */
val = roothub_a (ohci);
val &= ~(RH_A_PSM | RH_A_OCPM);
if (ohci->flags & OHCI_QUIRK_SUPERIO) {
/* NSC 87560 and maybe others */
val |= RH_A_NOCP;
val &= ~(RH_A_POTPGT | RH_A_NPS);
ohci_writel (ohci, val, &ohci->regs->roothub.a);
} else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
(ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
val |= RH_A_NPS;
ohci_writel (ohci, val, &ohci->regs->roothub.a);
}
ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
&ohci->regs->roothub.b);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
spin_unlock_irq (&ohci->lock);
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((val >> 23) & 0x1fe);
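/* Why the shift above is 23, not 24: POTPGT occupies bits 24-31 and is in
* 2 ms units, so shifting right by 23 doubles the field in the same step
* (yielding milliseconds) and the 0x1fe mask keeps the even 9-bit result;
* e.g. POTPGT == 25 gives mdelay(50).
*/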
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
setup_timer(&ohci->unlink_watchdog, unlink_watchdog_func,
(unsigned long) ohci);
ohci->eds_scheduled = 0;
ohci->ed_to_check = NULL;
}
ohci_dump (ohci, 1);
return 0;
}
/*-------------------------------------------------------------------------*/
/* an interrupt happens */
static irqreturn_t ohci_irq (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
struct ohci_regs __iomem *regs = ohci->regs;
int ints;
/* Read interrupt status (and flush pending writes). We ignore the
* optimization of checking the LSB of hcca->done_head; it doesn't
* work on all systems (edge triggering for OHCI can be a factor).
*/
ints = ohci_readl(ohci, ®s->intrstatus);
/* Check for an all 1's result which is a typical consequence
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
usb_hc_died(hcd);
return IRQ_HANDLED;
}
/* We only care about interrupts that are enabled */
ints &= ohci_readl(ohci, ®s->intrenable);
/* interrupt for some other device? */
if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
// e.g. due to PCI Master/Target Abort
if (quirk_nec(ohci)) {
/* Workaround for a silicon bug in some NEC chips used
* in Apple's PowerBooks. Adapted from Darwin code.
*/
ohci_err (ohci, "OHCI Unrecoverable Error, scheduling NEC chip restart\n");
ohci_writel (ohci, OHCI_INTR_UE, ®s->intrdisable);
schedule_work (&ohci->nec_work);
} else {
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
ohci->rh_state = OHCI_RH_HALTED;
usb_hc_died(hcd);
}
ohci_dump (ohci, 1);
ohci_usb_reset (ohci);
}
if (ints & OHCI_INTR_RHSC) {
ohci_vdbg(ohci, "rhsc\n");
ohci->next_statechange = jiffies + STATECHANGE_DELAY;
ohci_writel(ohci, OHCI_INTR_RD | OHCI_INTR_RHSC,
®s->intrstatus);
/* NOTE: Vendors didn't always make the same implementation
* choices for RHSC. Many followed the spec; RHSC triggers
* on an edge, like setting and maybe clearing a port status
* change bit. With others it's level-triggered, active
* until khubd clears all the port status change bits. We'll
* always disable it here and rely on polling until khubd
* re-enables it.
*/
ohci_writel(ohci, OHCI_INTR_RHSC, ®s->intrdisable);
usb_hcd_poll_rh_status(hcd);
}
/* For connect and disconnect events, we expect the controller
* to turn on RHSC along with RD. But for remote wakeup events
* this might not happen.
*/
else if (ints & OHCI_INTR_RD) {
ohci_vdbg(ohci, "resume detect\n");
ohci_writel(ohci, OHCI_INTR_RD, ®s->intrstatus);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
if (ohci->autostop) {
spin_lock (&ohci->lock);
ohci_rh_resume (ohci);
spin_unlock (&ohci->lock);
} else
usb_hcd_resume_root_hub(hcd);
}
if (ints & OHCI_INTR_WDH) {
spin_lock (&ohci->lock);
dl_done_list (ohci);
spin_unlock (&ohci->lock);
}
if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
spin_lock(&ohci->lock);
if (ohci->ed_to_check) {
struct ed *ed = ohci->ed_to_check;
if (check_ed(ohci, ed)) {
/* HC thinks the TD list is empty; HCD knows
* at least one TD is outstanding
*/
if (--ohci->zf_delay == 0) {
struct td *td = list_entry(
ed->td_list.next,
struct td, td_list);
ohci_warn(ohci,
"Reclaiming orphan TD %p\n",
td);
takeback_td(ohci, td);
ohci->ed_to_check = NULL;
}
} else
ohci->ed_to_check = NULL;
}
spin_unlock(&ohci->lock);
}
/* could track INTR_SO to reduce available PCI/... bandwidth */
/* handle any pending URB/ED unlinks, leaving INTR_SF enabled
* when there's still unlinking to be done (next frame).
*/
spin_lock (&ohci->lock);
if (ohci->ed_rm_list)
finish_unlinks (ohci, ohci_frame_no(ohci));
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
&& ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
(void) ohci_readl (ohci, &ohci->regs->control);
}
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*/
static void ohci_stop (struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
ohci_dump (ohci, 1);
if (quirk_nec(ohci))
flush_work_sync(&ohci->nec_work);
ohci_usb_reset (ohci);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
free_irq(hcd->irq, hcd);
hcd->irq = 0;
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
if (quirk_amdiso(ohci))
usb_amd_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
if (ohci->hcca) {
dma_free_coherent (hcd->self.controller,
sizeof *ohci->hcca,
ohci->hcca, ohci->hcca_dma);
ohci->hcca = NULL;
ohci->hcca_dma = 0;
}
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_PM) || defined(CONFIG_PCI)
/* must not be called from interrupt context */
static int ohci_restart (struct ohci_hcd *ohci)
{
int temp;
int i;
struct urb_priv *priv;
spin_lock_irq(&ohci->lock);
ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
ohci_dbg(ohci, "abort schedule...\n");
list_for_each_entry (priv, &ohci->pending, pending) {
struct urb *urb = priv->td[0]->urb;
struct ed *ed = priv->ed;
switch (ed->state) {
case ED_OPER:
ed->state = ED_UNLINK;
ed->hwINFO |= cpu_to_hc32(ohci, ED_DEQUEUE);
ed_deschedule (ohci, ed);
ed->ed_next = ohci->ed_rm_list;
ed->ed_prev = NULL;
ohci->ed_rm_list = ed;
/* FALLTHROUGH */
case ED_UNLINK:
break;
default:
ohci_dbg(ohci, "bogus ed %p state %d\n",
ed, ed->state);
}
if (!urb->unlinked)
urb->unlinked = -ESHUTDOWN;
}
finish_unlinks (ohci, 0);
spin_unlock_irq(&ohci->lock);
/* paranoia, in case that didn't work: */
/* empty the interrupt branches */
for (i = 0; i < NUM_INTS; i++) ohci->load [i] = 0;
for (i = 0; i < NUM_INTS; i++) ohci->hcca->int_table [i] = 0;
/* no EDs to remove */
ohci->ed_rm_list = NULL;
/* empty control and bulk lists */
ohci->ed_controltail = NULL;
ohci->ed_bulktail = NULL;
if ((temp = ohci_run (ohci)) < 0) {
ohci_err (ohci, "can't restart, %d\n", temp);
return temp;
}
ohci_dbg(ohci, "restart complete\n");
return 0;
}
#endif
/*-------------------------------------------------------------------------*/
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE ("GPL");
#ifdef CONFIG_PCI
#include "ohci-pci.c"
#define PCI_DRIVER ohci_pci_driver
#endif
#if defined(CONFIG_ARCH_SA1100) && defined(CONFIG_SA1111)
#include "ohci-sa1111.c"
#define SA1111_DRIVER ohci_hcd_sa1111_driver
#endif
#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
#include "ohci-s3c2410.c"
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
#ifdef CONFIG_USB_OHCI_EXYNOS
#include "ohci-exynos.c"
#define PLATFORM_DRIVER exynos_ohci_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_OMAP3
#include "ohci-omap3.c"
#define OMAP3_PLATFORM_DRIVER ohci_hcd_omap3_driver
#endif
#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
#include "ohci-pxa27x.c"
#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
#endif
#ifdef CONFIG_ARCH_EP93XX
#include "ohci-ep93xx.c"
#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
#endif
#ifdef CONFIG_MIPS_ALCHEMY
#include "ohci-au1xxx.c"
#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
#endif
#ifdef CONFIG_PNX8550
#include "ohci-pnx8550.c"
#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
#include "ohci-ppc-soc.c"
#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
#endif
#ifdef CONFIG_ARCH_AT91
#include "ohci-at91.c"
#define PLATFORM_DRIVER ohci_hcd_at91_driver
#endif
#if defined(CONFIG_ARCH_PNX4008) || defined(CONFIG_ARCH_LPC32XX)
#include "ohci-nxp.c"
#define PLATFORM_DRIVER usb_hcd_nxp_driver
#endif
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#include "ohci-da8xx.c"
#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
#endif
#ifdef CONFIG_USB_OHCI_SH
#include "ohci-sh.c"
#define PLATFORM_DRIVER ohci_hcd_sh_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
#include "ohci-ppc-of.c"
#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_PLAT_SPEAR
#include "ohci-spear.c"
#define PLATFORM_DRIVER spear_ohci_hcd_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ohci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_SSB
#include "ohci-ssb.c"
#define SSB_OHCI_DRIVER ssb_ohci_driver
#endif
#ifdef CONFIG_MFD_SM501
#include "ohci-sm501.c"
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
#endif
#ifdef CONFIG_MFD_TC6393XB
#include "ohci-tmio.c"
#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
#endif
#ifdef CONFIG_MACH_JZ4740
#include "ohci-jz4740.c"
#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
#endif
#ifdef CONFIG_USB_OCTEON_OHCI
#include "ohci-octeon.c"
#define PLATFORM_DRIVER ohci_octeon_driver
#endif
#ifdef CONFIG_USB_CNS3XXX_OHCI
#include "ohci-cns3xxx.c"
#define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver
#endif
#ifdef CONFIG_CPU_XLR
#include "ohci-xls.c"
#define PLATFORM_DRIVER ohci_xls_driver
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
#include "ohci-platform.c"
#define PLATFORM_DRIVER ohci_platform_driver
#endif
#if !defined(PCI_DRIVER) && \
!defined(PLATFORM_DRIVER) && \
!defined(OMAP1_PLATFORM_DRIVER) && \
!defined(OMAP3_PLATFORM_DRIVER) && \
!defined(OF_PLATFORM_DRIVER) && \
!defined(SA1111_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && \
!defined(SM501_OHCI_DRIVER) && \
!defined(TMIO_OHCI_DRIVER) && \
!defined(SSB_OHCI_DRIVER)
#error "missing bus glue for ohci-hcd"
#endif
static int __init ohci_hcd_mod_init(void)
{
int retval = 0;
if (usb_disabled())
return -ENODEV;
printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
#ifdef DEBUG
ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
if (!ohci_debug_root) {
retval = -ENOENT;
goto error_debug;
}
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
retval = ps3_ohci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
if (retval < 0)
goto error_ps3;
#endif
#ifdef PLATFORM_DRIVER
retval = platform_driver_register(&PLATFORM_DRIVER);
if (retval < 0)
goto error_platform;
#endif
#ifdef OMAP1_PLATFORM_DRIVER
retval = platform_driver_register(&OMAP1_PLATFORM_DRIVER);
if (retval < 0)
goto error_omap1_platform;
#endif
#ifdef OMAP3_PLATFORM_DRIVER
retval = platform_driver_register(&OMAP3_PLATFORM_DRIVER);
if (retval < 0)
goto error_omap3_platform;
#endif
#ifdef OF_PLATFORM_DRIVER
retval = platform_driver_register(&OF_PLATFORM_DRIVER);
if (retval < 0)
goto error_of_platform;
#endif
#ifdef SA1111_DRIVER
retval = sa1111_driver_register(&SA1111_DRIVER);
if (retval < 0)
goto error_sa1111;
#endif
#ifdef PCI_DRIVER
retval = pci_register_driver(&PCI_DRIVER);
if (retval < 0)
goto error_pci;
#endif
#ifdef SSB_OHCI_DRIVER
retval = ssb_driver_register(&SSB_OHCI_DRIVER);
if (retval)
goto error_ssb;
#endif
#ifdef SM501_OHCI_DRIVER
retval = platform_driver_register(&SM501_OHCI_DRIVER);
if (retval < 0)
goto error_sm501;
#endif
#ifdef TMIO_OHCI_DRIVER
retval = platform_driver_register(&TMIO_OHCI_DRIVER);
if (retval < 0)
goto error_tmio;
#endif
return retval;
/* Error path */
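/* Jumping to error_<x> skips the unregister call for the driver whose
* registration just failed and falls through the unregister calls for
* everything registered before it, in reverse order. */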
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
error_tmio:
#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
#endif
#ifdef SSB_OHCI_DRIVER
ssb_driver_unregister(&SSB_OHCI_DRIVER);
error_ssb:
#endif
#ifdef PCI_DRIVER
pci_unregister_driver(&PCI_DRIVER);
error_pci:
#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
error_sa1111:
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
error_of_platform:
#endif
#ifdef OMAP3_PLATFORM_DRIVER
platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
error_omap3_platform:
#endif
#ifdef OMAP1_PLATFORM_DRIVER
platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
error_omap1_platform:
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
error_platform:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
error_ps3:
#endif
#ifdef DEBUG
debugfs_remove(ohci_debug_root);
ohci_debug_root = NULL;
error_debug:
#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
return retval;
}
module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
#ifdef TMIO_OHCI_DRIVER
platform_driver_unregister(&TMIO_OHCI_DRIVER);
#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
#ifdef SSB_OHCI_DRIVER
ssb_driver_unregister(&SSB_OHCI_DRIVER);
#endif
#ifdef PCI_DRIVER
pci_unregister_driver(&PCI_DRIVER);
#endif
#ifdef SA1111_DRIVER
sa1111_driver_unregister(&SA1111_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
platform_driver_unregister(&OF_PLATFORM_DRIVER);
#endif
#ifdef OMAP3_PLATFORM_DRIVER
platform_driver_unregister(&OMAP3_PLATFORM_DRIVER);
#endif
#ifdef OMAP1_PLATFORM_DRIVER
platform_driver_unregister(&OMAP1_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ohci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
#ifdef DEBUG
debugfs_remove(ohci_debug_root);
#endif
clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ohci_hcd_mod_exit);
| gpl-2.0 |
CrawX/linux-fslc | arch/arm/mach-omap2/clockdomains44xx_data.c | 4786 | 13872 | /*
* OMAP4 Clock domains framework
*
* Copyright (C) 2009-2011 Texas Instruments, Inc.
* Copyright (C) 2009-2011 Nokia Corporation
*
* Abhijit Pagare (abhijitpagare@ti.com)
* Benoit Cousson (b-cousson@ti.com)
* Paul Walmsley (paul@pwsan.com)
*
* This file is automatically generated from the OMAP hardware databases.
* We respectfully ask that any modifications to this file be coordinated
* with the public linux-omap@vger.kernel.org mailing list and the
* authors above to ensure that the autogeneration scripts are kept
* up-to-date with the file contents.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include "clockdomain.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "cm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
#include "prcm_mpu44xx.h"
/* Static Dependencies for OMAP4 Clock Domains */
static struct clkdm_dep d2d_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_2_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l3_init_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ NULL },
};
static struct clkdm_dep ducati_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_2_clkdm" },
{ .clkdm_name = "l3_dss_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l3_gfx_clkdm" },
{ .clkdm_name = "l3_init_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ .clkdm_name = "l4_secure_clkdm" },
{ .clkdm_name = "l4_wkup_clkdm" },
{ .clkdm_name = "tesla_clkdm" },
{ NULL },
};
static struct clkdm_dep iss_wkup_sleep_deps[] = {
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ NULL },
};
static struct clkdm_dep ivahd_wkup_sleep_deps[] = {
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ NULL },
};
static struct clkdm_dep l3_dma_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ducati_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_dss_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l3_init_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ .clkdm_name = "l4_secure_clkdm" },
{ .clkdm_name = "l4_wkup_clkdm" },
{ NULL },
};
static struct clkdm_dep l3_dss_wkup_sleep_deps[] = {
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_2_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ NULL },
};
static struct clkdm_dep l3_gfx_wkup_sleep_deps[] = {
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ NULL },
};
static struct clkdm_dep l3_init_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ .clkdm_name = "l4_secure_clkdm" },
{ .clkdm_name = "l4_wkup_clkdm" },
{ NULL },
};
static struct clkdm_dep l4_secure_wkup_sleep_deps[] = {
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ NULL },
};
static struct clkdm_dep mpu_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ducati_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_2_clkdm" },
{ .clkdm_name = "l3_dss_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l3_gfx_clkdm" },
{ .clkdm_name = "l3_init_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ .clkdm_name = "l4_secure_clkdm" },
{ .clkdm_name = "l4_wkup_clkdm" },
{ .clkdm_name = "tesla_clkdm" },
{ NULL },
};
static struct clkdm_dep tesla_wkup_sleep_deps[] = {
{ .clkdm_name = "abe_clkdm" },
{ .clkdm_name = "ivahd_clkdm" },
{ .clkdm_name = "l3_1_clkdm" },
{ .clkdm_name = "l3_2_clkdm" },
{ .clkdm_name = "l3_emif_clkdm" },
{ .clkdm_name = "l3_init_clkdm" },
{ .clkdm_name = "l4_cfg_clkdm" },
{ .clkdm_name = "l4_per_clkdm" },
{ .clkdm_name = "l4_wkup_clkdm" },
{ NULL },
};
static struct clockdomain l4_cefuse_44xx_clkdm = {
.name = "l4_cefuse_clkdm",
.pwrdm = { .name = "cefuse_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CEFUSE_INST,
.clkdm_offs = OMAP4430_CM2_CEFUSE_CEFUSE_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain l4_cfg_44xx_clkdm = {
.name = "l4_cfg_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_L4CFG_CDOFFS,
.dep_bit = OMAP4430_L4CFG_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
};
static struct clockdomain tesla_44xx_clkdm = {
.name = "tesla_clkdm",
.pwrdm = { .name = "tesla_pwrdm" },
.prcm_partition = OMAP4430_CM1_PARTITION,
.cm_inst = OMAP4430_CM1_TESLA_INST,
.clkdm_offs = OMAP4430_CM1_TESLA_TESLA_CDOFFS,
.dep_bit = OMAP4430_TESLA_STATDEP_SHIFT,
.wkdep_srcs = tesla_wkup_sleep_deps,
.sleepdep_srcs = tesla_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l3_gfx_44xx_clkdm = {
.name = "l3_gfx_clkdm",
.pwrdm = { .name = "gfx_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_GFX_INST,
.clkdm_offs = OMAP4430_CM2_GFX_GFX_CDOFFS,
.dep_bit = OMAP4430_GFX_STATDEP_SHIFT,
.wkdep_srcs = l3_gfx_wkup_sleep_deps,
.sleepdep_srcs = l3_gfx_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain ivahd_44xx_clkdm = {
.name = "ivahd_clkdm",
.pwrdm = { .name = "ivahd_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_IVAHD_INST,
.clkdm_offs = OMAP4430_CM2_IVAHD_IVAHD_CDOFFS,
.dep_bit = OMAP4430_IVAHD_STATDEP_SHIFT,
.wkdep_srcs = ivahd_wkup_sleep_deps,
.sleepdep_srcs = ivahd_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l4_secure_44xx_clkdm = {
.name = "l4_secure_clkdm",
.pwrdm = { .name = "l4per_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_L4PER_INST,
.clkdm_offs = OMAP4430_CM2_L4PER_L4SEC_CDOFFS,
.dep_bit = OMAP4430_L4SEC_STATDEP_SHIFT,
.wkdep_srcs = l4_secure_wkup_sleep_deps,
.sleepdep_srcs = l4_secure_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l4_per_44xx_clkdm = {
.name = "l4_per_clkdm",
.pwrdm = { .name = "l4per_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_L4PER_INST,
.clkdm_offs = OMAP4430_CM2_L4PER_L4PER_CDOFFS,
.dep_bit = OMAP4430_L4PER_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain abe_44xx_clkdm = {
.name = "abe_clkdm",
.pwrdm = { .name = "abe_pwrdm" },
.prcm_partition = OMAP4430_CM1_PARTITION,
.cm_inst = OMAP4430_CM1_ABE_INST,
.clkdm_offs = OMAP4430_CM1_ABE_ABE_CDOFFS,
.dep_bit = OMAP4430_ABE_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l3_instr_44xx_clkdm = {
.name = "l3_instr_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_L3INSTR_CDOFFS,
};
static struct clockdomain l3_init_44xx_clkdm = {
.name = "l3_init_clkdm",
.pwrdm = { .name = "l3init_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_L3INIT_INST,
.clkdm_offs = OMAP4430_CM2_L3INIT_L3INIT_CDOFFS,
.dep_bit = OMAP4430_L3INIT_STATDEP_SHIFT,
.wkdep_srcs = l3_init_wkup_sleep_deps,
.sleepdep_srcs = l3_init_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain d2d_44xx_clkdm = {
.name = "d2d_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_D2D_CDOFFS,
.wkdep_srcs = d2d_wkup_sleep_deps,
.sleepdep_srcs = d2d_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain mpu0_44xx_clkdm = {
.name = "mpu0_clkdm",
.pwrdm = { .name = "cpu0_pwrdm" },
.prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
.cm_inst = OMAP4430_PRCM_MPU_CPU0_INST,
.clkdm_offs = OMAP4430_PRCM_MPU_CPU0_CPU0_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain mpu1_44xx_clkdm = {
.name = "mpu1_clkdm",
.pwrdm = { .name = "cpu1_pwrdm" },
.prcm_partition = OMAP4430_PRCM_MPU_PARTITION,
.cm_inst = OMAP4430_PRCM_MPU_CPU1_INST,
.clkdm_offs = OMAP4430_PRCM_MPU_CPU1_CPU1_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain l3_emif_44xx_clkdm = {
.name = "l3_emif_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_MEMIF_CDOFFS,
.dep_bit = OMAP4430_MEMIF_STATDEP_SHIFT,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain l4_ao_44xx_clkdm = {
.name = "l4_ao_clkdm",
.pwrdm = { .name = "always_on_core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_ALWAYS_ON_INST,
.clkdm_offs = OMAP4430_CM2_ALWAYS_ON_ALWON_CDOFFS,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain ducati_44xx_clkdm = {
.name = "ducati_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_DUCATI_CDOFFS,
.dep_bit = OMAP4430_DUCATI_STATDEP_SHIFT,
.wkdep_srcs = ducati_wkup_sleep_deps,
.sleepdep_srcs = ducati_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain mpu_44xx_clkdm = {
.name = "mpuss_clkdm",
.pwrdm = { .name = "mpu_pwrdm" },
.prcm_partition = OMAP4430_CM1_PARTITION,
.cm_inst = OMAP4430_CM1_MPU_INST,
.clkdm_offs = OMAP4430_CM1_MPU_MPU_CDOFFS,
.wkdep_srcs = mpu_wkup_sleep_deps,
.sleepdep_srcs = mpu_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
static struct clockdomain l3_2_44xx_clkdm = {
.name = "l3_2_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_L3_2_CDOFFS,
.dep_bit = OMAP4430_L3_2_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
};
static struct clockdomain l3_1_44xx_clkdm = {
.name = "l3_1_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_L3_1_CDOFFS,
.dep_bit = OMAP4430_L3_1_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP,
};
static struct clockdomain iss_44xx_clkdm = {
.name = "iss_clkdm",
.pwrdm = { .name = "cam_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CAM_INST,
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dss_44xx_clkdm = {
.name = "l3_dss_clkdm",
.pwrdm = { .name = "dss_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_DSS_INST,
.clkdm_offs = OMAP4430_CM2_DSS_DSS_CDOFFS,
.dep_bit = OMAP4430_DSS_STATDEP_SHIFT,
.wkdep_srcs = l3_dss_wkup_sleep_deps,
.sleepdep_srcs = l3_dss_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
};
static struct clockdomain l4_wkup_44xx_clkdm = {
.name = "l4_wkup_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.prcm_partition = OMAP4430_PRM_PARTITION,
.cm_inst = OMAP4430_PRM_WKUP_CM_INST,
.clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
.dep_bit = OMAP4430_L4WKUP_STATDEP_SHIFT,
.flags = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
};
static struct clockdomain emu_sys_44xx_clkdm = {
.name = "emu_sys_clkdm",
.pwrdm = { .name = "emu_pwrdm" },
.prcm_partition = OMAP4430_PRM_PARTITION,
.cm_inst = OMAP4430_PRM_EMU_CM_INST,
.clkdm_offs = OMAP4430_PRM_EMU_CM_EMU_CDOFFS,
.flags = (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_FORCE_WAKEUP |
CLKDM_MISSING_IDLE_REPORTING),
};
static struct clockdomain l3_dma_44xx_clkdm = {
.name = "l3_dma_clkdm",
.pwrdm = { .name = "core_pwrdm" },
.prcm_partition = OMAP4430_CM2_PARTITION,
.cm_inst = OMAP4430_CM2_CORE_INST,
.clkdm_offs = OMAP4430_CM2_CORE_SDMA_CDOFFS,
.wkdep_srcs = l3_dma_wkup_sleep_deps,
.sleepdep_srcs = l3_dma_wkup_sleep_deps,
.flags = CLKDM_CAN_FORCE_WAKEUP | CLKDM_CAN_HWSUP,
};
/* As clockdomains are added or removed above, this list must also be changed */
static struct clockdomain *clockdomains_omap44xx[] __initdata = {
&l4_cefuse_44xx_clkdm,
&l4_cfg_44xx_clkdm,
&tesla_44xx_clkdm,
&l3_gfx_44xx_clkdm,
&ivahd_44xx_clkdm,
&l4_secure_44xx_clkdm,
&l4_per_44xx_clkdm,
&abe_44xx_clkdm,
&l3_instr_44xx_clkdm,
&l3_init_44xx_clkdm,
&d2d_44xx_clkdm,
&mpu0_44xx_clkdm,
&mpu1_44xx_clkdm,
&l3_emif_44xx_clkdm,
&l4_ao_44xx_clkdm,
&ducati_44xx_clkdm,
&mpu_44xx_clkdm,
&l3_2_44xx_clkdm,
&l3_1_44xx_clkdm,
&iss_44xx_clkdm,
&l3_dss_44xx_clkdm,
&l4_wkup_44xx_clkdm,
&emu_sys_44xx_clkdm,
&l3_dma_44xx_clkdm,
NULL
};
void __init omap44xx_clockdomains_init(void)
{
clkdm_register_platform_funcs(&omap4_clkdm_operations);
clkdm_register_clkdms(clockdomains_omap44xx);
clkdm_complete_init();
}
| gpl-2.0 |
zeroblade1984/LG_MSM8226 | drivers/input/touchscreen/da9034-ts.c | 5042 | 8860 | /*
* Touchscreen driver for Dialog Semiconductor DA9034
*
* Copyright (C) 2006-2008 Marvell International Ltd.
* Fengwei Yin <fengwei.yin@marvell.com>
* Bin Yang <bin.yang@marvell.com>
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/mfd/da903x.h>
#include <linux/slab.h>
#define DA9034_MANUAL_CTRL 0x50
#define DA9034_LDO_ADC_EN (1 << 4)
#define DA9034_AUTO_CTRL1 0x51
#define DA9034_AUTO_CTRL2 0x52
#define DA9034_AUTO_TSI_EN (1 << 3)
#define DA9034_PEN_DETECT (1 << 4)
#define DA9034_TSI_CTRL1 0x53
#define DA9034_TSI_CTRL2 0x54
#define DA9034_TSI_X_MSB 0x6c
#define DA9034_TSI_Y_MSB 0x6d
#define DA9034_TSI_XY_LSB 0x6e
enum {
STATE_IDLE, /* wait for pendown */
STATE_BUSY, /* TSI busy sampling */
STATE_STOP, /* sample available */
STATE_WAIT, /* Wait to start next sample */
};
enum {
EVENT_PEN_DOWN,
EVENT_PEN_UP,
EVENT_TSI_READY,
EVENT_TIMEDOUT,
};
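/*
* Summary of the sampling state machine implemented in
* da9034_event_handler() below (derived from the code, for reference):
*
*   IDLE --PEN_DOWN--> BUSY --TSI_READY--> STOP
*   STOP --PEN_DOWN--> WAIT  (report position, schedule next sample)
*   STOP --PEN_UP----> IDLE  (report pen up)
*   WAIT --TIMEDOUT--> BUSY if the pen is still down, else IDLE
*
* Any register error drops back to IDLE with pen-down detection
* re-armed.
*/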
struct da9034_touch {
struct device *da9034_dev;
struct input_dev *input_dev;
struct delayed_work tsi_work;
struct notifier_block notifier;
int state;
int interval_ms;
int x_inverted;
int y_inverted;
int last_x;
int last_y;
};
static inline int is_pen_down(struct da9034_touch *touch)
{
return da903x_query_status(touch->da9034_dev, DA9034_STATUS_PEN_DOWN);
}
static inline int detect_pen_down(struct da9034_touch *touch, int on)
{
if (on)
return da903x_set_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_PEN_DETECT);
else
return da903x_clr_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_PEN_DETECT);
}
static int read_tsi(struct da9034_touch *touch)
{
uint8_t _x, _y, _v;
int ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_X_MSB, &_x);
if (ret)
return ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_Y_MSB, &_y);
if (ret)
return ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_XY_LSB, &_v);
if (ret)
return ret;
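/* Each coordinate is 10 bits wide: the 8 MSBs come from the per-axis
* MSB registers, the 2 LSBs are packed into TSI_XY_LSB (bits 1:0 for
* X, bits 3:2 for Y). */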
touch->last_x = ((_x << 2) & 0x3fc) | (_v & 0x3);
touch->last_y = ((_y << 2) & 0x3fc) | ((_v & 0xc) >> 2);
return 0;
}
static inline int start_tsi(struct da9034_touch *touch)
{
return da903x_set_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN);
}
static inline int stop_tsi(struct da9034_touch *touch)
{
return da903x_clr_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN);
}
static inline void report_pen_down(struct da9034_touch *touch)
{
int x = touch->last_x;
int y = touch->last_y;
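/* Samples are 10-bit values (0-1023, matching the ABS_X/ABS_Y ranges
* registered in probe), hence the inversion against 1024 below. */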
x &= 0xfff;
if (touch->x_inverted)
x = 1024 - x;
y &= 0xfff;
if (touch->y_inverted)
y = 1024 - y;
input_report_abs(touch->input_dev, ABS_X, x);
input_report_abs(touch->input_dev, ABS_Y, y);
input_report_key(touch->input_dev, BTN_TOUCH, 1);
input_sync(touch->input_dev);
}
static inline void report_pen_up(struct da9034_touch *touch)
{
input_report_key(touch->input_dev, BTN_TOUCH, 0);
input_sync(touch->input_dev);
}
static void da9034_event_handler(struct da9034_touch *touch, int event)
{
int err;
switch (touch->state) {
case STATE_IDLE:
if (event != EVENT_PEN_DOWN)
break;
/* Enable auto measurement of the TSI; this will
* automatically disable pen down detection
*/
err = start_tsi(touch);
if (err)
goto err_reset;
touch->state = STATE_BUSY;
break;
case STATE_BUSY:
if (event != EVENT_TSI_READY)
break;
err = read_tsi(touch);
if (err)
goto err_reset;
/* Disable auto measurement of the TSI, so that
* pen down status will be available
*/
err = stop_tsi(touch);
if (err)
goto err_reset;
touch->state = STATE_STOP;
/* FIXME: PEN_{UP/DOWN} events are expected to become
* available once the TSI is stopped, but this has been
* found not always to be true; delaying briefly and
* simulating the event here is more reliable.
*/
mdelay(1);
da9034_event_handler(touch,
is_pen_down(touch) ? EVENT_PEN_DOWN :
EVENT_PEN_UP);
break;
case STATE_STOP:
if (event == EVENT_PEN_DOWN) {
report_pen_down(touch);
schedule_delayed_work(&touch->tsi_work,
msecs_to_jiffies(touch->interval_ms));
touch->state = STATE_WAIT;
}
if (event == EVENT_PEN_UP) {
report_pen_up(touch);
touch->state = STATE_IDLE;
}
break;
case STATE_WAIT:
if (event != EVENT_TIMEDOUT)
break;
if (is_pen_down(touch)) {
start_tsi(touch);
touch->state = STATE_BUSY;
} else {
report_pen_up(touch);
touch->state = STATE_IDLE;
}
break;
}
return;
err_reset:
touch->state = STATE_IDLE;
stop_tsi(touch);
detect_pen_down(touch, 1);
}
static void da9034_tsi_work(struct work_struct *work)
{
struct da9034_touch *touch =
container_of(work, struct da9034_touch, tsi_work.work);
da9034_event_handler(touch, EVENT_TIMEDOUT);
}
static int da9034_touch_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct da9034_touch *touch =
container_of(nb, struct da9034_touch, notifier);
if (event & DA9034_EVENT_TSI_READY)
da9034_event_handler(touch, EVENT_TSI_READY);
if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE)
da9034_event_handler(touch, EVENT_PEN_DOWN);
return 0;
}
static int da9034_touch_open(struct input_dev *dev)
{
struct da9034_touch *touch = input_get_drvdata(dev);
int ret;
ret = da903x_register_notifier(touch->da9034_dev, &touch->notifier,
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY);
if (ret)
return -EBUSY;
/* Enable ADC LDO */
ret = da903x_set_bits(touch->da9034_dev,
DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN);
if (ret)
return ret;
/* TSI_DELAY: 3 slots, TSI_SKIP: 3 slots */
ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL1, 0x1b);
if (ret)
return ret;
ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL2, 0x00);
if (ret)
return ret;
touch->state = STATE_IDLE;
detect_pen_down(touch, 1);
return 0;
}
static void da9034_touch_close(struct input_dev *dev)
{
struct da9034_touch *touch = input_get_drvdata(dev);
da903x_unregister_notifier(touch->da9034_dev, &touch->notifier,
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY);
cancel_delayed_work_sync(&touch->tsi_work);
touch->state = STATE_IDLE;
stop_tsi(touch);
detect_pen_down(touch, 0);
/* Disable ADC LDO */
da903x_clr_bits(touch->da9034_dev,
DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN);
}
static int __devinit da9034_touch_probe(struct platform_device *pdev)
{
struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
struct da9034_touch *touch;
struct input_dev *input_dev;
int ret;
touch = kzalloc(sizeof(struct da9034_touch), GFP_KERNEL);
if (touch == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
touch->da9034_dev = pdev->dev.parent;
if (pdata) {
touch->interval_ms = pdata->interval_ms;
touch->x_inverted = pdata->x_inverted;
touch->y_inverted = pdata->y_inverted;
} else
/* fallback into default */
touch->interval_ms = 10;
INIT_DELAYED_WORK(&touch->tsi_work, da9034_tsi_work);
touch->notifier.notifier_call = da9034_touch_notifier;
input_dev = input_allocate_device();
if (!input_dev) {
dev_err(&pdev->dev, "failed to allocate input device\n");
ret = -ENOMEM;
goto err_free_touch;
}
input_dev->name = pdev->name;
input_dev->open = da9034_touch_open;
input_dev->close = da9034_touch_close;
input_dev->dev.parent = &pdev->dev;
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(ABS_X, input_dev->absbit);
__set_bit(ABS_Y, input_dev->absbit);
input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
touch->input_dev = input_dev;
input_set_drvdata(input_dev, touch);
ret = input_register_device(input_dev);
if (ret)
goto err_free_input;
platform_set_drvdata(pdev, touch);
return 0;
err_free_input:
input_free_device(input_dev);
err_free_touch:
kfree(touch);
return ret;
}
static int __devexit da9034_touch_remove(struct platform_device *pdev)
{
struct da9034_touch *touch = platform_get_drvdata(pdev);
input_unregister_device(touch->input_dev);
kfree(touch);
return 0;
}
static struct platform_driver da9034_touch_driver = {
.driver = {
.name = "da9034-touch",
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
.remove = __devexit_p(da9034_touch_remove),
};
module_platform_driver(da9034_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9034-touch");
| gpl-2.0 |
CyanogenMod/android_kernel_xiaomi_cancro | arch/arm/mach-zynq/timer.c | 5554 | 9036 | /*
* This file contains driver for the Xilinx PS Timer Counter IP.
*
* Copyright (C) 2011 Xilinx
*
* based on arch/mips/kernel/time.c timer driver
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <asm/mach/time.h>
#include <mach/zynq_soc.h>
#include "common.h"
#define IRQ_TIMERCOUNTER0 42
/*
* This driver configures the 2 16-bit count-up timers as follows:
*
* T1: Timer 1, clocksource for generic timekeeping
* T2: Timer 2, clockevent source for hrtimers
* T3: Timer 3, <unused>
*
* The input frequency to the timer module for emulation is 2.5MHz which is
* common to all the timer channels (T1, T2, and T3). With a pre-scaler of 32,
* the timers are clocked at 78.125KHz (12.8 us resolution).
*
* The input frequency to the timer module in silicon will be 200MHz. With the
* pre-scaler of 32, the timers are clocked at 6.25MHz (160ns resolution).
*/
#define XTTCPSS_CLOCKSOURCE 0 /* Timer 1 as a generic timekeeping */
#define XTTCPSS_CLOCKEVENT 1 /* Timer 2 as a clock event */
#define XTTCPSS_TIMER_BASE TTC0_BASE
#define XTTCPCC_EVENT_TIMER_IRQ (IRQ_TIMERCOUNTER0 + 1)
/*
* Timer Register Offset Definitions of Timer 1, Increment base address by 4
* and use same offsets for Timer 2
*/
#define XTTCPSS_CLK_CNTRL_OFFSET 0x00 /* Clock Control Reg, RW */
#define XTTCPSS_CNT_CNTRL_OFFSET 0x0C /* Counter Control Reg, RW */
#define XTTCPSS_COUNT_VAL_OFFSET 0x18 /* Counter Value Reg, RO */
#define XTTCPSS_INTR_VAL_OFFSET 0x24 /* Interval Count Reg, RW */
#define XTTCPSS_MATCH_1_OFFSET 0x30 /* Match 1 Value Reg, RW */
#define XTTCPSS_MATCH_2_OFFSET 0x3C /* Match 2 Value Reg, RW */
#define XTTCPSS_MATCH_3_OFFSET 0x48 /* Match 3 Value Reg, RW */
#define XTTCPSS_ISR_OFFSET 0x54 /* Interrupt Status Reg, RO */
#define XTTCPSS_IER_OFFSET 0x60 /* Interrupt Enable Reg, RW */
#define XTTCPSS_CNT_CNTRL_DISABLE_MASK 0x1
/* Setup the timers to use pre-scaling */
#define TIMER_RATE (PERIPHERAL_CLOCK_RATE / 32)
/**
* struct xttcpss_timer - This definition defines local timer structure
*
* @base_addr: Base address of timer
**/
struct xttcpss_timer {
void __iomem *base_addr;
};
static struct xttcpss_timer timers[2];
static struct clock_event_device xttcpss_clockevent;
/**
* xttcpss_set_interval - Set the timer interval value
*
* @timer: Pointer to the timer instance
* @cycles: Timer interval ticks
**/
static void xttcpss_set_interval(struct xttcpss_timer *timer,
unsigned long cycles)
{
u32 ctrl_reg;
/* Disable the counter, set the counter value and re-enable counter */
ctrl_reg = __raw_readl(timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
ctrl_reg |= XTTCPSS_CNT_CNTRL_DISABLE_MASK;
__raw_writel(ctrl_reg, timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
__raw_writel(cycles, timer->base_addr + XTTCPSS_INTR_VAL_OFFSET);
/* Reset the counter (the 0x10 bit) so that it starts from 0;
* in one-shot mode this is required for the timing to be right. */
ctrl_reg |= 0x10;
ctrl_reg &= ~XTTCPSS_CNT_CNTRL_DISABLE_MASK;
__raw_writel(ctrl_reg, timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
}
/**
* xttcpss_clock_event_interrupt - Clock event timer interrupt handler
*
* @irq: IRQ number of the Timer
* @dev_id: void pointer to the xttcpss_timer instance
*
* returns: Always IRQ_HANDLED - success
**/
static irqreturn_t xttcpss_clock_event_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &xttcpss_clockevent;
struct xttcpss_timer *timer = dev_id;
/* Acknowledge the interrupt and call event handler */
__raw_writel(__raw_readl(timer->base_addr + XTTCPSS_ISR_OFFSET),
timer->base_addr + XTTCPSS_ISR_OFFSET);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct irqaction event_timer_irq = {
.name = "xttcpss clockevent",
.flags = IRQF_DISABLED | IRQF_TIMER,
.handler = xttcpss_clock_event_interrupt,
};
/**
* xttcpss_timer_hardware_init - Initialize the timer hardware
*
* Initialize the hardware to start the clock source, get the clock
* event timer ready to use, and hook up the interrupt.
**/
static void __init xttcpss_timer_hardware_init(void)
{
/* Set up the clock source counter as an incrementing counter
* with no interrupt that rolls over at 0xFFFF, pre-scaled
* by 32. Let it start running now.
*/
timers[XTTCPSS_CLOCKSOURCE].base_addr = XTTCPSS_TIMER_BASE;
__raw_writel(0x0, timers[XTTCPSS_CLOCKSOURCE].base_addr +
XTTCPSS_IER_OFFSET);
__raw_writel(0x9, timers[XTTCPSS_CLOCKSOURCE].base_addr +
XTTCPSS_CLK_CNTRL_OFFSET);
__raw_writel(0x10, timers[XTTCPSS_CLOCKSOURCE].base_addr +
XTTCPSS_CNT_CNTRL_OFFSET);
/* Setup the clock event timer to be an interval timer which
* is prescaled by 32 using the interval interrupt. Leave it
* disabled for now.
*/
timers[XTTCPSS_CLOCKEVENT].base_addr = XTTCPSS_TIMER_BASE + 4;
__raw_writel(0x23, timers[XTTCPSS_CLOCKEVENT].base_addr +
XTTCPSS_CNT_CNTRL_OFFSET);
__raw_writel(0x9, timers[XTTCPSS_CLOCKEVENT].base_addr +
XTTCPSS_CLK_CNTRL_OFFSET);
__raw_writel(0x1, timers[XTTCPSS_CLOCKEVENT].base_addr +
XTTCPSS_IER_OFFSET);
/* Setup IRQ the clock event timer */
event_timer_irq.dev_id = &timers[XTTCPSS_CLOCKEVENT];
setup_irq(XTTCPCC_EVENT_TIMER_IRQ, &event_timer_irq);
}
/**
* __raw_readl_cycles - Reads the timer counter register
*
* returns: Current timer counter register value
**/
static cycle_t __raw_readl_cycles(struct clocksource *cs)
{
struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKSOURCE];
return (cycle_t)__raw_readl(timer->base_addr +
XTTCPSS_COUNT_VAL_OFFSET);
}
/*
* Instantiate and initialize the clock source structure
*/
static struct clocksource clocksource_xttcpss = {
.name = "xttcpss_timer1",
.rating = 200, /* Reasonable clock source */
.read = __raw_readl_cycles,
.mask = CLOCKSOURCE_MASK(16),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/**
* xttcpss_set_next_event - Sets the time interval for next event
*
* @cycles: Timer interval ticks
* @evt: Address of clock event instance
*
* returns: Always 0 - success
**/
static int xttcpss_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT];
xttcpss_set_interval(timer, cycles);
return 0;
}
/**
* xttcpss_set_mode - Sets the mode of timer
*
* @mode: Mode to be set
* @evt: Address of clock event instance
**/
static void xttcpss_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
struct xttcpss_timer *timer = &timers[XTTCPSS_CLOCKEVENT];
u32 ctrl_reg;
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
xttcpss_set_interval(timer, TIMER_RATE / HZ);
break;
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
ctrl_reg = __raw_readl(timer->base_addr +
XTTCPSS_CNT_CNTRL_OFFSET);
ctrl_reg |= XTTCPSS_CNT_CNTRL_DISABLE_MASK;
__raw_writel(ctrl_reg,
timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
break;
case CLOCK_EVT_MODE_RESUME:
ctrl_reg = __raw_readl(timer->base_addr +
XTTCPSS_CNT_CNTRL_OFFSET);
ctrl_reg &= ~XTTCPSS_CNT_CNTRL_DISABLE_MASK;
__raw_writel(ctrl_reg,
timer->base_addr + XTTCPSS_CNT_CNTRL_OFFSET);
break;
}
}
/*
* Instantiate and initialize the clock event structure
*/
static struct clock_event_device xttcpss_clockevent = {
.name = "xttcpss_timer2",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = xttcpss_set_next_event,
.set_mode = xttcpss_set_mode,
.rating = 200,
};
/**
* xttcpss_timer_init - Initialize the timer
*
* Initializes the timer hardware and registers the clock source and clock
* event timers with the Linux kernel timer framework
**/
static void __init xttcpss_timer_init(void)
{
xttcpss_timer_hardware_init();
clocksource_register_hz(&clocksource_xttcpss, TIMER_RATE);
/* Calculate the parameters to allow the clockevent to operate
* using integer math.
*/
clockevents_calc_mult_shift(&xttcpss_clockevent, TIMER_RATE, 4);
xttcpss_clockevent.max_delta_ns =
clockevent_delta2ns(0xfffe, &xttcpss_clockevent);
xttcpss_clockevent.min_delta_ns =
clockevent_delta2ns(1, &xttcpss_clockevent);
/* Indicate that clock event is on 1st CPU as SMP boot needs it */
xttcpss_clockevent.cpumask = cpumask_of(0);
clockevents_register_device(&xttcpss_clockevent);
}
/*
* Instantiate and initialize the system timer structure
*/
struct sys_timer xttcpss_sys_timer = {
.init = xttcpss_timer_init,
};
| gpl-2.0 |
SaatvikShukla/android_kernel_oneplus_msm8974 | arch/h8300/kernel/gpio.c | 9650 | 3641 | /*
* linux/arch/h8300/kernel/gpio.c
*
* Yoshinori Sato <ysato@users.sourceforge.jp>
*
*/
/*
* Internal I/O Port Management
*/
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#define _(addr) (volatile unsigned char *)(addr)
#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
#include <asm/regs306x.h>
static volatile unsigned char *ddrs[] = {
_(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
};
#define MAX_PORT 11
#endif
#if defined(CONFIG_H83002) || defined(CONFIG_H8048)
/* Fix me!! */
#include <asm/regs306x.h>
static volatile unsigned char *ddrs[] = {
_(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
};
#define MAX_PORT 11
#endif
#if defined(CONFIG_H8S2678)
#include <asm/regs267x.h>
static volatile unsigned char *ddrs[] = {
_(P1DDR),_(P2DDR),_(P3DDR),NULL ,_(P5DDR),_(P6DDR),
_(P7DDR),_(P8DDR),NULL, _(PADDR),_(PBDDR),_(PCDDR),
_(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR),
};
#define MAX_PORT 17
#endif
#undef _
#if !defined(P1DDR)
#error Unsupported CPU Selection
#endif
static struct {
unsigned char used;
unsigned char ddr;
} gpio_regs[MAX_PORT];
extern char *_platform_gpio_table(int length);
int h8300_reserved_gpio(int port, unsigned int bits)
{
unsigned char *used;
if (port < 0 || port >= MAX_PORT)
return -1;
used = &(gpio_regs[port].used);
if ((*used & bits) != 0)
return 0;
*used |= bits;
return 1;
}
int h8300_free_gpio(int port, unsigned int bits)
{
unsigned char *used;
if (port < 0 || port >= MAX_PORT)
return -1;
used = &(gpio_regs[port].used);
if ((*used & bits) != bits)
return 0;
*used &= (~bits);
return 1;
}
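/* The port_bit argument below packs a port index into bits 15:8 and a
* single-pin mask into bits 7:0, i.e. port_bit = (port << 8) | bit. */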
int h8300_set_gpio_dir(int port_bit,int dir)
{
int port = (port_bit >> 8) & 0xff;
int bit = port_bit & 0xff;
if (ddrs[port] == NULL)
return 0;
if (gpio_regs[port].used & bit) {
if (dir)
gpio_regs[port].ddr |= bit;
else
gpio_regs[port].ddr &= ~bit;
*ddrs[port] = gpio_regs[port].ddr;
return 1;
} else
return 0;
}
int h8300_get_gpio_dir(int port_bit)
{
int port = (port_bit >> 8) & 0xff;
int bit = port_bit & 0xff;
if (ddrs[port] == NULL)
return 0;
if (gpio_regs[port].used & bit) {
return (gpio_regs[port].ddr & bit) != 0;
} else
return -1;
}
#if defined(CONFIG_PROC_FS)
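/* Render one port as eight characters, bit 7 leftmost: 'I' for a
* reserved pin configured as input, 'O' for output, '-' for a pin
* that was never reserved. */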
static char *port_status(int portno)
{
static char result[10];
static const char io[2]={'I','O'};
char *rp;
int c;
unsigned char used,ddr;
used = gpio_regs[portno].used;
ddr = gpio_regs[portno].ddr;
result[8]='\0';
rp = result + 7;
for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1)
if (used & 0x01)
*rp = io[ ddr & 0x01];
else
*rp = '-';
return result;
}
static int gpio_proc_read(char *buf, char **start, off_t offset,
int len, int *unused_i, void *unused_v)
{
int c,outlen;
static const char port_name[]="123456789ABCDEFGH";
outlen = 0;
for (c = 0; c < MAX_PORT; c++) {
if (ddrs[c] == NULL)
continue ;
len = sprintf(buf,"P%c: %s\n",port_name[c],port_status(c));
buf += len;
outlen += len;
}
return outlen;
}
static __init int register_proc(void)
{
struct proc_dir_entry *proc_gpio;
proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL);
if (proc_gpio)
proc_gpio->read_proc = gpio_proc_read;
return proc_gpio != NULL;
}
__initcall(register_proc);
#endif
void __init h8300_gpio_init(void)
{
memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs));
}
| gpl-2.0 |
profglavcho/mt6735-kernel-3.10.61 | drivers/mfd/htc-egpio.c | 10418 | 11081 | /*
* Support for the GPIO/IRQ expander chips present on several HTC phones.
* These are implemented in CPLD chips present on the board.
*
* Copyright (c) 2007 Kevin O'Connor <kevin@koconnor.net>
* Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
*
* This file may be distributed under the terms of the GNU GPL license.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mfd/htc-egpio.h>
struct egpio_chip {
int reg_start;
int cached_values;
unsigned long is_out;
struct device *dev;
struct gpio_chip chip;
};
struct egpio_info {
spinlock_t lock;
/* iomem info */
void __iomem *base_addr;
int bus_shift; /* byte shift */
int reg_shift; /* bit shift */
int reg_mask;
/* irq info */
int ack_register;
int ack_write;
u16 irqs_enabled;
uint irq_start;
int nirqs;
uint chained_irq;
/* egpio info */
struct egpio_chip *chip;
int nchips;
};
static inline void egpio_writew(u16 value, struct egpio_info *ei, int reg)
{
writew(value, ei->base_addr + (reg << ei->bus_shift));
}
static inline u16 egpio_readw(struct egpio_info *ei, int reg)
{
return readw(ei->base_addr + (reg << ei->bus_shift));
}
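/* bus_shift converts a register index into a byte offset on the
* external bus: probe computes it as fls(bus_width - 1) - 3, giving
* shift 1 (2 bytes per register) for a 16-bit bus and shift 2
* (4 bytes per register) for a 32-bit bus. */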
/*
* IRQs
*/
static inline void ack_irqs(struct egpio_info *ei)
{
egpio_writew(ei->ack_write, ei, ei->ack_register);
pr_debug("EGPIO ack - write %x to base+%x\n",
ei->ack_write, ei->ack_register << ei->bus_shift);
}
static void egpio_ack(struct irq_data *data)
{
}
/* There does not appear to be a way to proactively mask interrupts
* on the egpio chip itself. So, we simply ignore interrupts that
* aren't desired. */
static void egpio_mask(struct irq_data *data)
{
struct egpio_info *ei = irq_data_get_irq_chip_data(data);
ei->irqs_enabled &= ~(1 << (data->irq - ei->irq_start));
pr_debug("EGPIO mask %d %04x\n", data->irq, ei->irqs_enabled);
}
static void egpio_unmask(struct irq_data *data)
{
struct egpio_info *ei = irq_data_get_irq_chip_data(data);
ei->irqs_enabled |= 1 << (data->irq - ei->irq_start);
pr_debug("EGPIO unmask %d %04x\n", data->irq, ei->irqs_enabled);
}
static struct irq_chip egpio_muxed_chip = {
.name = "htc-egpio",
.irq_ack = egpio_ack,
.irq_mask = egpio_mask,
.irq_unmask = egpio_unmask,
};
static void egpio_handler(unsigned int irq, struct irq_desc *desc)
{
struct egpio_info *ei = irq_desc_get_handler_data(desc);
int irqpin;
/* Read current pins. */
unsigned long readval = egpio_readw(ei, ei->ack_register);
pr_debug("IRQ reg: %x\n", (unsigned int)readval);
/* Ack/unmask interrupts. */
ack_irqs(ei);
/* Process all set pins. */
readval &= ei->irqs_enabled;
for_each_set_bit(irqpin, &readval, ei->nirqs) {
/* Run irq handler */
pr_debug("got IRQ %d\n", irqpin);
generic_handle_irq(ei->irq_start + irqpin);
}
}
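/* Exported helper for board code: after a wakeup, report which egpio
* pin fired as a Linux IRQ number. A hypothetical caller (not part of
* this file) might do:
*
*	irq = htc_egpio_get_wakeup_irq(&egpio_pdev->dev);
*
* where egpio_pdev is the htc-egpio platform device.
*/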
int htc_egpio_get_wakeup_irq(struct device *dev)
{
struct egpio_info *ei = dev_get_drvdata(dev);
/* Read current pins. */
u16 readval = egpio_readw(ei, ei->ack_register);
/* Ack/unmask interrupts. */
ack_irqs(ei);
/* Return first set pin. */
readval &= ei->irqs_enabled;
return ei->irq_start + ffs(readval) - 1;
}
EXPORT_SYMBOL(htc_egpio_get_wakeup_irq);
static inline int egpio_pos(struct egpio_info *ei, int bit)
{
return bit >> ei->reg_shift;
}
static inline int egpio_bit(struct egpio_info *ei, int bit)
{
return 1 << (bit & ((1 << ei->reg_shift)-1));
}
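/* Worked example (illustrative values only): with 16-bit registers,
* reg_shift == 4, so a chip-relative offset of 21 maps to register
* index 21 >> 4 == 1 (egpio_pos) and bit mask 1 << (21 & 15) == 1 << 5
* (egpio_bit). */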
/*
* Input pins
*/
static int egpio_get(struct gpio_chip *chip, unsigned offset)
{
struct egpio_chip *egpio;
struct egpio_info *ei;
unsigned bit;
int reg;
int value;
pr_debug("egpio_get_value(%d)\n", chip->base + offset);
egpio = container_of(chip, struct egpio_chip, chip);
ei = dev_get_drvdata(egpio->dev);
bit = egpio_bit(ei, offset);
reg = egpio->reg_start + egpio_pos(ei, offset);
value = egpio_readw(ei, reg);
pr_debug("readw(%p + %x) = %x\n",
ei->base_addr, reg << ei->bus_shift, value);
return value & bit;
}
static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct egpio_chip *egpio;
egpio = container_of(chip, struct egpio_chip, chip);
return test_bit(offset, &egpio->is_out) ? -EINVAL : 0;
}
/*
* Output pins
*/
static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
unsigned long flag;
struct egpio_chip *egpio;
struct egpio_info *ei;
unsigned bit;
int pos;
int reg;
int shift;
pr_debug("egpio_set(%s, %d(%d), %d)\n",
chip->label, offset, offset+chip->base, value);
egpio = container_of(chip, struct egpio_chip, chip);
ei = dev_get_drvdata(egpio->dev);
bit = egpio_bit(ei, offset);
pos = egpio_pos(ei, offset);
reg = egpio->reg_start + pos;
shift = pos << ei->reg_shift;
pr_debug("egpio %s: reg %d = 0x%04x\n", value ? "set" : "clear",
reg, (egpio->cached_values >> shift) & ei->reg_mask);
spin_lock_irqsave(&ei->lock, flag);
if (value)
egpio->cached_values |= (1 << offset);
else
egpio->cached_values &= ~(1 << offset);
egpio_writew((egpio->cached_values >> shift) & ei->reg_mask, ei, reg);
spin_unlock_irqrestore(&ei->lock, flag);
}
static int egpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct egpio_chip *egpio;
egpio = container_of(chip, struct egpio_chip, chip);
if (test_bit(offset, &egpio->is_out)) {
egpio_set(chip, offset, value);
return 0;
} else {
return -EINVAL;
}
}
static void egpio_write_cache(struct egpio_info *ei)
{
int i;
struct egpio_chip *egpio;
int shift;
for (i = 0; i < ei->nchips; i++) {
egpio = &(ei->chip[i]);
if (!egpio->is_out)
continue;
for (shift = 0; shift < egpio->chip.ngpio;
shift += (1<<ei->reg_shift)) {
int reg = egpio->reg_start + egpio_pos(ei, shift);
if (!((egpio->is_out >> shift) & ei->reg_mask))
continue;
pr_debug("EGPIO: setting %x to %x, was %x\n", reg,
(egpio->cached_values >> shift) & ei->reg_mask,
egpio_readw(ei, reg));
egpio_writew((egpio->cached_values >> shift)
& ei->reg_mask, ei, reg);
}
}
}
/*
* Setup
*/
static int __init egpio_probe(struct platform_device *pdev)
{
struct htc_egpio_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
struct egpio_info *ei;
struct gpio_chip *chip;
unsigned int irq, irq_end;
int i;
int ret;
/* Initialize ei data structure. */
ei = kzalloc(sizeof(*ei), GFP_KERNEL);
if (!ei)
return -ENOMEM;
spin_lock_init(&ei->lock);
/* Find chained irq */
ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res)
ei->chained_irq = res->start;
/* Map egpio chip into virtual address space. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
goto fail;
ei->base_addr = ioremap_nocache(res->start, resource_size(res));
if (!ei->base_addr)
goto fail;
pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr);
if ((pdata->bus_width != 16) && (pdata->bus_width != 32))
goto fail;
ei->bus_shift = fls(pdata->bus_width - 1) - 3;
pr_debug("bus_shift = %d\n", ei->bus_shift);
if ((pdata->reg_width != 8) && (pdata->reg_width != 16))
goto fail;
ei->reg_shift = fls(pdata->reg_width - 1);
pr_debug("reg_shift = %d\n", ei->reg_shift);
ei->reg_mask = (1 << pdata->reg_width) - 1;
platform_set_drvdata(pdev, ei);
ei->nchips = pdata->num_chips;
ei->chip = kzalloc(sizeof(struct egpio_chip) * ei->nchips, GFP_KERNEL);
if (!ei->chip) {
ret = -ENOMEM;
goto fail;
}
for (i = 0; i < ei->nchips; i++) {
ei->chip[i].reg_start = pdata->chip[i].reg_start;
ei->chip[i].cached_values = pdata->chip[i].initial_values;
ei->chip[i].is_out = pdata->chip[i].direction;
ei->chip[i].dev = &(pdev->dev);
chip = &(ei->chip[i].chip);
chip->label = "htc-egpio";
chip->dev = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->base = pdata->chip[i].gpio_base;
chip->ngpio = pdata->chip[i].num_gpios;
gpiochip_add(chip);
}
/* Set initial pin values */
egpio_write_cache(ei);
ei->irq_start = pdata->irq_base;
ei->nirqs = pdata->num_irqs;
ei->ack_register = pdata->ack_register;
if (ei->chained_irq) {
/* Setup irq handlers */
ei->ack_write = 0xFFFF;
if (pdata->invert_acks)
ei->ack_write = 0;
irq_end = ei->irq_start + ei->nirqs;
for (irq = ei->irq_start; irq < irq_end; irq++) {
irq_set_chip_and_handler(irq, &egpio_muxed_chip,
handle_simple_irq);
irq_set_chip_data(irq, ei);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
irq_set_handler_data(ei->chained_irq, ei);
irq_set_chained_handler(ei->chained_irq, egpio_handler);
ack_irqs(ei);
device_init_wakeup(&pdev->dev, 1);
}
return 0;
fail:
printk(KERN_ERR "EGPIO failed to setup\n");
kfree(ei);
return ret;
}
static int __exit egpio_remove(struct platform_device *pdev)
{
struct egpio_info *ei = platform_get_drvdata(pdev);
unsigned int irq, irq_end;
if (ei->chained_irq) {
irq_end = ei->irq_start + ei->nirqs;
for (irq = ei->irq_start; irq < irq_end; irq++) {
irq_set_chip_and_handler(irq, NULL, NULL);
set_irq_flags(irq, 0);
}
irq_set_chained_handler(ei->chained_irq, NULL);
device_init_wakeup(&pdev->dev, 0);
}
iounmap(ei->base_addr);
kfree(ei->chip);
kfree(ei);
return 0;
}
#ifdef CONFIG_PM
static int egpio_suspend(struct platform_device *pdev, pm_message_t state)
{
struct egpio_info *ei = platform_get_drvdata(pdev);
if (ei->chained_irq && device_may_wakeup(&pdev->dev))
enable_irq_wake(ei->chained_irq);
return 0;
}
static int egpio_resume(struct platform_device *pdev)
{
struct egpio_info *ei = platform_get_drvdata(pdev);
if (ei->chained_irq && device_may_wakeup(&pdev->dev))
disable_irq_wake(ei->chained_irq);
/* Update registers from the cache, in case the
* CPLD was powered off during suspend */
egpio_write_cache(ei);
return 0;
}
#else
#define egpio_suspend NULL
#define egpio_resume NULL
#endif
static struct platform_driver egpio_driver = {
.driver = {
.name = "htc-egpio",
},
.remove = __exit_p(egpio_remove),
.suspend = egpio_suspend,
.resume = egpio_resume,
};
static int __init egpio_init(void)
{
return platform_driver_probe(&egpio_driver, egpio_probe);
}
static void __exit egpio_exit(void)
{
platform_driver_unregister(&egpio_driver);
}
/* start early for dependencies */
subsys_initcall(egpio_init);
module_exit(egpio_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin O'Connor <kevin@koconnor.net>");
| gpl-2.0 |
schnitzeltony/linux | drivers/spi/spi-bcm2835.c | 179 | 23808 | /*
* Driver for Broadcom BCM2835 SPI Controllers
*
* Copyright (C) 2012 Chris Boot
* Copyright (C) 2013 Stephen Warren
* Copyright (C) 2015 Martin Sperl
*
* This driver is inspired by:
* spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
* spi-atmel.c, Copyright (C) 2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/page.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spi/spi.h>
/* SPI register offsets */
#define BCM2835_SPI_CS 0x00
#define BCM2835_SPI_FIFO 0x04
#define BCM2835_SPI_CLK 0x08
#define BCM2835_SPI_DLEN 0x0c
#define BCM2835_SPI_LTOH 0x10
#define BCM2835_SPI_DC 0x14
/* Bitfields in CS */
#define BCM2835_SPI_CS_LEN_LONG 0x02000000
#define BCM2835_SPI_CS_DMA_LEN 0x01000000
#define BCM2835_SPI_CS_CSPOL2 0x00800000
#define BCM2835_SPI_CS_CSPOL1 0x00400000
#define BCM2835_SPI_CS_CSPOL0 0x00200000
#define BCM2835_SPI_CS_RXF 0x00100000
#define BCM2835_SPI_CS_RXR 0x00080000
#define BCM2835_SPI_CS_TXD 0x00040000
#define BCM2835_SPI_CS_RXD 0x00020000
#define BCM2835_SPI_CS_DONE 0x00010000
#define BCM2835_SPI_CS_LEN 0x00002000
#define BCM2835_SPI_CS_REN 0x00001000
#define BCM2835_SPI_CS_ADCS 0x00000800
#define BCM2835_SPI_CS_INTR 0x00000400
#define BCM2835_SPI_CS_INTD 0x00000200
#define BCM2835_SPI_CS_DMAEN 0x00000100
#define BCM2835_SPI_CS_TA 0x00000080
#define BCM2835_SPI_CS_CSPOL 0x00000040
#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
#define BCM2835_SPI_CS_CPOL 0x00000008
#define BCM2835_SPI_CS_CPHA 0x00000004
#define BCM2835_SPI_CS_CS_10 0x00000002
#define BCM2835_SPI_CS_CS_01 0x00000001
#define BCM2835_SPI_POLLING_LIMIT_US 30
#define BCM2835_SPI_POLLING_JIFFIES 2
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS | SPI_3WIRE)
#define DRV_NAME "spi-bcm2835"
struct bcm2835_spi {
void __iomem *regs;
struct clk *clk;
int irq;
const u8 *tx_buf;
u8 *rx_buf;
int tx_len;
int rx_len;
bool dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
{
return readl(bs->regs + reg);
}
static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
{
writel(val, bs->regs + reg);
}
static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
u8 byte;
while ((bs->rx_len) &&
(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
if (bs->rx_buf)
*bs->rx_buf++ = byte;
bs->rx_len--;
}
}
static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
u8 byte;
while ((bs->tx_len) &&
(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
byte = bs->tx_buf ? *bs->tx_buf++ : 0;
bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
bs->tx_len--;
}
}
static void bcm2835_spi_reset_hw(struct spi_master *master)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* Disable SPI interrupts and transfer */
cs &= ~(BCM2835_SPI_CS_INTR |
BCM2835_SPI_CS_INTD |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_TA);
/* and reset RX/TX FIFOS */
cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
/* and reset the SPI_HW */
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
/* as well as DLEN */
bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}
static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* Read as many bytes as possible from FIFO */
bcm2835_rd_fifo(bs);
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
/* based on flags decide if we can finish the transfer */
if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* wake up the framework */
complete(&master->xfer_completion);
}
return IRQ_HANDLED;
}
static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
	/* fill in the fifo if we have a gpio-cs
	 * note that there have been rare events where the native CS
	 * flapped for <1us, which may change the behaviour;
	 * with gpio-cs this does not happen, so the pre-fill is
	 * implemented only for this case
	 */
if (gpio_is_valid(spi->cs_gpio)) {
		/* enable the HW block, but without interrupts enabled -
		 * enabling them here would trigger an immediate interrupt
		 */
bcm2835_wr(bs, BCM2835_SPI_CS,
cs | BCM2835_SPI_CS_TA);
/* fill in tx fifo as much as possible */
bcm2835_wr_fifo(bs);
}
/*
* Enable the HW block. This will immediately trigger a DONE (TX
* empty) interrupt, upon which we will fill the TX FIFO with the
* first TX bytes. Pre-filling the TX FIFO here to avoid the
* interrupt doesn't work:-(
*/
cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
/* signal that we need to wait for completion */
return 1;
}
/*
 * DMA support
 *
 * this implementation currently has a few issues in so far as it does
 * not work around limitations of the HW.
 *
 * the main one being that DMA transfers are limited to 16 bit
 * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
 *
 * also we currently assume that the scatter-gather fragments are
 * all multiples of 4 (except the last) - otherwise we would need
 * to reset the FIFO before subsequent transfers...
 * this also means that tx/rx transfer sg's need to be of equal size!
 *
 * there may be a few more corner cases we may need to address as well,
 * but unfortunately this would mean splitting up the scatter-gather
 * list, making it slightly impractical...
 */
static void bcm2835_spi_dma_done(void *data)
{
struct spi_master *master = data;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* reset fifo and HW */
bcm2835_spi_reset_hw(master);
	/* and terminate tx-dma as we do not have an irq for it;
	 * when the rx dma terminates and this callback is called,
	 * the tx-dma must already have finished - we can't get into
	 * this situation otherwise...
	 */
dmaengine_terminate_all(master->dma_tx);
/* mark as no longer pending */
bs->dma_pending = 0;
	/* and mark as completed */
complete(&master->xfer_completion);
}
static int bcm2835_spi_prepare_sg(struct spi_master *master,
struct spi_transfer *tfr,
bool is_tx)
{
struct dma_chan *chan;
struct scatterlist *sgl;
unsigned int nents;
enum dma_transfer_direction dir;
unsigned long flags;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
if (is_tx) {
dir = DMA_MEM_TO_DEV;
chan = master->dma_tx;
nents = tfr->tx_sg.nents;
sgl = tfr->tx_sg.sgl;
flags = 0 /* no tx interrupt */;
} else {
dir = DMA_DEV_TO_MEM;
chan = master->dma_rx;
nents = tfr->rx_sg.nents;
sgl = tfr->rx_sg.sgl;
flags = DMA_PREP_INTERRUPT;
}
/* prepare the channel */
desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
if (!desc)
return -EINVAL;
/* set callback for rx */
if (!is_tx) {
desc->callback = bcm2835_spi_dma_done;
desc->callback_param = master;
}
/* submit it to DMA-engine */
cookie = dmaengine_submit(desc);
return dma_submit_error(cookie);
}
static inline int bcm2835_check_sg_length(struct sg_table *sgt)
{
int i;
struct scatterlist *sgl;
/* check that the sg entries are word-sized (except for last) */
for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
if (sg_dma_len(sgl) % 4)
return -EFAULT;
}
return 0;
}
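/* Illustrative note (not in the original source): with the check above,
 * an sg list with dma lengths {4096, 4096, 100} passes, since only the
 * first nents - 1 entries must be word-sized, while {4096, 50, 100}
 * fails with -EFAULT, which makes the caller fall back to interrupt mode.
 */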
static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
int ret;
/* check that the scatter gather segments are all a multiple of 4 */
if (bcm2835_check_sg_length(&tfr->tx_sg) ||
bcm2835_check_sg_length(&tfr->rx_sg)) {
dev_warn_once(&spi->dev,
"scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
}
/* setup tx-DMA */
ret = bcm2835_spi_prepare_sg(master, tfr, true);
if (ret)
return ret;
/* start TX early */
dma_async_issue_pending(master->dma_tx);
/* mark as dma pending */
bs->dma_pending = 1;
/* set the DMA length */
bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
/* start the HW */
bcm2835_wr(bs, BCM2835_SPI_CS,
cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
	/* setup rx-DMA late - to run transfers while
	 * mapping of the rx buffers still takes place;
	 * this saves 10us or more.
	 */
ret = bcm2835_spi_prepare_sg(master, tfr, false);
if (ret) {
/* need to reset on errors */
dmaengine_terminate_all(master->dma_tx);
bcm2835_spi_reset_hw(master);
return ret;
}
/* start rx dma late */
dma_async_issue_pending(master->dma_rx);
/* wait for wakeup in framework */
return 1;
}
static bool bcm2835_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
{
/* only run for gpio_cs */
if (!gpio_is_valid(spi->cs_gpio))
return false;
/* we start DMA efforts only on bigger transfers */
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
	/* BCM2835_SPI_DLEN defines the max transfer size as
	 * 16 bit, so the maximum is 65535;
	 * we could revisit this by using an alternative transfer
	 * method - ideally this would get done without any more
	 * interaction...
	 */
if (tfr->len > 65535) {
dev_warn_once(&spi->dev,
"transfer size of %d too big for dma-transfer\n",
tfr->len);
return false;
}
/* if we run rx/tx_buf with word aligned addresses then we are OK */
if ((((size_t)tfr->rx_buf & 3) == 0) &&
(((size_t)tfr->tx_buf & 3) == 0))
return true;
/* otherwise we only allow transfers within the same page
* to avoid wasting time on dma_mapping when it is not practical
*/
if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
dev_warn_once(&spi->dev,
"Unaligned spi tx-transfer bridging page\n");
return false;
}
if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
dev_warn_once(&spi->dev,
"Unaligned spi rx-transfer bridging page\n");
return false;
}
/* return OK */
return true;
}
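/* Worked example for the page-crossing check above (illustrative numbers,
 * assuming PAGE_SIZE == 4096): an unaligned tx_buf at page offset 4000 with
 * len == 200 gives 4000 + 200 > 4096, so the transfer would bridge a page
 * boundary and DMA is rejected; the same buffer with len <= 96 would pass.
 * Word-aligned buffers skip this check entirely.
 */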
static void bcm2835_dma_release(struct spi_master *master)
{
if (master->dma_tx) {
dmaengine_terminate_all(master->dma_tx);
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
}
if (master->dma_rx) {
dmaengine_terminate_all(master->dma_rx);
dma_release_channel(master->dma_rx);
master->dma_rx = NULL;
}
}
static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
{
struct dma_slave_config slave_config;
const __be32 *addr;
dma_addr_t dma_reg_base;
int ret;
/* base address in dma-space */
addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
if (!addr) {
dev_err(dev, "could not get DMA-register address - not using dma mode\n");
goto err;
}
dma_reg_base = be32_to_cpup(addr);
/* get tx/rx dma */
master->dma_tx = dma_request_slave_channel(dev, "tx");
if (!master->dma_tx) {
dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
goto err;
}
master->dma_rx = dma_request_slave_channel(dev, "rx");
if (!master->dma_rx) {
dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
goto err_release;
}
/* configure DMAs */
slave_config.direction = DMA_MEM_TO_DEV;
slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(master->dma_tx, &slave_config);
if (ret)
goto err_config;
slave_config.direction = DMA_DEV_TO_MEM;
slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ret = dmaengine_slave_config(master->dma_rx, &slave_config);
if (ret)
goto err_config;
/* all went well, so set can_dma */
master->can_dma = bcm2835_spi_can_dma;
master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
/* need to do TX AND RX DMA, so we need dummy buffers */
master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
return;
err_config:
dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
ret);
err_release:
bcm2835_dma_release(master);
err:
return;
}
static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
u32 cs,
unsigned long long xfer_time_us)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
/* enable HW block without interrupts */
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
	/* fill in the fifo before the timeout calculations:
	 * if we are interrupted here, then the data is
	 * getting transferred by the HW while we are interrupted
	 */
bcm2835_wr_fifo(bs);
/* set the timeout */
timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
/* loop until finished the transfer */
while (bs->rx_len) {
/* fill in tx fifo with remaining data */
bcm2835_wr_fifo(bs);
/* read from fifo as much as possible */
bcm2835_rd_fifo(bs);
/* if there is still data pending to read
* then check the timeout
*/
if (bs->rx_len && time_after(jiffies, timeout)) {
dev_dbg_ratelimited(&spi->dev,
"timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
jiffies - timeout,
bs->tx_len, bs->rx_len);
/* fall back to interrupt mode */
return bcm2835_spi_transfer_one_irq(master, spi,
tfr, cs);
}
}
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* and return without waiting for completion */
return 0;
}
static int bcm2835_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
unsigned long spi_hz, clk_hz, cdiv;
unsigned long spi_used_hz;
unsigned long long xfer_time_us;
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
/* set clock */
spi_hz = tfr->speed_hz;
clk_hz = clk_get_rate(bs->clk);
if (spi_hz >= clk_hz / 2) {
cdiv = 2; /* clk_hz/2 is the fastest we can go */
} else if (spi_hz) {
/* CDIV must be a multiple of two */
cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
cdiv += (cdiv % 2);
if (cdiv >= 65536)
cdiv = 0; /* 0 is the slowest we can go */
} else {
cdiv = 0; /* 0 is the slowest we can go */
}
spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
	/* handle 3-wire mode */
if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
cs |= BCM2835_SPI_CS_REN;
else
cs &= ~BCM2835_SPI_CS_REN;
	/* for gpio_cs set a dummy CS so that no HW-CS gets changed;
	 * we cannot do this in bcm2835_spi_set_cs, as it does
	 * not get called for cs_gpio cases, so we need to do it here
	 */
if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS))
cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
bs->tx_len = tfr->len;
bs->rx_len = tfr->len;
/* calculate the estimated time in us the transfer runs */
xfer_time_us = (unsigned long long)tfr->len
* 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
* 1000000;
do_div(xfer_time_us, spi_used_hz);
	/* for short requests run in polling mode */
if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
return bcm2835_spi_transfer_one_poll(master, spi, tfr,
cs, xfer_time_us);
/* run in dma mode if conditions are right */
if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
}
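/* Worked example for the clock and mode selection above (illustrative,
 * assuming a 250 MHz core clock): for tfr->speed_hz == 10 MHz,
 * cdiv = DIV_ROUND_UP(250000000, 10000000) = 25, rounded up to the even
 * value 26, so spi_used_hz = 250000000 / 26 ~= 9.6 MHz. A 10-byte transfer
 * then takes xfer_time_us = 10 * 9 * 1000000 / 9615384 ~= 9 us, which is
 * below BCM2835_SPI_POLLING_LIMIT_US (30), so it runs in polling mode.
 */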
static int bcm2835_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
if (spi->mode & SPI_CPOL)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
return 0;
}
static void bcm2835_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* if an error occurred and we have an active dma, then terminate */
if (bs->dma_pending) {
dmaengine_terminate_all(master->dma_tx);
dmaengine_terminate_all(master->dma_rx);
bs->dma_pending = 0;
}
/* and reset */
bcm2835_spi_reset_hw(master);
}
static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level)
{
	/*
	 * we can assume that we are "native", as spi_set_cs
	 * calls us ONLY when cs_gpio is not set;
	 * we can also assume that we are CS < 3, because otherwise
	 * bcm2835_spi_setup would have failed and, due to its error
	 * handling, we would not get called at all.
	 * the level passed is the electrical level, not enabled/disabled,
	 * so it has to get translated back to enable/disable -
	 * see spi_set_cs in spi.c for the implementation
	 */
struct spi_master *master = spi->master;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
bool enable;
/* calculate the enable flag from the passed gpio_level */
enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level;
/* set flags for "reverse" polarity in the registers */
if (spi->mode & SPI_CS_HIGH) {
/* set the correct CS-bits */
cs |= BCM2835_SPI_CS_CSPOL;
cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
} else {
/* clean the CS-bits */
cs &= ~BCM2835_SPI_CS_CSPOL;
cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select);
}
/* select the correct chip_select depending on disabled/enabled */
if (enable) {
/* set cs correctly */
if (spi->mode & SPI_NO_CS) {
/* use the "undefined" chip-select */
cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
} else {
/* set the chip select */
cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
cs |= spi->chip_select;
}
} else {
/* disable CSPOL which puts HW-CS into deselected state */
cs &= ~BCM2835_SPI_CS_CSPOL;
/* use the "undefined" chip-select as precaution */
cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
}
/* finally set the calculated flags in SPI_CS */
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
}
static int chip_match_name(struct gpio_chip *chip, void *data)
{
return !strcmp(chip->label, data);
}
static int bcm2835_spi_setup(struct spi_device *spi)
{
int err;
struct gpio_chip *chip;
/*
* sanity checking the native-chipselects
*/
if (spi->mode & SPI_NO_CS)
return 0;
if (gpio_is_valid(spi->cs_gpio))
return 0;
if (spi->chip_select > 1) {
		/* error in the case of native CS requested with CS > 1;
		 * officially there is a CS2, but it is not documented
		 * which GPIO it is connected to...
		 */
dev_err(&spi->dev,
"setup: only two native chip-selects are supported\n");
return -EINVAL;
}
/* now translate native cs to GPIO */
/* get the gpio chip for the base */
chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
if (!chip)
return 0;
	/* and calculate the real CS (native CS0 maps to GPIO 8, CS1 to GPIO 7) */
spi->cs_gpio = chip->base + 8 - spi->chip_select;
/* and set up the "mode" and level */
dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
spi->chip_select, spi->cs_gpio);
/* set up GPIO as output and pull to the correct level */
err = gpio_direction_output(spi->cs_gpio,
(spi->mode & SPI_CS_HIGH) ? 0 : 1);
if (err) {
dev_err(&spi->dev,
"could not set CS%i gpio %i as output: %i",
spi->chip_select, spi->cs_gpio, err);
return err;
}
/* the implementation of pinctrl-bcm2835 currently does not
* set the GPIO value when using gpio_direction_output
* so we are setting it here explicitly
*/
gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
return 0;
}
static int bcm2835_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct bcm2835_spi *bs;
struct resource *res;
int err;
master = spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master() failed\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, master);
master->mode_bits = BCM2835_SPI_MODE_BITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->num_chipselect = 3;
master->setup = bcm2835_spi_setup;
master->set_cs = bcm2835_spi_set_cs;
master->transfer_one = bcm2835_spi_transfer_one;
master->handle_err = bcm2835_spi_handle_err;
master->prepare_message = bcm2835_spi_prepare_message;
master->dev.of_node = pdev->dev.of_node;
bs = spi_master_get_devdata(master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
goto out_master_put;
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_master_put;
}
bs->irq = platform_get_irq(pdev, 0);
if (bs->irq <= 0) {
dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
err = bs->irq ? bs->irq : -ENODEV;
goto out_master_put;
}
clk_prepare_enable(bs->clk);
bcm2835_dma_init(master, &pdev->dev);
/* initialise the hardware with the default polarities */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
dev_name(&pdev->dev), master);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
err = devm_spi_register_master(&pdev->dev, master);
if (err) {
dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
goto out_clk_disable;
}
return 0;
out_clk_disable:
clk_disable_unprepare(bs->clk);
out_master_put:
spi_master_put(master);
return err;
}
static int bcm2835_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* Clear FIFOs, and disable the HW block */
bcm2835_wr(bs, BCM2835_SPI_CS,
BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
clk_disable_unprepare(bs->clk);
bcm2835_dma_release(master);
return 0;
}
static const struct of_device_id bcm2835_spi_match[] = {
{ .compatible = "brcm,bcm2835-spi", },
{}
};
MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
static struct platform_driver bcm2835_spi_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = bcm2835_spi_match,
},
.probe = bcm2835_spi_probe,
.remove = bcm2835_spi_remove,
};
module_platform_driver(bcm2835_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
chrisdearman/kernel-common | drivers/clk/rockchip/clk-pll.c | 435 | 12525 | /*
* Copyright (c) 2014 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/div64.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk.h"
#define PLL_MODE_MASK 0x3
#define PLL_MODE_SLOW 0x0
#define PLL_MODE_NORM 0x1
#define PLL_MODE_DEEP 0x2
struct rockchip_clk_pll {
struct clk_hw hw;
struct clk_mux pll_mux;
const struct clk_ops *pll_mux_ops;
struct notifier_block clk_nb;
void __iomem *reg_base;
int lock_offset;
unsigned int lock_shift;
enum rockchip_pll_type type;
u8 flags;
const struct rockchip_pll_rate_table *rate_table;
unsigned int rate_count;
spinlock_t *lock;
};
#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
#define to_rockchip_clk_pll_nb(nb) \
container_of(nb, struct rockchip_clk_pll, clk_nb)
static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
struct rockchip_clk_pll *pll, unsigned long rate)
{
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
int i;
for (i = 0; i < pll->rate_count; i++) {
if (rate == rate_table[i].rate)
return &rate_table[i];
}
return NULL;
}
static long rockchip_pll_round_rate(struct clk_hw *hw,
unsigned long drate, unsigned long *prate)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
int i;
	/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
if (drate >= rate_table[i].rate)
return rate_table[i].rate;
}
/* return minimum supported value */
return rate_table[i - 1].rate;
}
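/* Illustrative example of the lookup above: with a descending table of
 * {600 MHz, 300 MHz}, a request of 400 MHz returns 300 MHz (the closest
 * supported rate not above the request), and a request below all entries
 * falls through the loop and returns the table minimum, rate_table[i - 1].
 */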
/*
* Wait for the pll to reach the locked state.
* The calling set_rate function is responsible for making sure the
* grf regmap is available.
*/
static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
{
struct regmap *grf = rockchip_clk_get_grf();
unsigned int val;
int delay = 24000000, ret;
while (delay > 0) {
ret = regmap_read(grf, pll->lock_offset, &val);
if (ret) {
pr_err("%s: failed to read pll lock status: %d\n",
__func__, ret);
return ret;
}
if (val & BIT(pll->lock_shift))
return 0;
delay--;
}
pr_err("%s: timeout waiting for pll to lock\n", __func__);
return -ETIMEDOUT;
}
/*
 * PLL used in RK3066, RK3188 and RK3288
 */
#define RK3066_PLL_RESET_DELAY(nr) (((nr) * 500) / 24 + 1)
#define RK3066_PLLCON(i) ((i) * 0x4)
#define RK3066_PLLCON0_OD_MASK 0xf
#define RK3066_PLLCON0_OD_SHIFT 0
#define RK3066_PLLCON0_NR_MASK 0x3f
#define RK3066_PLLCON0_NR_SHIFT 8
#define RK3066_PLLCON1_NF_MASK 0x1fff
#define RK3066_PLLCON1_NF_SHIFT 0
#define RK3066_PLLCON2_BWADJ_MASK 0xfff
#define RK3066_PLLCON2_BWADJ_SHIFT 0
#define RK3066_PLLCON3_RESET (1 << 5)
#define RK3066_PLLCON3_PWRDOWN (1 << 1)
#define RK3066_PLLCON3_BYPASS (1 << 0)
static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
u64 nf, nr, no, rate64 = prate;
u32 pllcon;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
if (pllcon & RK3066_PLLCON3_BYPASS) {
pr_debug("%s: pll %s is bypassed\n", __func__,
__clk_get_name(hw->clk));
return prate;
}
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;
rate64 *= (nf + 1);
do_div(rate64, nr + 1);
do_div(rate64, no + 1);
return (unsigned long)rate64;
}
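/* The recalc above implements (worked example with illustrative values):
 * rate = prate * (nf + 1) / ((nr + 1) * (no + 1))
 * e.g. with a 24 MHz parent and register fields nf = 49, nr = 0, no = 1:
 * 24000000 * 50 / (1 * 2) = 600 MHz.
 */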
static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long prate)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
struct regmap *grf = rockchip_clk_get_grf();
struct clk_mux *pll_mux = &pll->pll_mux;
const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
int rate_change_remuxed = 0;
int cur_parent;
int ret;
if (IS_ERR(grf)) {
pr_debug("%s: grf regmap not available, aborting rate change\n",
__func__);
return PTR_ERR(grf);
}
pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
__func__, __clk_get_name(hw->clk), old_rate, drate, prate);
/* Get required rate settings from table */
rate = rockchip_get_pll_settings(pll, drate);
if (!rate) {
pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
drate, __clk_get_name(hw->clk));
return -EINVAL;
}
pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
__func__, rate->rate, rate->nr, rate->no, rate->nf);
cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
if (cur_parent == PLL_MODE_NORM) {
pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
rate_change_remuxed = 1;
}
/* enter reset mode */
writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
pll->reg_base + RK3066_PLLCON(3));
/* update pll values */
writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
RK3066_PLLCON0_NR_SHIFT) |
HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
RK3066_PLLCON0_OD_SHIFT),
pll->reg_base + RK3066_PLLCON(0));
writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
RK3066_PLLCON1_NF_SHIFT),
pll->reg_base + RK3066_PLLCON(1));
writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
RK3066_PLLCON2_BWADJ_SHIFT),
pll->reg_base + RK3066_PLLCON(2));
/* leave reset and wait the reset_delay */
writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
pll->reg_base + RK3066_PLLCON(3));
udelay(RK3066_PLL_RESET_DELAY(rate->nr));
/* wait for the pll to lock */
ret = rockchip_pll_wait_lock(pll);
if (ret) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
__func__, old_rate);
rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
}
if (rate_change_remuxed)
pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
return ret;
}
static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
pll->reg_base + RK3066_PLLCON(3));
return 0;
}
static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
RK3066_PLLCON3_PWRDOWN, 0),
pll->reg_base + RK3066_PLLCON(3));
}
static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));
return !(pllcon & RK3066_PLLCON3_PWRDOWN);
}
static void rockchip_rk3066_pll_init(struct clk_hw *hw)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
unsigned int nf, nr, no, bwadj;
unsigned long drate;
u32 pllcon;
if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
return;
drate = __clk_get_rate(hw->clk);
rate = rockchip_get_pll_settings(pll, drate);
	/* when there is no rate setting for the current rate, rely on clk_set_rate */
if (!rate)
return;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
__func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
if (rate->nr != nr || rate->no != no || rate->nf != nf
|| rate->bwadj != bwadj) {
struct clk *parent = __clk_get_parent(hw->clk);
unsigned long prate;
if (!parent) {
pr_warn("%s: parent of %s not available\n",
__func__, __clk_get_name(hw->clk));
return;
}
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
__func__, __clk_get_name(hw->clk));
prate = __clk_get_rate(parent);
rockchip_rk3066_pll_set_rate(hw, drate, prate);
}
}
static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
.is_enabled = rockchip_rk3066_pll_is_enabled,
};
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
.round_rate = rockchip_pll_round_rate,
.set_rate = rockchip_rk3066_pll_set_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
.is_enabled = rockchip_rk3066_pll_is_enabled,
.init = rockchip_rk3066_pll_init,
};
/*
* Common registering of pll clocks
*/
struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
const char *name, const char **parent_names, u8 num_parents,
void __iomem *base, int con_offset, int grf_lock_offset,
int lock_shift, int mode_offset, int mode_shift,
struct rockchip_pll_rate_table *rate_table,
u8 clk_pll_flags, spinlock_t *lock)
{
const char *pll_parents[3];
struct clk_init_data init;
struct rockchip_clk_pll *pll;
struct clk_mux *pll_mux;
struct clk *pll_clk, *mux_clk;
char pll_name[20];
if (num_parents != 2) {
pr_err("%s: needs two parent clocks\n", __func__);
return ERR_PTR(-EINVAL);
}
/* name the actual pll */
snprintf(pll_name, sizeof(pll_name), "pll_%s", name);
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
init.name = pll_name;
/* keep all plls untouched for now */
init.flags = CLK_IGNORE_UNUSED;
init.parent_names = &parent_names[0];
init.num_parents = 1;
if (rate_table) {
int len;
/* find count of rates in rate_table */
for (len = 0; rate_table[len].rate != 0; )
len++;
pll->rate_count = len;
pll->rate_table = kmemdup(rate_table,
pll->rate_count *
sizeof(struct rockchip_pll_rate_table),
GFP_KERNEL);
WARN(!pll->rate_table,
"%s: could not allocate rate table for %s\n",
__func__, name);
}
switch (pll_type) {
case pll_rk3066:
if (!pll->rate_table)
init.ops = &rockchip_rk3066_pll_clk_norate_ops;
else
init.ops = &rockchip_rk3066_pll_clk_ops;
break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, name);
}
pll->hw.init = &init;
pll->type = pll_type;
pll->reg_base = base + con_offset;
pll->lock_offset = grf_lock_offset;
pll->lock_shift = lock_shift;
pll->flags = clk_pll_flags;
pll->lock = lock;
/* create the mux on top of the real pll */
pll->pll_mux_ops = &clk_mux_ops;
pll_mux = &pll->pll_mux;
pll_mux->reg = base + mode_offset;
pll_mux->shift = mode_shift;
pll_mux->mask = PLL_MODE_MASK;
pll_mux->flags = 0;
pll_mux->lock = lock;
pll_mux->hw.init = &init;
if (pll_type == pll_rk3066)
pll_mux->flags |= CLK_MUX_HIWORD_MASK;
pll_clk = clk_register(NULL, &pll->hw);
if (IS_ERR(pll_clk)) {
pr_err("%s: failed to register pll clock %s : %ld\n",
__func__, name, PTR_ERR(pll_clk));
mux_clk = pll_clk;
goto err_pll;
}
/* the actual muxing is xin24m, pll-output, xin32k */
pll_parents[0] = parent_names[0];
pll_parents[1] = pll_name;
pll_parents[2] = parent_names[1];
init.name = name;
init.flags = CLK_SET_RATE_PARENT;
init.ops = pll->pll_mux_ops;
init.parent_names = pll_parents;
init.num_parents = ARRAY_SIZE(pll_parents);
mux_clk = clk_register(NULL, &pll_mux->hw);
if (IS_ERR(mux_clk))
goto err_mux;
return mux_clk;
err_mux:
clk_unregister(pll_clk);
err_pll:
kfree(pll);
return mux_clk;
}
| gpl-2.0 |
ace0/linux-whirlwind-rng | drivers/usb/dwc3/dwc3-keystone.c | 435 | 4953 | /**
* dwc3-keystone.c - Keystone Specific Glue layer
*
* Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: WingMan Kwok <w-kwok2@ti.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 of
* the License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
/* USBSS register offsets */
#define USBSS_REVISION 0x0000
#define USBSS_SYSCONFIG 0x0010
#define USBSS_IRQ_EOI 0x0018
#define USBSS_IRQSTATUS_RAW_0 0x0020
#define USBSS_IRQSTATUS_0 0x0024
#define USBSS_IRQENABLE_SET_0 0x0028
#define USBSS_IRQENABLE_CLR_0 0x002c
/* IRQ register bits */
#define USBSS_IRQ_EOI_LINE(n) BIT(n)
#define USBSS_IRQ_EVENT_ST BIT(0)
#define USBSS_IRQ_COREIRQ_EN BIT(0)
#define USBSS_IRQ_COREIRQ_CLR BIT(0)
static u64 kdwc3_dma_mask;
struct dwc3_keystone {
struct device *dev;
struct clk *clk;
void __iomem *usbss;
};
static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
static inline void kdwc3_writel(void __iomem *base, u32 offset, u32 value)
{
writel(value, base + offset);
}
static void kdwc3_enable_irqs(struct dwc3_keystone *kdwc)
{
u32 val;
val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
val |= USBSS_IRQ_COREIRQ_EN;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
}
static void kdwc3_disable_irqs(struct dwc3_keystone *kdwc)
{
u32 val;
val = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);
val &= ~USBSS_IRQ_COREIRQ_EN;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, val);
}
static irqreturn_t dwc3_keystone_interrupt(int irq, void *_kdwc)
{
struct dwc3_keystone *kdwc = _kdwc;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_CLR_0, USBSS_IRQ_COREIRQ_CLR);
kdwc3_writel(kdwc->usbss, USBSS_IRQSTATUS_0, USBSS_IRQ_EVENT_ST);
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, USBSS_IRQ_COREIRQ_EN);
kdwc3_writel(kdwc->usbss, USBSS_IRQ_EOI, USBSS_IRQ_EOI_LINE(0));
return IRQ_HANDLED;
}
static int kdwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_keystone *kdwc;
struct resource *res;
int error, irq;
kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
if (!kdwc)
return -ENOMEM;
platform_set_drvdata(pdev, kdwc);
kdwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing usbss resource\n");
return -EINVAL;
}
kdwc->usbss = devm_ioremap_resource(dev, res);
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
kdwc3_dma_mask = dma_get_mask(dev);
dev->dma_mask = &kdwc3_dma_mask;
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
dev_dbg(kdwc->dev, "unable to enable usb clock, err %d\n",
error);
return error;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
		dev_err(&pdev->dev, "missing irq\n");
		error = irq;
		goto err_irq;
}
error = devm_request_irq(dev, irq, dwc3_keystone_interrupt, IRQF_SHARED,
dev_name(dev), kdwc);
if (error) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
irq, error);
goto err_irq;
}
kdwc3_enable_irqs(kdwc);
error = of_platform_populate(node, NULL, NULL, dev);
if (error) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
goto err_core;
}
return 0;
err_core:
kdwc3_disable_irqs(kdwc);
err_irq:
clk_disable_unprepare(kdwc->clk);
return error;
}
static int kdwc3_remove_core(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
platform_device_unregister(pdev);
return 0;
}
static int kdwc3_remove(struct platform_device *pdev)
{
struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
kdwc3_disable_irqs(kdwc);
device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
clk_disable_unprepare(kdwc->clk);
platform_set_drvdata(pdev, NULL);
return 0;
}
static const struct of_device_id kdwc3_of_match[] = {
{ .compatible = "ti,keystone-dwc3", },
{},
};
MODULE_DEVICE_TABLE(of, kdwc3_of_match);
static struct platform_driver kdwc3_driver = {
.probe = kdwc3_probe,
.remove = kdwc3_remove,
.driver = {
.name = "keystone-dwc3",
.owner = THIS_MODULE,
.of_match_table = kdwc3_of_match,
},
};
module_platform_driver(kdwc3_driver);
MODULE_ALIAS("platform:keystone-dwc3");
MODULE_AUTHOR("WingMan Kwok <w-kwok2@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 KEYSTONE Glue Layer");
| gpl-2.0 |
veo-labs/linux-veobox | drivers/clk/rockchip/clk-mmc-phase.c | 435 | 4020 | /*
* Copyright 2014 Google, Inc
* Author: Alexandru M Stan <amstan@chromium.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include "clk.h"
struct rockchip_mmc_clock {
struct clk_hw hw;
void __iomem *reg;
int id;
int shift;
};
#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
#define RK3288_MMC_CLKGEN_DIV 2
static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
return parent_rate / RK3288_MMC_CLKGEN_DIV;
}
#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
#define PSECS_PER_SEC 1000000000000LL
/*
* Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
* simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
*/
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
unsigned long rate = clk_get_rate(hw->clk);
u32 raw_value;
u16 degrees;
u32 delay_num = 0;
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* (degrees per delay element) * 10000 */
unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
36 * (rate / 1000000);
delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
degrees += delay_num * factor / 10000;
}
return degrees % 360;
}
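/* Worked example for the phase read-back above (illustrative numbers):
 * at a 50 MHz clock one period is 20000 ps, so one ~60 ps delay element
 * is about 1.08 degrees; factor = 6 * 36 * 50 = 10800, and with the
 * 90-degree field set to 1 and delay_num == 10 the result is
 * 90 + 10 * 10800 / 10000 = 100 degrees.
 */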
static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
unsigned long rate = clk_get_rate(hw->clk);
u8 nineties, remainder;
u8 delay_num;
u32 raw_value;
u64 delay;
	/* allow a requested 22 to be treated as 22.5 */
degrees++;
/* floor to 22.5 increment */
degrees -= ((degrees) * 10 % 225) / 10;
nineties = degrees / 90;
/* 22.5 multiples */
remainder = (degrees % 90) / 22;
delay = PSECS_PER_SEC;
do_div(delay, rate);
/* / 360 / 22.5 */
do_div(delay, 16);
do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
delay *= remainder;
delay_num = (u8) min(delay, 255ULL);
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
__clk_get_name(hw->clk), degrees, delay_num,
mmc_clock->reg, raw_value>>(mmc_clock->shift),
rockchip_mmc_get_phase(hw)
);
return 0;
}
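/* Worked example for set_phase (illustrative, 50 MHz clock): a request of
 * 112 degrees is bumped to 113 and floored to the 22.5-degree grid, giving
 * nineties = 1 and remainder = 1; delay = 1e12 / 50000000 / 16 / 60 = 20
 * delay elements, i.e. roughly 90 + 20 * 1.08 ~= 112 degrees actual.
 */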
static const struct clk_ops rockchip_mmc_clk_ops = {
.recalc_rate = rockchip_mmc_recalc,
.get_phase = rockchip_mmc_get_phase,
.set_phase = rockchip_mmc_set_phase,
};
struct clk *rockchip_clk_register_mmc(const char *name,
const char **parent_names, u8 num_parents,
void __iomem *reg, int shift)
{
struct clk_init_data init;
struct rockchip_mmc_clock *mmc_clock;
struct clk *clk;
mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
if (!mmc_clock)
return NULL;
	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;
	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
goto err_free;
return clk;
err_free:
kfree(mmc_clock);
return NULL;
}
| gpl-2.0 |
javelinanddart/UBER-L | drivers/net/ethernet/realtek/8139cp.c | 947 | 56178 | /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
Copyright 2001 Manfred Spraul [natsemi.c]
Copyright 1999-2001 by Donald Becker. [natsemi.c]
Written 1997-2001 by Donald Becker. [8139too.c]
Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
See the file COPYING in this distribution for more information.
Contributors:
Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
TODO:
* Test Tx checksumming thoroughly
Low priority TODO:
* Complete reset on PciErr
* Consider Rx interrupt mitigation using TimerIntr
* Investigate using skb->priority with h/w VLAN priority
* Investigate using High Priority Tx Queue with skb->priority
* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
* Implement Tx software interrupt mitigation via
Tx descriptor bit
* The real minimum of CP_MIN_MTU is 4 bytes. However,
for this to be supported, one must(?) turn on packet padding.
* Support external MII transceivers (patch available)
NOTES:
* TX checksumming is considered experimental. It is off by
default, use ethtool to turn it on.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "8139cp"
#define DRV_VERSION "1.3"
#define DRV_RELDATE "Mar 22, 2004"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK)
#define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
#define CP_REGS_SIZE (0xff + 1)
#define CP_REGS_VER 1 /* version 1 */
#define CP_RX_RING_SIZE 64
#define CP_TX_RING_SIZE 64
#define CP_RING_BYTES \
((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
(sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
CP_STATS_SIZE)
#define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP) \
(((CP)->tx_tail <= (CP)->tx_head) ? \
(CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
(CP)->tx_tail - (CP)->tx_head - 1)
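/* Illustrative example of the ring arithmetic above (CP_TX_RING_SIZE 64):
 * with tx_tail == 5 and tx_head == 60, TX_BUFFS_AVAIL is
 * 5 + 63 - 60 = 8 free slots; with tx_tail == 60 and tx_head == 5 it is
 * 60 - 5 - 1 = 54. One slot is always kept free to distinguish a full
 * ring from an empty one.
 */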
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY 32
/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6*HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
#define CP_MAX_MTU 4096
enum {
/* NIC register offsets */
MAC0 = 0x00, /* Ethernet hardware address. */
MAR0 = 0x08, /* Multicast filter. */
StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
Cmd = 0x37, /* Command register */
IntrMask = 0x3C, /* Interrupt mask */
IntrStatus = 0x3E, /* Interrupt status */
TxConfig = 0x40, /* Tx configuration */
ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
RxConfig = 0x44, /* Rx configuration */
RxMissed = 0x4C, /* 24 bits valid, write clears */
Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
Config1 = 0x52, /* Config1 */
Config3 = 0x59, /* Config3 */
Config4 = 0x5A, /* Config4 */
MultiIntr = 0x5C, /* Multiple interrupt select */
BasicModeCtrl = 0x62, /* MII BMCR */
BasicModeStatus = 0x64, /* MII BMSR */
NWayAdvert = 0x66, /* MII ADVERTISE */
NWayLPAR = 0x68, /* MII LPA */
NWayExpansion = 0x6A, /* MII Expansion */
Config5 = 0xD8, /* Config5 */
TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
TxThresh = 0xEC, /* Early Tx threshold */
OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
/* Tx and Rx status descriptors */
DescOwn = (1 << 31), /* Descriptor is owned by NIC */
RingEnd = (1 << 30), /* End of descriptor ring */
FirstFrag = (1 << 29), /* First segment of a packet */
LastFrag = (1 << 28), /* Final segment of a packet */
LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
MSSShift = 16, /* MSS value position */
MSSMask = 0xfff, /* MSS value: 11 bits */
TxError = (1 << 23), /* Tx error summary */
RxError = (1 << 20), /* Rx error summary */
IPCS = (1 << 18), /* Calculate IP checksum */
UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
TxVlanTag = (1 << 17), /* Add VLAN tag */
RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
IPFail = (1 << 15), /* IP checksum failed */
UDPFail = (1 << 14), /* UDP/IP checksum failed */
TCPFail = (1 << 13), /* TCP/IP checksum failed */
NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
RxProtoTCP = 1,
RxProtoUDP = 2,
RxProtoIP = 3,
TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
TxOWC = (1 << 22), /* Tx Out-of-window collision */
TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
RxErrFrame = (1 << 27), /* Rx frame alignment error */
RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
RxErrCRC = (1 << 18), /* Rx CRC error */
RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
/* StatsAddr register */
DumpStats = (1 << 3), /* Begin stats dump */
/* RxConfig register */
RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
AcceptErr = 0x20, /* Accept packets with CRC errors */
AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
AcceptBroadcast = 0x08, /* Accept broadcast packets */
AcceptMulticast = 0x04, /* Accept multicast packets */
AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
/* IntrMask / IntrStatus registers */
PciErr = (1 << 15), /* System error on the PCI bus */
TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
LenChg = (1 << 13), /* Cable length change */
SWInt = (1 << 8), /* Software-requested interrupt */
TxEmpty = (1 << 7), /* No Tx descriptors available */
RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
LinkChg = (1 << 5), /* Packet underrun, or link change */
RxEmpty = (1 << 4), /* No Rx descriptors available */
TxErr = (1 << 3), /* Tx error */
TxOK = (1 << 2), /* Tx packet sent */
RxErr = (1 << 1), /* Rx error */
RxOK = (1 << 0), /* Rx packet received */
IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
but hardware likes to raise it */
IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
RxErr | RxOK | IntrResvd,
/* C mode command register */
CmdReset = (1 << 4), /* Enable to reset; self-clearing */
RxOn = (1 << 3), /* Rx mode enable */
TxOn = (1 << 2), /* Tx mode enable */
/* C+ mode command register */
RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
RxChkSum = (1 << 5), /* Rx checksum offload enable */
PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
CpRxOn = (1 << 1), /* Rx mode enable */
CpTxOn = (1 << 0), /* Tx mode enable */
/* Cfg9436 EEPROM control register */
Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
/* TxConfig register */
IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
/* Early Tx Threshold register */
TxThreshMask = 0x3f, /* Mask bits 5-0 */
TxThreshMax = 2048, /* Max early Tx threshold */
/* Config1 register */
DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
LWACT = (1 << 4), /* LWAKE active mode */
PMEnable = (1 << 0), /* Enable various PM features of chip */
/* Config3 register */
PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
/* Config4 register */
LWPTN = (1 << 1), /* LWAKE Pattern */
LWPME = (1 << 4), /* LANWAKE vs PMEB */
/* Config5 register */
BWF = (1 << 6), /* Accept Broadcast wakeup frame */
MWF = (1 << 5), /* Accept Multicast wakeup frame */
UWF = (1 << 4), /* Accept Unicast wakeup frame */
LANWake = (1 << 1), /* Enable LANWake signal */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};
static const unsigned int cp_rx_config =
(RX_FIFO_THRESH << RxCfgFIFOShift) |
(RX_DMA_BURST << RxCfgDMAShift);
struct cp_desc {
__le32 opts1;
__le32 opts2;
__le64 addr;
};
struct cp_dma_stats {
__le64 tx_ok;
__le64 rx_ok;
__le64 tx_err;
__le32 rx_err;
__le16 rx_fifo;
__le16 frame_align;
__le32 tx_ok_1col;
__le32 tx_ok_mcol;
__le64 rx_ok_phys;
__le64 rx_ok_bcast;
__le32 rx_ok_mcast;
__le16 tx_abort;
__le16 tx_underrun;
} __packed;
struct cp_extra_stats {
unsigned long rx_frags;
};
struct cp_private {
void __iomem *regs;
struct net_device *dev;
spinlock_t lock;
u32 msg_enable;
struct napi_struct napi;
struct pci_dev *pdev;
u32 rx_config;
u16 cpcmd;
struct cp_extra_stats cp_stats;
unsigned rx_head ____cacheline_aligned;
unsigned rx_tail;
struct cp_desc *rx_ring;
struct sk_buff *rx_skb[CP_RX_RING_SIZE];
unsigned tx_head ____cacheline_aligned;
unsigned tx_tail;
struct cp_desc *tx_ring;
struct sk_buff *tx_skb[CP_TX_RING_SIZE];
unsigned rx_buf_sz;
unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
dma_addr_t ring_dma;
struct mii_if_info mii_if;
};
#define cpr8(reg) readb(cp->regs + (reg))
#define cpr16(reg) readw(cp->regs + (reg))
#define cpr32(reg) readl(cp->regs + (reg))
#define cpw8(reg,val) writeb((val), cp->regs + (reg))
#define cpw16(reg,val) writew((val), cp->regs + (reg))
#define cpw32(reg,val) writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do { \
writeb((val), cp->regs + (reg)); \
readb(cp->regs + (reg)); \
} while (0)
#define cpw16_f(reg,val) do { \
writew((val), cp->regs + (reg)); \
readw(cp->regs + (reg)); \
} while (0)
#define cpw32_f(reg,val) do { \
writel((val), cp->regs + (reg)); \
readl(cp->regs + (reg)); \
} while (0)
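/* Note (added for clarity): the *_f variants above read the register back
 * immediately after writing it. PCI writes may be posted; the read forces
 * the preceding write to reach the chip before the driver continues, which
 * matters when ordering against delays or subsequent device behaviour.
 */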
static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
};
MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
static struct {
const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
{ "tx_ok" },
{ "rx_ok" },
{ "tx_err" },
{ "rx_err" },
{ "rx_fifo" },
{ "frame_align" },
{ "tx_ok_1col" },
{ "tx_ok_mcol" },
{ "rx_ok_phys" },
{ "rx_ok_bcast" },
{ "rx_ok_mcast" },
{ "tx_abort" },
{ "tx_underrun" },
{ "rx_frags" },
};
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
unsigned int mtu = cp->dev->mtu;
if (mtu > ETH_DATA_LEN)
/* MTU + ethernet header + FCS + optional VLAN tag */
cp->rx_buf_sz = mtu + ETH_HLEN + 8;
else
cp->rx_buf_sz = PKT_BUF_SZ;
}
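/* Illustrative note (not in the original source): for the default MTU of
 * 1500 the buffer stays at PKT_BUF_SZ (1536); for a jumbo MTU of e.g. 4000
 * it becomes 4000 + 14 (ETH_HLEN) + 8 (FCS plus optional VLAN tag) = 4022.
 */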
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
struct cp_desc *desc)
{
u32 opts2 = le32_to_cpu(desc->opts2);
skb->protocol = eth_type_trans (skb, cp->dev);
cp->dev->stats.rx_packets++;
cp->dev->stats.rx_bytes += skb->len;
if (opts2 & RxVlanTagged)
__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
napi_gro_receive(&cp->napi, skb);
}
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
u32 status, u32 len)
{
netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
rx_tail, status, len);
cp->dev->stats.rx_errors++;
if (status & RxErrFrame)
cp->dev->stats.rx_frame_errors++;
if (status & RxErrCRC)
cp->dev->stats.rx_crc_errors++;
if ((status & RxErrRunt) || (status & RxErrLong))
cp->dev->stats.rx_length_errors++;
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
cp->dev->stats.rx_length_errors++;
if (status & RxErrFIFO)
cp->dev->stats.rx_fifo_errors++;
}
static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
else
return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
{
struct cp_private *cp = container_of(napi, struct cp_private, napi);
struct net_device *dev = cp->dev;
unsigned int rx_tail = cp->rx_tail;
int rx;
rx_status_loop:
rx = 0;
cpw16(IntrStatus, cp_rx_intr_mask);
while (1) {
u32 status, len;
dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
const unsigned buflen = cp->rx_buf_sz;
skb = cp->rx_skb[rx_tail];
BUG_ON(!skb);
desc = &cp->rx_ring[rx_tail];
status = le32_to_cpu(desc->opts1);
if (status & DescOwn)
break;
len = (status & 0x1fff) - 4;
mapping = le64_to_cpu(desc->addr);
if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
/* we don't support incoming fragmented frames.
* instead, we attempt to ensure that the
* pre-allocated RX skbs are properly sized such
* that RX fragments are never encountered
*/
cp_rx_err_acct(cp, rx_tail, status, len);
dev->stats.rx_dropped++;
cp->cp_stats.rx_frags++;
goto rx_next;
}
if (status & (RxError | RxErrFIFO)) {
cp_rx_err_acct(cp, rx_tail, status, len);
goto rx_next;
}
netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
rx_tail, status, len);
new_skb = netdev_alloc_skb_ip_align(dev, buflen);
if (!new_skb) {
dev->stats.rx_dropped++;
goto rx_next;
}
new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
dev->stats.rx_dropped++;
kfree_skb(new_skb);
goto rx_next;
}
dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
/* Handle checksum offloading for incoming packets. */
if (cp_rx_csum_ok(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb_put(skb, len);
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
mapping = new_mapping;
rx_next:
cp->rx_ring[rx_tail].opts2 = 0;
cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
if (rx_tail == (CP_RX_RING_SIZE - 1))
desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
cp->rx_buf_sz);
else
desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
rx_tail = NEXT_RX(rx_tail);
if (rx >= budget)
break;
}
cp->rx_tail = rx_tail;
/* if we did not reach work limit, then we're done with
* this round of polling
*/
if (rx < budget) {
unsigned long flags;
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
napi_gro_flush(napi);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
spin_unlock_irqrestore(&cp->lock, flags);
}
return rx;
}
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct cp_private *cp;
u16 status;
if (unlikely(dev == NULL))
return IRQ_NONE;
cp = netdev_priv(dev);
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
return IRQ_NONE;
netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
spin_lock(&cp->lock);
/* close possible races with dev_close */
if (unlikely(!netif_running(dev))) {
cpw16(IntrMask, 0);
spin_unlock(&cp->lock);
return IRQ_HANDLED;
}
if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
if (napi_schedule_prep(&cp->napi)) {
cpw16_f(IntrMask, cp_norx_intr_mask);
__napi_schedule(&cp->napi);
}
if (status & (TxOK | TxErr | TxEmpty | SWInt))
cp_tx(cp);
if (status & LinkChg)
mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
spin_unlock(&cp->lock);
if (status & PciErr) {
u16 pci_status;
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
status, pci_status);
/* TODO: reset hardware */
}
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void cp_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
cp_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static void cp_tx (struct cp_private *cp)
{
unsigned tx_head = cp->tx_head;
unsigned tx_tail = cp->tx_tail;
while (tx_tail != tx_head) {
struct cp_desc *txd = cp->tx_ring + tx_tail;
struct sk_buff *skb;
u32 status;
rmb();
status = le32_to_cpu(txd->opts1);
if (status & DescOwn)
break;
skb = cp->tx_skb[tx_tail];
BUG_ON(!skb);
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
le32_to_cpu(txd->opts1) & 0xffff,
PCI_DMA_TODEVICE);
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
netif_dbg(cp, tx_err, cp->dev,
"tx err, status 0x%x\n", status);
cp->dev->stats.tx_errors++;
if (status & TxOWC)
cp->dev->stats.tx_window_errors++;
if (status & TxMaxCol)
cp->dev->stats.tx_aborted_errors++;
if (status & TxLinkFail)
cp->dev->stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
cp->dev->stats.tx_fifo_errors++;
} else {
cp->dev->stats.collisions +=
((status >> TxColCntShift) & TxColCntMask);
cp->dev->stats.tx_packets++;
cp->dev->stats.tx_bytes += skb->len;
netif_dbg(cp, tx_done, cp->dev,
"tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
}
cp->tx_skb[tx_tail] = NULL;
tx_tail = NEXT_TX(tx_tail);
}
cp->tx_tail = tx_tail;
if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(cp->dev);
}
static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
return vlan_tx_tag_present(skb) ?
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
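/* (Clarifying note, an assumption based on how related RealTek parts behave:
 * the descriptor appears to want the VLAN tag in big-endian/network byte
 * order, hence the swab16() applied to the host-order tag above.)
 */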
static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
int first, int entry_last)
{
int frag, index;
struct cp_desc *txd;
skb_frag_t *this_frag;
for (frag = 0; frag+first < entry_last; frag++) {
index = first+frag;
cp->tx_skb[index] = NULL;
txd = &cp->tx_ring[index];
this_frag = &skb_shinfo(skb)->frags[frag];
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
skb_frag_size(this_frag), PCI_DMA_TODEVICE);
}
}
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
u32 eor, flags;
unsigned long intr_flags;
__le32 opts2;
int mss = 0;
spin_lock_irqsave(&cp->lock, intr_flags);
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->lock, intr_flags);
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
entry = cp->tx_head;
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
mss = skb_shinfo(skb)->gso_size;
opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
u32 len;
dma_addr_t mapping;
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping))
goto out_dma_error;
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
flags = eor | len | DescOwn | FirstFrag | LastFrag;
if (mss)
flags |= LargeSend | ((mss & MSSMask) << MSSShift);
else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
flags |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
flags |= IPCS | UDPCS;
else
WARN_ON(1); /* checksum offload requested for an unsupported protocol */
}
txd->opts1 = cpu_to_le32(flags);
wmb();
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
} else {
struct cp_desc *txd;
u32 first_len, first_eor;
dma_addr_t first_mapping;
int frag, first_entry = entry;
const struct iphdr *ip = ip_hdr(skb);
/* We must give this initial chunk to the device last.
* Otherwise we could race with the device.
*/
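/* (Clarifying note: "last" here means DescOwn is set on the first
 * descriptor only after every fragment descriptor below has been fully
 * written out; the wmb() calls enforce that ordering so the chip never
 * sees a half-built descriptor chain.)
 */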
first_eor = eor;
first_len = skb_headlen(skb);
first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, first_mapping))
goto out_dma_error;
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
u32 ctrl;
dma_addr_t mapping;
len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
unwind_tx_frag_mapping(cp, skb, first_entry, entry);
goto out_dma_error;
}
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
ctrl = eor | len | DescOwn;
if (mss)
ctrl |= LargeSend |
((mss & MSSMask) << MSSShift);
else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
ctrl |= IPCS | TCPCS;
else if (ip->protocol == IPPROTO_UDP)
ctrl |= IPCS | UDPCS;
else
BUG();
}
if (frag == skb_shinfo(skb)->nr_frags - 1)
ctrl |= LastFrag;
txd = &cp->tx_ring[entry];
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
txd->opts1 = cpu_to_le32(ctrl);
wmb();
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
}
txd = &cp->tx_ring[first_entry];
txd->opts2 = opts2;
txd->addr = cpu_to_le64(first_mapping);
wmb();
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ip->protocol == IPPROTO_TCP)
txd->opts1 = cpu_to_le32(first_eor | first_len |
FirstFrag | DescOwn |
IPCS | TCPCS);
else if (ip->protocol == IPPROTO_UDP)
txd->opts1 = cpu_to_le32(first_eor | first_len |
FirstFrag | DescOwn |
IPCS | UDPCS);
else
BUG();
} else
txd->opts1 = cpu_to_le32(first_eor | first_len |
FirstFrag | DescOwn);
wmb();
}
cp->tx_head = entry;
netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
out_unlock:
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
out_dma_error:
kfree_skb(skb);
cp->dev->stats.tx_dropped++;
goto out_unlock;
}
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct netdev_hw_addr *ha;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
}
}
/* We can safely update without stopping the chip. */
cp->rx_config = cp_rx_config | rx_mode;
cpw32_f(RxConfig, cp->rx_config);
cpw32_f (MAR0 + 0, mc_filter[0]);
cpw32_f (MAR0 + 4, mc_filter[1]);
}
static void cp_set_rx_mode (struct net_device *dev)
{
unsigned long flags;
struct cp_private *cp = netdev_priv(dev);
spin_lock_irqsave (&cp->lock, flags);
__cp_set_rx_mode(dev);
spin_unlock_irqrestore (&cp->lock, flags);
}
static void __cp_get_stats(struct cp_private *cp)
{
/* only lower 24 bits valid; write any value to clear */
cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
cpw32 (RxMissed, 0);
}
static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
/* The chip only needs to report frames it silently dropped. */
spin_lock_irqsave(&cp->lock, flags);
if (netif_running(dev) && netif_device_present(dev))
__cp_get_stats(cp);
spin_unlock_irqrestore(&cp->lock, flags);
return &dev->stats;
}
static void cp_stop_hw (struct cp_private *cp)
{
cpw16(IntrStatus, ~(cpr16(IntrStatus)));
cpw16_f(IntrMask, 0);
cpw8(Cmd, 0);
cpw16_f(CpCmd, 0);
cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
cp->rx_tail = 0;
cp->tx_head = cp->tx_tail = 0;
}
static void cp_reset_hw (struct cp_private *cp)
{
unsigned work = 1000;
cpw8(Cmd, CmdReset);
while (work--) {
if (!(cpr8(Cmd) & CmdReset))
return;
schedule_timeout_uninterruptible(10);
}
netdev_err(cp->dev, "hardware reset timeout\n");
}
static inline void cp_start_hw (struct cp_private *cp)
{
cpw16(CpCmd, cp->cpcmd);
cpw8(Cmd, RxOn | TxOn);
}
static void cp_enable_irq(struct cp_private *cp)
{
cpw16_f(IntrMask, cp_intr_mask);
}
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
dma_addr_t ring_dma;
cp_reset_hw(cp);
cpw8_f (Cfg9346, Cfg9346_Unlock);
/* Restore our idea of the MAC address. */
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
__cp_set_rx_mode(dev);
cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
cpw8(Config3, PARMEnable);
cp->wol_enabled = 0;
cpw8(Config5, cpr8(Config5) & PMEStatus);
cpw32_f(HiTxRingAddr, 0);
cpw32_f(HiTxRingAddr + 4, 0);
ring_dma = cp->ring_dma;
cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
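/* (Clarifying note: the "(ring_dma >> 16) >> 16" idiom extracts the high 32
 * bits while remaining well-defined when dma_addr_t is only 32 bits wide,
 * where a single ">> 32" shift would be undefined behaviour.)
 */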
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
}
static int cp_refill_rx(struct cp_private *cp)
{
struct net_device *dev = cp->dev;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
struct sk_buff *skb;
dma_addr_t mapping;
skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
if (!skb)
goto err_out;
mapping = dma_map_single(&cp->pdev->dev, skb->data,
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&cp->pdev->dev, mapping)) {
kfree_skb(skb);
goto err_out;
}
cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
cp->rx_ring[i].addr = cpu_to_le64(mapping);
if (i == (CP_RX_RING_SIZE - 1))
cp->rx_ring[i].opts1 =
cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
else
cp->rx_ring[i].opts1 =
cpu_to_le32(DescOwn | cp->rx_buf_sz);
}
return 0;
err_out:
cp_clean_rings(cp);
return -ENOMEM;
}
static void cp_init_rings_index (struct cp_private *cp)
{
cp->rx_tail = 0;
cp->tx_head = cp->tx_tail = 0;
}
static int cp_init_rings (struct cp_private *cp)
{
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
cp_init_rings_index(cp);
return cp_refill_rx (cp);
}
static int cp_alloc_rings (struct cp_private *cp)
{
void *mem;
mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
&cp->ring_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
cp->rx_ring = mem;
cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
return cp_init_rings(cp);
}
static void cp_clean_rings (struct cp_private *cp)
{
struct cp_desc *desc;
unsigned i;
for (i = 0; i < CP_RX_RING_SIZE; i++) {
if (cp->rx_skb[i]) {
desc = cp->rx_ring + i;
dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb(cp->rx_skb[i]);
}
}
for (i = 0; i < CP_TX_RING_SIZE; i++) {
if (cp->tx_skb[i]) {
struct sk_buff *skb = cp->tx_skb[i];
desc = cp->tx_ring + i;
dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
le32_to_cpu(desc->opts1) & 0xffff,
PCI_DMA_TODEVICE);
if (le32_to_cpu(desc->opts1) & LastFrag)
dev_kfree_skb(skb);
cp->dev->stats.tx_dropped++;
}
}
netdev_reset_queue(cp->dev);
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}
static void cp_free_rings (struct cp_private *cp)
{
cp_clean_rings(cp);
dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
cp->ring_dma);
cp->rx_ring = NULL;
cp->tx_ring = NULL;
}
static int cp_open (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
netif_dbg(cp, ifup, dev, "enabling interface\n");
rc = cp_alloc_rings(cp);
if (rc)
return rc;
napi_enable(&cp->napi);
cp_init_hw(cp);
rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
goto err_out_hw;
cp_enable_irq(cp);
netif_carrier_off(dev);
mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
netif_start_queue(dev);
return 0;
err_out_hw:
napi_disable(&cp->napi);
cp_stop_hw(cp);
cp_free_rings(cp);
return rc;
}
static int cp_close (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
napi_disable(&cp->napi);
netif_dbg(cp, ifdown, dev, "disabling interface\n");
spin_lock_irqsave(&cp->lock, flags);
netif_stop_queue(dev);
netif_carrier_off(dev);
cp_stop_hw(cp);
spin_unlock_irqrestore(&cp->lock, flags);
free_irq(dev->irq, dev);
cp_free_rings(cp);
return 0;
}
static void cp_tx_timeout(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
int rc;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
cpr16(IntrStatus), cpr16(IntrMask));
spin_lock_irqsave(&cp->lock, flags);
cp_stop_hw(cp);
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
cp_enable_irq(cp);
netif_wake_queue(dev);
spin_unlock_irqrestore(&cp->lock, flags);
}
#ifdef BROKEN
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
/* check for invalid MTU, according to hardware limits */
if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
return -EINVAL;
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
dev->mtu = new_mtu;
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
spin_lock_irqsave(&cp->lock, flags);
cp_stop_hw(cp); /* stop h/w and free rings */
cp_clean_rings(cp);
dev->mtu = new_mtu;
cp_set_rxbufsize(cp); /* set new rx buf size */
rc = cp_init_rings(cp); /* realloc and restart h/w */
cp_start_hw(cp);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
#endif /* BROKEN */
static const char mii_2_8139_map[8] = {
BasicModeCtrl,
BasicModeStatus,
0,
0,
NWayAdvert,
NWayLPAR,
NWayExpansion,
0
};
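/* (Clarifying note: the table above maps generic MII register numbers
 * (BMCR, BMSR, ANAR, ...) onto this chip's own register offsets; the zero
 * entries are MII registers the 8139 core does not expose, for which
 * mdio_read() below simply returns 0.)
 */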
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
struct cp_private *cp = netdev_priv(dev);
return location < 8 && mii_2_8139_map[location] ?
readw(cp->regs + mii_2_8139_map[location]) : 0;
}
static void mdio_write(struct net_device *dev, int phy_id, int location,
int value)
{
struct cp_private *cp = netdev_priv(dev);
if (location == 0) {
cpw8(Cfg9346, Cfg9346_Unlock);
cpw16(BasicModeCtrl, value);
cpw8(Cfg9346, Cfg9346_Lock);
} else if (location < 8 && mii_2_8139_map[location])
cpw16(mii_2_8139_map[location], value);
}
/* Set the ethtool Wake-on-LAN settings */
static int netdev_set_wol (struct cp_private *cp,
const struct ethtool_wolinfo *wol)
{
u8 options;
options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
/* If WOL is being disabled, no need for complexity */
if (wol->wolopts) {
if (wol->wolopts & WAKE_PHY) options |= LinkUp;
if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
}
cpw8 (Cfg9346, Cfg9346_Unlock);
cpw8 (Config3, options);
cpw8 (Cfg9346, Cfg9346_Lock);
options = 0; /* Paranoia setting */
options = cpr8 (Config5) & ~(UWF | MWF | BWF);
/* If WOL is being disabled, no need for complexity */
if (wol->wolopts) {
if (wol->wolopts & WAKE_UCAST) options |= UWF;
if (wol->wolopts & WAKE_BCAST) options |= BWF;
if (wol->wolopts & WAKE_MCAST) options |= MWF;
}
cpw8 (Config5, options);
cp->wol_enabled = (wol->wolopts) ? 1 : 0;
return 0;
}
/* Get the ethtool Wake-on-LAN settings */
static void netdev_get_wol (struct cp_private *cp,
struct ethtool_wolinfo *wol)
{
u8 options;
wol->wolopts = 0; /* Start from scratch */
wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
WAKE_MCAST | WAKE_UCAST;
/* We don't need to go on if WOL is disabled */
if (!cp->wol_enabled) return;
options = cpr8 (Config3);
if (options & LinkUp) wol->wolopts |= WAKE_PHY;
if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
options = 0; /* Paranoia setting */
options = cpr8 (Config5);
if (options & UWF) wol->wolopts |= WAKE_UCAST;
if (options & BWF) wol->wolopts |= WAKE_BCAST;
if (options & MWF) wol->wolopts |= WAKE_MCAST;
}
static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cp_private *cp = netdev_priv(dev);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
ring->rx_max_pending = CP_RX_RING_SIZE;
ring->tx_max_pending = CP_TX_RING_SIZE;
ring->rx_pending = CP_RX_RING_SIZE;
ring->tx_pending = CP_TX_RING_SIZE;
}
static int cp_get_regs_len(struct net_device *dev)
{
return CP_REGS_SIZE;
}
static int cp_get_sset_count (struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return CP_NUM_STATS;
default:
return -EOPNOTSUPP;
}
}
static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
rc = mii_ethtool_gset(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
spin_lock_irqsave(&cp->lock, flags);
rc = mii_ethtool_sset(&cp->mii_if, cmd);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
static int cp_nway_reset(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
return mii_nway_restart(&cp->mii_if);
}
static u32 cp_get_msglevel(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
return cp->msg_enable;
}
static void cp_set_msglevel(struct net_device *dev, u32 value)
{
struct cp_private *cp = netdev_priv(dev);
cp->msg_enable = value;
}
static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
if (!((dev->features ^ features) & NETIF_F_RXCSUM))
return 0;
spin_lock_irqsave(&cp->lock, flags);
if (features & NETIF_F_RXCSUM)
cp->cpcmd |= RxChkSum;
else
cp->cpcmd &= ~RxChkSum;
if (features & NETIF_F_HW_VLAN_RX)
cp->cpcmd |= RxVlanOn;
else
cp->cpcmd &= ~RxVlanOn;
cpw16_f(CpCmd, cp->cpcmd);
spin_unlock_irqrestore(&cp->lock, flags);
return 0;
}
static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
if (regs->len < CP_REGS_SIZE)
return /* -EINVAL */;
regs->version = CP_REGS_VER;
spin_lock_irqsave(&cp->lock, flags);
memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
spin_unlock_irqrestore(&cp->lock, flags);
}
static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave (&cp->lock, flags);
netdev_get_wol (cp, wol);
spin_unlock_irqrestore (&cp->lock, flags);
}
static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
int rc;
spin_lock_irqsave (&cp->lock, flags);
rc = netdev_set_wol (cp, wol);
spin_unlock_irqrestore (&cp->lock, flags);
return rc;
}
static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
default:
BUG();
break;
}
}
static void cp_get_ethtool_stats (struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct cp_private *cp = netdev_priv(dev);
struct cp_dma_stats *nic_stats;
dma_addr_t dma;
int i;
nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
&dma, GFP_KERNEL);
if (!nic_stats)
return;
/* begin NIC statistics dump */
cpw32(StatsAddr + 4, (u64)dma >> 32);
cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
cpr32(StatsAddr);
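/* (Clarifying note: the chip clears DumpStats once its DMA of the
 * statistics block has finished, so the loop below polls for up to
 * roughly 1000 * 10us = 10ms before giving up.)
 */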
for (i = 0; i < 1000; i++) {
if ((cpr32(StatsAddr) & DumpStats) == 0)
break;
udelay(10);
}
cpw32(StatsAddr, 0);
cpw32(StatsAddr + 4, 0);
cpr32(StatsAddr);
i = 0;
tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
tmp_stats[i++] = cp->cp_stats.rx_frags;
BUG_ON(i != CP_NUM_STATS);
dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
}
static const struct ethtool_ops cp_ethtool_ops = {
.get_drvinfo = cp_get_drvinfo,
.get_regs_len = cp_get_regs_len,
.get_sset_count = cp_get_sset_count,
.get_settings = cp_get_settings,
.set_settings = cp_set_settings,
.nway_reset = cp_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = cp_get_msglevel,
.set_msglevel = cp_set_msglevel,
.get_regs = cp_get_regs,
.get_wol = cp_get_wol,
.set_wol = cp_set_wol,
.get_strings = cp_get_strings,
.get_ethtool_stats = cp_get_ethtool_stats,
.get_eeprom_len = cp_get_eeprom_len,
.get_eeprom = cp_get_eeprom,
.set_eeprom = cp_set_eeprom,
.get_ringparam = cp_get_ringparam,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
struct cp_private *cp = netdev_priv(dev);
int rc;
unsigned long flags;
if (!netif_running(dev))
return -EINVAL;
spin_lock_irqsave(&cp->lock, flags);
rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
spin_unlock_irqrestore(&cp->lock, flags);
return rc;
}
static int cp_set_mac_address(struct net_device *dev, void *p)
{
struct cp_private *cp = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&cp->lock);
cpw8_f(Cfg9346, Cfg9346_Unlock);
cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
cpw8_f(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&cp->lock);
return 0;
}
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
#define EE_WRITE_0 0x00
#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
No extra delay is needed with 33MHz PCI, but 66MHz may change this.
*/
#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD (4)
#define EE_WRITE_CMD (5)
#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
#define EE_EWDS_ADDR (0)
#define EE_WRAL_ADDR (1)
#define EE_ERAL_ADDR (2)
#define EE_EWEN_ADDR (3)
#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
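/* (Worked example, assuming the 93C46/93C56-style command layout this code
 * expects: reading word 5 of a 6-bit-address part sends the 9-bit pattern
 * 1 10 000101 -- i.e. (EE_READ_CMD << 6) | 5 -- which eeprom_cmd() clocks
 * out MSB first with cmd_len = 3 + addr_len = 9.)
 */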
static void eeprom_cmd_start(void __iomem *ee_addr)
{
writeb (EE_ENB & ~EE_CS, ee_addr);
writeb (EE_ENB, ee_addr);
eeprom_delay ();
}
static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
int i;
/* Shift the command bits out. */
for (i = cmd_len - 1; i >= 0; i--) {
int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
writeb (EE_ENB | dataval, ee_addr);
eeprom_delay ();
writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
eeprom_delay ();
}
writeb (EE_ENB, ee_addr);
eeprom_delay ();
}
static void eeprom_cmd_end(void __iomem *ee_addr)
{
writeb (~EE_CS, ee_addr);
eeprom_delay ();
}
static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
int addr_len)
{
int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
eeprom_cmd_start(ee_addr);
eeprom_cmd(ee_addr, cmd, 3 + addr_len);
eeprom_cmd_end(ee_addr);
}
static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
int i;
u16 retval = 0;
void __iomem *ee_addr = ioaddr + Cfg9346;
int read_cmd = location | (EE_READ_CMD << addr_len);
eeprom_cmd_start(ee_addr);
eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
for (i = 16; i > 0; i--) {
writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
eeprom_delay ();
retval =
(retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
0);
writeb (EE_ENB, ee_addr);
eeprom_delay ();
}
eeprom_cmd_end(ee_addr);
return retval;
}
static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
int addr_len)
{
int i;
void __iomem *ee_addr = ioaddr + Cfg9346;
int write_cmd = location | (EE_WRITE_CMD << addr_len);
eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
eeprom_cmd_start(ee_addr);
eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
eeprom_cmd(ee_addr, val, 16);
eeprom_cmd_end(ee_addr);
eeprom_cmd_start(ee_addr);
for (i = 0; i < 20000; i++)
if (readb(ee_addr) & EE_DATA_READ)
break;
eeprom_cmd_end(ee_addr);
eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
}
static int cp_get_eeprom_len(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
int size;
spin_lock_irq(&cp->lock);
size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
spin_unlock_irq(&cp->lock);
return size;
}
static int cp_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct cp_private *cp = netdev_priv(dev);
unsigned int addr_len;
u16 val;
u32 offset = eeprom->offset >> 1;
u32 len = eeprom->len;
u32 i = 0;
eeprom->magic = CP_EEPROM_MAGIC;
spin_lock_irq(&cp->lock);
addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
if (eeprom->offset & 1) {
val = read_eeprom(cp->regs, offset, addr_len);
data[i++] = (u8)(val >> 8);
offset++;
}
while (i < len - 1) {
val = read_eeprom(cp->regs, offset, addr_len);
data[i++] = (u8)val;
data[i++] = (u8)(val >> 8);
offset++;
}
if (i < len) {
val = read_eeprom(cp->regs, offset, addr_len);
data[i] = (u8)val;
}
spin_unlock_irq(&cp->lock);
return 0;
}
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct cp_private *cp = netdev_priv(dev);
unsigned int addr_len;
u16 val;
u32 offset = eeprom->offset >> 1;
u32 len = eeprom->len;
u32 i = 0;
if (eeprom->magic != CP_EEPROM_MAGIC)
return -EINVAL;
spin_lock_irq(&cp->lock);
addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
if (eeprom->offset & 1) {
val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
val |= (u16)data[i++] << 8;
write_eeprom(cp->regs, offset, val, addr_len);
offset++;
}
while (i < len - 1) {
val = (u16)data[i++];
val |= (u16)data[i++] << 8;
write_eeprom(cp->regs, offset, val, addr_len);
offset++;
}
if (i < len) {
val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
val |= (u16)data[i];
write_eeprom(cp->regs, offset, val, addr_len);
}
spin_unlock_irq(&cp->lock);
return 0;
}
/* Put the board into a D3 low-power state and wait for WakeUp signal */
static void cp_set_d3_state (struct cp_private *cp)
{
pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
pci_set_power_state (cp->pdev, PCI_D3hot);
}
static const struct net_device_ops cp_netdev_ops = {
.ndo_open = cp_open,
.ndo_stop = cp_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = cp_set_mac_address,
.ndo_set_rx_mode = cp_set_rx_mode,
.ndo_get_stats = cp_get_stats,
.ndo_do_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
#ifdef BROKEN
.ndo_change_mtu = cp_change_mtu,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cp_poll_controller,
#endif
};
static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct cp_private *cp;
int rc;
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
#ifndef MODULE
static int version_printed;
if (version_printed++ == 0)
pr_info("%s", version);
#endif
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
dev_info(&pdev->dev,
"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
pdev->vendor, pdev->device, pdev->revision);
return -ENODEV;
}
dev = alloc_etherdev(sizeof(struct cp_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
cp = netdev_priv(dev);
cp->pdev = pdev;
cp->dev = dev;
cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
spin_lock_init (&cp->lock);
cp->mii_if.dev = dev;
cp->mii_if.mdio_read = mdio_read;
cp->mii_if.mdio_write = mdio_write;
cp->mii_if.phy_id = CP_INTERNAL_PHY;
cp->mii_if.phy_id_mask = 0x1f;
cp->mii_if.reg_num_mask = 0x1f;
cp_set_rxbufsize(cp);
rc = pci_enable_device(pdev);
if (rc)
goto err_out_free;
rc = pci_set_mwi(pdev);
if (rc)
goto err_out_disable;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_mwi;
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
dev_err(&pdev->dev, "no MMIO resource\n");
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
rc = -EIO;
dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
(unsigned long long)pci_resource_len(pdev, 1));
goto err_out_res;
}
/* Configure DMA attributes. */
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
pci_using_dac = 0;
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"No usable DMA configuration, aborting\n");
goto err_out_res;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
"No usable consistent DMA configuration, aborting\n");
goto err_out_res;
}
}
cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_RXCSUM;
regs = ioremap(pciaddr, CP_REGS_SIZE);
if (!regs) {
rc = -EIO;
dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
(unsigned long long)pci_resource_len(pdev, 1),
(unsigned long long)pciaddr);
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
cp->regs = regs;
cp_stop_hw(cp);
/* read MAC address from EEPROM */
addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
/* disabled by default until verified */
dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->irq = pdev->irq;
rc = register_netdev(dev);
if (rc)
goto err_out_iomap;
netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
dev->base_addr, dev->dev_addr, dev->irq);
pci_set_drvdata(pdev, dev);
/* enable busmastering and memory-write-invalidate */
pci_set_master(pdev);
if (cp->wol_enabled)
cp_set_d3_state (cp);
return 0;
err_out_iomap:
iounmap(regs);
err_out_res:
pci_release_regions(pdev);
err_out_mwi:
pci_clear_mwi(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out_free:
free_netdev(dev);
return rc;
}
static void cp_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev);
unregister_netdev(dev);
iounmap(cp->regs);
if (cp->wol_enabled)
pci_set_power_state (pdev, PCI_D0);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
#ifdef CONFIG_PM
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
if (!netif_running(dev))
return 0;
netif_device_detach (dev);
netif_stop_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
/* Disable Rx and Tx */
cpw16 (IntrMask, 0);
cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
spin_unlock_irqrestore (&cp->lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int cp_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
if (!netif_running(dev))
return 0;
netif_device_attach (dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
cp_enable_irq(cp);
netif_start_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
spin_unlock_irqrestore (&cp->lock, flags);
return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver cp_driver = {
.name = DRV_NAME,
.id_table = cp_pci_tbl,
.probe = cp_init_one,
.remove = cp_remove_one,
#ifdef CONFIG_PM
.resume = cp_resume,
.suspend = cp_suspend,
#endif
};
static int __init cp_init (void)
{
#ifdef MODULE
pr_info("%s", version);
#endif
return pci_register_driver(&cp_driver);
}
static void __exit cp_exit (void)
{
pci_unregister_driver (&cp_driver);
}
module_init(cp_init);
module_exit(cp_exit);
| gpl-2.0 |
imoseyon/leanKernel-galaxy-nexus | init/calibrate.c | 2227 | 7731 | /* calibrate.c: default delay calibration
*
* Excised from init/main.c
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
preset_lpj = simple_strtoul(str,NULL,0);
return 1;
}
__setup("lpj=", lpj_setup);
#ifdef ARCH_HAS_READ_CURRENT_TIMER
/* This routine uses the read_current_timer() routine and gets the
* loops per jiffy directly, instead of guessing it using delay().
* Also, this code tries to handle non-maskable asynchronous events
* (like SMIs)
*/
#define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES 5
static unsigned long __cpuinit calibrate_delay_direct(void)
{
unsigned long pre_start, start, post_start;
unsigned long pre_end, end, post_end;
unsigned long start_jiffies;
unsigned long timer_rate_min, timer_rate_max;
unsigned long good_timer_sum = 0;
unsigned long good_timer_count = 0;
unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
int max = -1; /* index of measured_times with max/min values or not set */
int min = -1;
int i;
if (read_current_timer(&pre_start) < 0 )
return 0;
/*
* A simple loop like
* while ( jiffies < start_jiffies+1)
* start = read_current_timer();
* will not do, as we don't really know whether the jiffy switch
* happened first or the timer value was read first, and some asynchronous
* event can happen between these two events, introducing errors in lpj.
*
* So, we do
* 1. pre_start <- When we are sure that jiffy switch hasn't happened
* 2. check jiffy switch
* 3. start <- timer value before or after jiffy switch
* 4. post_start <- When we are sure that jiffy switch has happened
*
* Note, we don't know anything about order of 2 and 3.
* Now, by looking at post_start and pre_start difference, we can
* check whether any asynchronous event happened or not
*/
for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
pre_start = 0;
read_current_timer(&start);
start_jiffies = jiffies;
while (time_before_eq(jiffies, start_jiffies + 1)) {
pre_start = start;
read_current_timer(&start);
}
read_current_timer(&post_start);
pre_end = 0;
end = post_start;
while (time_before_eq(jiffies, start_jiffies + 1 +
DELAY_CALIBRATION_TICKS)) {
pre_end = end;
read_current_timer(&end);
}
read_current_timer(&post_end);
timer_rate_max = (post_end - pre_start) /
DELAY_CALIBRATION_TICKS;
timer_rate_min = (pre_end - post_start) /
DELAY_CALIBRATION_TICKS;
/*
* If the upper limit and lower limit of the timer_rate are
* >= 12.5% apart, redo calibration.
*/
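/* (Illustrative example, not from the original: with
 * timer_rate_max = 2,000,000 and timer_rate_min = 1,900,000 counts per
 * calibration window, the spread of 100,000 is under
 * timer_rate_max >> 3 = 250,000, so the sample is kept.)
 */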
if (start >= post_end)
printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
"timer_rate as we had a TSC wrap around"
" start=%lu >=post_end=%lu\n",
start, post_end);
if (start < post_end && pre_start != 0 && pre_end != 0 &&
(timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
good_timer_count++;
good_timer_sum += timer_rate_max;
measured_times[i] = timer_rate_max;
if (max < 0 || timer_rate_max > measured_times[max])
max = i;
if (min < 0 || timer_rate_max < measured_times[min])
min = i;
} else
measured_times[i] = 0;
}
/*
* Find the maximum & minimum - if they differ too much throw out the
* one with the largest difference from the mean and try again...
*/
while (good_timer_count > 1) {
unsigned long estimate;
unsigned long maxdiff;
/* compute the estimate */
estimate = (good_timer_sum/good_timer_count);
maxdiff = estimate >> 3;
/* if the range is within 12.5%, let's take it */
if ((measured_times[max] - measured_times[min]) < maxdiff)
return estimate;
/* ok - drop the worse value and try again... */
good_timer_sum = 0;
good_timer_count = 0;
if ((measured_times[max] - estimate) <
(estimate - measured_times[min])) {
printk(KERN_NOTICE "calibrate_delay_direct() dropping "
"min bogoMips estimate %d = %lu\n",
min, measured_times[min]);
measured_times[min] = 0;
min = max;
} else {
printk(KERN_NOTICE "calibrate_delay_direct() dropping "
"max bogoMips estimate %d = %lu\n",
max, measured_times[max]);
measured_times[max] = 0;
max = min;
}
for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
if (measured_times[i] == 0)
continue;
good_timer_count++;
good_timer_sum += measured_times[i];
if (measured_times[i] < measured_times[min])
min = i;
if (measured_times[i] > measured_times[max])
max = i;
}
}
printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
"estimate for loops_per_jiffy.\nProbably due to long platform "
"interrupts. Consider using \"lpj=\" boot option.\n");
return 0;
}
#else
static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
#endif
/*
* This is the number of bits of precision for the loops_per_jiffy. Each
* refinement of our estimate after the first takes 1.5/HZ seconds, so try
* to start with a good estimate.
* For the boot cpu we can skip the delay calibration and assign it a value
* calculated based on the timer frequency.
* For the rest of the CPUs we cannot assume that the timer frequency is same as
* the cpu frequency, hence do the calibration for those.
*/
#define LPS_PREC 8
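/* (Clarifying note: with LPS_PREC = 8, the binary chop in
 * calibrate_delay_converge() keeps halving its step until it drops below
 * lpj >> 8, so lpj ends up known to within about 1/256, i.e. ~0.4%.)
 */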
static unsigned long __cpuinit calibrate_delay_converge(void)
{
/* First stage - slowly accelerate to find initial bounds */
unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
int trials = 0, band = 0, trial_in_band = 0;
lpj = (1<<12);
/* wait for "start of" clock tick */
ticks = jiffies;
while (ticks == jiffies)
; /* nothing */
/* Go .. */
ticks = jiffies;
do {
if (++trial_in_band == (1<<band)) {
++band;
trial_in_band = 0;
}
__delay(lpj * band);
trials += band;
} while (ticks == jiffies);
/*
* We overshot, so retreat to a clear underestimate. Then estimate
* the largest likely undershoot. This defines our chop bounds.
*/
trials -= band;
loopadd_base = lpj * band;
lpj_base = lpj * trials;
recalibrate:
lpj = lpj_base;
loopadd = loopadd_base;
/*
* Do a binary approximation to get lpj set to
* equal one clock (up to LPS_PREC bits)
*/
chop_limit = lpj >> LPS_PREC;
while (loopadd > chop_limit) {
lpj += loopadd;
ticks = jiffies;
while (ticks == jiffies)
; /* nothing */
ticks = jiffies;
__delay(lpj);
if (jiffies != ticks) /* longer than 1 tick */
lpj -= loopadd;
loopadd >>= 1;
}
/*
* If we incremented every single time possible, presume we've
* massively underestimated initially, and retry with a higher
* start, and larger range. (Only seen on x86_64, due to SMIs)
*/
if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
lpj_base = lpj;
loopadd_base <<= 2;
goto recalibrate;
}
return lpj;
}
void __cpuinit calibrate_delay(void)
{
unsigned long lpj;
static bool printed;
if (preset_lpj) {
lpj = preset_lpj;
if (!printed)
pr_info("Calibrating delay loop (skipped) "
"preset value.. ");
} else if ((!printed) && lpj_fine) {
lpj = lpj_fine;
pr_info("Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((lpj = calibrate_delay_direct()) != 0) {
if (!printed)
pr_info("Calibrating delay using timer "
"specific routine.. ");
} else {
if (!printed)
pr_info("Calibrating delay loop... ");
lpj = calibrate_delay_converge();
}
if (!printed)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
lpj/(500000/HZ),
(lpj/(5000/HZ)) % 100, lpj);
loops_per_jiffy = lpj;
printed = true;
}
| gpl-2.0 |
mingit/mstcp_v0.89.4 | drivers/clk/mxs/clk-div.c | 4019 | 2685 | /*
* Copyright 2012 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "clk.h"
/**
* struct clk_div - mxs integer divider clock
* @divider: the parent class
* @ops: pointer to clk_ops of parent class
* @reg: register address
* @busy: busy bit shift
*
* The mxs divider clock is a subclass of basic clk_divider with an
* additional busy bit.
*/
struct clk_div {
struct clk_divider divider;
const struct clk_ops *ops;
void __iomem *reg;
u8 busy;
};
static inline struct clk_div *to_clk_div(struct clk_hw *hw)
{
struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
return container_of(divider, struct clk_div, divider);
}
static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_div *div = to_clk_div(hw);
return div->ops->recalc_rate(&div->divider.hw, parent_rate);
}
static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_div *div = to_clk_div(hw);
return div->ops->round_rate(&div->divider.hw, rate, prate);
}
static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_div *div = to_clk_div(hw);
int ret;
ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
if (!ret)
ret = mxs_clk_wait(div->reg, div->busy);
return ret;
}
static struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
.round_rate = clk_div_round_rate,
.set_rate = clk_div_set_rate,
};
struct clk *mxs_clk_div(const char *name, const char *parent_name,
void __iomem *reg, u8 shift, u8 width, u8 busy)
{
struct clk_div *div;
struct clk *clk;
struct clk_init_data init;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &clk_div_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
div->reg = reg;
div->busy = busy;
div->divider.reg = reg;
div->divider.shift = shift;
div->divider.width = width;
div->divider.flags = CLK_DIVIDER_ONE_BASED;
div->divider.lock = &mxs_lock;
div->divider.hw.init = &init;
div->ops = &clk_divider_ops;
clk = clk_register(NULL, &div->divider.hw);
if (IS_ERR(clk))
kfree(div);
return clk;
}
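/* (Hypothetical usage sketch, not part of the original file: an mxs SoC
 * clock driver would register a divider along the lines of
 *
 *	clk = mxs_clk_div("ssp", "ssp_sel", CLKCTRL + 0x90, 0, 9, 29);
 *
 * where bits 0..8 of the register hold the divider value and bit 29 is the
 * busy flag that mxs_clk_wait() polls after each rate change.)
 */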
| gpl-2.0 |
arokux/linux | drivers/ide/icside.c | 4275 | 16635 | /*
* Copyright (c) 1996-2004 Russell King.
*
* Please note that this platform does not support 32-bit IDE IO.
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/ide.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#define DRV_NAME "icside"
#define ICS_IDENT_OFFSET 0x2280
#define ICS_ARCIN_V5_INTRSTAT 0x0000
#define ICS_ARCIN_V5_INTROFFSET 0x0004
#define ICS_ARCIN_V5_IDEOFFSET 0x2800
#define ICS_ARCIN_V5_IDEALTOFFSET 0x2b80
#define ICS_ARCIN_V5_IDESTEPPING 6
#define ICS_ARCIN_V6_IDEOFFSET_1 0x2000
#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
#define ICS_ARCIN_V6_IDEALTOFFSET_1 0x2380
#define ICS_ARCIN_V6_IDEOFFSET_2 0x3000
#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
#define ICS_ARCIN_V6_IDEALTOFFSET_2 0x3380
#define ICS_ARCIN_V6_IDESTEPPING 6
struct cardinfo {
unsigned int dataoffset;
unsigned int ctrloffset;
unsigned int stepping;
};
static struct cardinfo icside_cardinfo_v5 = {
.dataoffset = ICS_ARCIN_V5_IDEOFFSET,
.ctrloffset = ICS_ARCIN_V5_IDEALTOFFSET,
.stepping = ICS_ARCIN_V5_IDESTEPPING,
};
static struct cardinfo icside_cardinfo_v6_1 = {
.dataoffset = ICS_ARCIN_V6_IDEOFFSET_1,
.ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_1,
.stepping = ICS_ARCIN_V6_IDESTEPPING,
};
static struct cardinfo icside_cardinfo_v6_2 = {
.dataoffset = ICS_ARCIN_V6_IDEOFFSET_2,
.ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_2,
.stepping = ICS_ARCIN_V6_IDESTEPPING,
};
struct icside_state {
unsigned int channel;
unsigned int enabled;
void __iomem *irq_port;
void __iomem *ioc_base;
unsigned int sel;
unsigned int type;
struct ide_host *host;
};
#define ICS_TYPE_A3IN 0
#define ICS_TYPE_A3USER 1
#define ICS_TYPE_V6 3
#define ICS_TYPE_V5 15
#define ICS_TYPE_NOTYPE ((unsigned int)-1)
/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
* Purpose : enable interrupts from card
*/
static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
/* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
* Purpose : disable interrupts from card
*/
static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
static const expansioncard_ops_t icside_ops_arcin_v5 = {
.irqenable = icside_irqenable_arcin_v5,
.irqdisable = icside_irqdisable_arcin_v5,
};
/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
* Purpose : enable interrupts from card
*/
static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
void __iomem *base = state->irq_port;
state->enabled = 1;
switch (state->channel) {
case 0:
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
readb(base + ICS_ARCIN_V6_INTROFFSET_2);
break;
case 1:
writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
readb(base + ICS_ARCIN_V6_INTROFFSET_1);
break;
}
}
/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
* Purpose : disable interrupts from card
*/
static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
state->enabled = 0;
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
/* Prototype: icside_irqprobe(struct expansion_card *ec)
* Purpose : detect an active interrupt from card
*/
static int icside_irqpending_arcin_v6(struct expansion_card *ec)
{
struct icside_state *state = ec->irq_data;
return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
}
static const expansioncard_ops_t icside_ops_arcin_v6 = {
.irqenable = icside_irqenable_arcin_v6,
.irqdisable = icside_irqdisable_arcin_v6,
.irqpending = icside_irqpending_arcin_v6,
};
/*
* Handle routing of interrupts. This is called before
* we write the command to the drive.
*/
static void icside_maskproc(ide_drive_t *drive, int mask)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
struct icside_state *state = ecard_get_drvdata(ec);
unsigned long flags;
local_irq_save(flags);
state->channel = hwif->channel;
if (state->enabled && !mask) {
switch (hwif->channel) {
case 0:
writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
break;
case 1:
writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
break;
}
} else {
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
}
local_irq_restore(flags);
}
static const struct ide_port_ops icside_v6_no_dma_port_ops = {
.maskproc = icside_maskproc,
};
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
* SG-DMA support.
*
* Similar to the BM-DMA, but we use the RiscPC's IOMD DMA controllers.
* There is only one DMA controller per card, which means that only
* one drive can be accessed at one time. NOTE! We do not enforce that
* here, but we rely on the main IDE driver spotting that both
* interfaces use the same IRQ, which should guarantee this.
*/
/*
* Configure the IOMD to give the appropriate timings for the transfer
* mode being requested. We take the advice of the ATA standards, and
* calculate the cycle time based on the transfer mode, and the EIDE
* MW DMA specs that the drive provides in the IDENTIFY command.
*
* We have the following IOMD DMA modes to choose from:
*
* Type Active Recovery Cycle
* A 250 (250) 312 (550) 562 (800)
* B 187 250 437
* C 125 (125) 125 (375) 250 (500)
* D 62 125 187
*
* (figures in brackets are actual measured timings)
*
* However, we also need to take care of the read/write active and
* recovery timings:
*
* Read Write
* Mode Active -- Recovery -- Cycle IOMD type
* MW0 215 50 215 480 A
* MW1 80 50 50 150 C
* MW2 70 25 25 120 C
*/
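/* (Worked example, added for illustration: a request for MW_DMA_2 starts
 * from cycle_time = 250ns below; if the drive's IDENTIFY data reports
 * ATA_ID_EIDE_DMA_TIME = 300ns, the slower drive figure wins, and the
 * "peak" rate printed at the end becomes 2000/300 ~= 6 MB/s.)
 */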
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long cycle_time = 0;
int use_dma_info = 0;
const u8 xfer_mode = drive->dma_mode;
switch (xfer_mode) {
case XFER_MW_DMA_2:
cycle_time = 250;
use_dma_info = 1;
break;
case XFER_MW_DMA_1:
cycle_time = 250;
use_dma_info = 1;
break;
case XFER_MW_DMA_0:
cycle_time = 480;
break;
case XFER_SW_DMA_2:
case XFER_SW_DMA_1:
case XFER_SW_DMA_0:
cycle_time = 480;
break;
}
/*
* If we're going to be doing MW_DMA_1 or MW_DMA_2, we should
* take care to note the values in the ID...
*/
if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];
ide_set_drivedata(drive, (void *)cycle_time);
printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
drive->name, ide_xfer_verbose(xfer_mode),
2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}
static const struct ide_port_ops icside_v6_port_ops = {
.set_dma_mode = icside_set_dma_mode,
.maskproc = icside_maskproc,
};
static void icside_dma_host_set(ide_drive_t *drive, int on)
{
}
static int icside_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
disable_dma(ec->dma);
return get_dma_residue(ec->dma) != 0;
}
static void icside_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
/* We can not enable DMA on both channels simultaneously. */
BUG_ON(dma_channel_active(ec->dma));
enable_dma(ec->dma);
}
static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
struct icside_state *state = ecard_get_drvdata(ec);
unsigned int dma_mode;
if (cmd->tf_flags & IDE_TFLAG_WRITE)
dma_mode = DMA_MODE_WRITE;
else
dma_mode = DMA_MODE_READ;
/*
* We can not enable DMA on both channels.
*/
BUG_ON(dma_channel_active(ec->dma));
/*
* Ensure that we have the right interrupt routed.
*/
icside_maskproc(drive, 0);
/*
* Route the DMA signals to the correct interface.
*/
writeb(state->sel | hwif->channel, state->ioc_base);
/*
* Select the correct timing for this drive.
*/
set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));
/*
* Tell the DMA engine about the SG table and
* data direction.
*/
set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
set_dma_mode(ec->dma, dma_mode);
return 0;
}
static int icside_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct expansion_card *ec = ECARD_DEV(hwif->dev);
struct icside_state *state = ecard_get_drvdata(ec);
return readb(state->irq_port +
(hwif->channel ?
ICS_ARCIN_V6_INTRSTAT_2 :
ICS_ARCIN_V6_INTRSTAT_1)) & 1;
}
static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
hwif->dmatable_cpu = NULL;
hwif->dmatable_dma = 0;
return 0;
}
static const struct ide_dma_ops icside_v6_dma_ops = {
.dma_host_set = icside_dma_host_set,
.dma_setup = icside_dma_setup,
.dma_start = icside_dma_start,
.dma_end = icside_dma_end,
.dma_test_irq = icside_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
#endif
static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
return -EOPNOTSUPP;
}
static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
struct cardinfo *info, struct expansion_card *ec)
{
unsigned long port = (unsigned long)base + info->dataoffset;
hw->io_ports.data_addr = port;
hw->io_ports.error_addr = port + (1 << info->stepping);
hw->io_ports.nsect_addr = port + (2 << info->stepping);
hw->io_ports.lbal_addr = port + (3 << info->stepping);
hw->io_ports.lbam_addr = port + (4 << info->stepping);
hw->io_ports.lbah_addr = port + (5 << info->stepping);
hw->io_ports.device_addr = port + (6 << info->stepping);
hw->io_ports.status_addr = port + (7 << info->stepping);
hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset;
hw->irq = ec->irq;
hw->dev = &ec->dev;
}
static const struct ide_port_info icside_v5_port_info = {
.host_flags = IDE_HFLAG_NO_DMA,
.chipset = ide_acorn,
};
static int icside_register_v5(struct icside_state *state,
struct expansion_card *ec)
{
void __iomem *base;
struct ide_host *host;
struct ide_hw hw, *hws[] = { &hw };
int ret;
base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
if (!base)
return -ENOMEM;
state->irq_port = base;
ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
ec->irqmask = 1;
ecard_setirq(ec, &icside_ops_arcin_v5, state);
/*
* Be on the safe side - disable interrupts
*/
icside_irqdisable_arcin_v5(ec, 0);
icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
host = ide_host_alloc(&icside_v5_port_info, hws, 1);
if (host == NULL)
return -ENODEV;
state->host = host;
ecard_set_drvdata(ec, state);
ret = ide_host_register(host, &icside_v5_port_info, hws);
if (ret)
goto err_free;
return 0;
err_free:
ide_host_free(host);
ecard_set_drvdata(ec, NULL);
return ret;
}
static const struct ide_port_info icside_v6_port_info __initconst = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
.chipset = ide_acorn,
};
static int icside_register_v6(struct icside_state *state,
struct expansion_card *ec)
{
void __iomem *ioc_base, *easi_base;
struct ide_host *host;
unsigned int sel = 0;
int ret;
struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
struct ide_port_info d = icside_v6_port_info;
ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
if (!ioc_base) {
ret = -ENOMEM;
goto out;
}
easi_base = ioc_base;
if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
if (!easi_base) {
ret = -ENOMEM;
goto out;
}
/*
* Enable access to the EASI region.
*/
sel = 1 << 5;
}
writeb(sel, ioc_base);
ecard_setirq(ec, &icside_ops_arcin_v6, state);
state->irq_port = easi_base;
state->ioc_base = ioc_base;
state->sel = sel;
/*
* Be on the safe side - disable interrupts
*/
icside_irqdisable_arcin_v6(ec, 0);
icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
host = ide_host_alloc(&d, hws, 2);
if (host == NULL)
return -ENODEV;
state->host = host;
ecard_set_drvdata(ec, state);
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
d.dma_ops = &icside_v6_dma_ops;
}
#endif
ret = ide_host_register(host, &d, hws);
if (ret)
goto err_free;
return 0;
err_free:
ide_host_free(host);
if (d.dma_ops)
free_dma(ec->dma);
ecard_set_drvdata(ec, NULL);
out:
return ret;
}
static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct icside_state *state;
void __iomem *idmem;
int ret;
ret = ecard_request_resources(ec);
if (ret)
goto out;
state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
if (!state) {
ret = -ENOMEM;
goto release;
}
state->type = ICS_TYPE_NOTYPE;
idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
if (idmem) {
unsigned int type;
type = readb(idmem + ICS_IDENT_OFFSET) & 1;
type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
ecardm_iounmap(ec, idmem);
state->type = type;
}
switch (state->type) {
case ICS_TYPE_A3IN:
dev_warn(&ec->dev, "A3IN unsupported\n");
ret = -ENODEV;
break;
case ICS_TYPE_A3USER:
dev_warn(&ec->dev, "A3USER unsupported\n");
ret = -ENODEV;
break;
case ICS_TYPE_V5:
ret = icside_register_v5(state, ec);
break;
case ICS_TYPE_V6:
ret = icside_register_v6(state, ec);
break;
default:
dev_warn(&ec->dev, "unknown interface type\n");
ret = -ENODEV;
break;
}
if (ret == 0)
goto out;
kfree(state);
release:
ecard_release_resources(ec);
out:
return ret;
}
static void icside_remove(struct expansion_card *ec)
{
struct icside_state *state = ecard_get_drvdata(ec);
switch (state->type) {
case ICS_TYPE_V5:
/* FIXME: tell IDE to stop using the interface */
/* Disable interrupts */
icside_irqdisable_arcin_v5(ec, 0);
break;
case ICS_TYPE_V6:
/* FIXME: tell IDE to stop using the interface */
if (ec->dma != NO_DMA)
free_dma(ec->dma);
/* Disable interrupts */
icside_irqdisable_arcin_v6(ec, 0);
/* Reset the ROM pointer/EASI selection */
writeb(0, state->ioc_base);
break;
}
ecard_set_drvdata(ec, NULL);
kfree(state);
ecard_release_resources(ec);
}
static void icside_shutdown(struct expansion_card *ec)
{
struct icside_state *state = ecard_get_drvdata(ec);
unsigned long flags;
/*
* Disable interrupts from this card. We need to do
* this before disabling EASI since we may be accessing
* this register via that region.
*/
local_irq_save(flags);
ec->ops->irqdisable(ec, 0);
local_irq_restore(flags);
/*
* Reset the ROM pointer so that we can read the ROM
* after a soft reboot. This also disables access to
* the IDE taskfile via the EASI region.
*/
if (state->ioc_base)
writeb(0, state->ioc_base);
}
static const struct ecard_id icside_ids[] = {
{ MANU_ICS, PROD_ICS_IDE },
{ MANU_ICS2, PROD_ICS2_IDE },
{ 0xffff, 0xffff }
};
static struct ecard_driver icside_driver = {
.probe = icside_probe,
.remove = icside_remove,
.shutdown = icside_shutdown,
.id_table = icside_ids,
.drv = {
.name = "icside",
},
};
static int __init icside_init(void)
{
return ecard_register_driver(&icside_driver);
}
static void __exit icside_exit(void)
{
ecard_remove_driver(&icside_driver);
}
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS IDE driver");
module_init(icside_init);
module_exit(icside_exit);
| gpl-2.0 |
thicklizard/HTC-one-android-kernel | drivers/isdn/hysdn/hysdn_proclog.c | 4787 | 12218 | /* $Id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai Exp $
*
* Linux driver for HYSDN cards, /proc/net filesystem log functions.
*
* Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
* Copyright 1999 by Werner Cornelius (werner@titro.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include "hysdn_defs.h"
/* the proc subdir for the interface is defined in the procconf module */
extern struct proc_dir_entry *hysdn_proc_entry;
static DEFINE_MUTEX(hysdn_log_mutex);
static void put_log_buffer(hysdn_card *card, char *cp);
/*************************************************/
/* structure keeping ascii log for device output */
/*************************************************/
struct log_data {
struct log_data *next;
unsigned long usage_cnt;/* number of files still to work */
void *proc_ctrl; /* pointer to own control procdata structure */
char log_start[2]; /* log string start (final len aligned by size) */
};
/**********************************************/
/* structure holding proc entries for one card */
/**********************************************/
struct procdata {
struct proc_dir_entry *log; /* log entry */
char log_name[15]; /* log filename */
struct log_data *log_head, *log_tail; /* head and tail for queue */
int if_used; /* open count for interface */
int volatile del_lock; /* lock for delete operations */
unsigned char logtmp[LOG_MAX_LINELEN];
wait_queue_head_t rd_queue;
};
/**********************************************/
/* log function for card's error log interface */
/**********************************************/
void
hysdn_card_errlog(hysdn_card *card, tErrLogEntry *logp, int maxsize)
{
char buf[ERRLOG_TEXT_SIZE + 40];
sprintf(buf, "LOG 0x%08lX 0x%08lX : %s\n", logp->ulErrType, logp->ulErrSubtype, logp->ucText);
put_log_buffer(card, buf); /* output the string */
} /* hysdn_card_errlog */
/***************************************************/
/* Log function using format specifiers for output */
/***************************************************/
void
hysdn_addlog(hysdn_card *card, char *fmt, ...)
{
struct procdata *pd = card->proclog;
char *cp;
va_list args;
if (!pd)
return; /* log structure does not exist */
cp = pd->logtmp;
cp += sprintf(cp, "HYSDN: card %d ", card->myid);
va_start(args, fmt);
cp += vsprintf(cp, fmt, args);
va_end(args);
*cp++ = '\n';
*cp = 0;
if (card->debug_flags & DEB_OUT_SYSLOG)
printk(KERN_INFO "%s", pd->logtmp);
else
put_log_buffer(card, pd->logtmp);
} /* hysdn_addlog */
/********************************************/
/* put a log buffer into the log queue. */
/* This buffer will be kept until all files */
/* opened for read have seen the contents. */
/* Frees buffers no longer in use. */
/********************************************/
static void
put_log_buffer(hysdn_card *card, char *cp)
{
struct log_data *ib;
struct procdata *pd = card->proclog;
int i;
unsigned long flags;
if (!pd)
return;
if (!cp)
return;
if (!*cp)
return;
if (pd->if_used <= 0)
return; /* no open file for read */
if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), GFP_ATOMIC)))
return; /* no memory */
strcpy(ib->log_start, cp); /* set output string */
ib->next = NULL;
ib->proc_ctrl = pd; /* point to own control structure */
spin_lock_irqsave(&card->hysdn_lock, flags);
ib->usage_cnt = pd->if_used;
if (!pd->log_head)
pd->log_head = ib; /* new head */
else
pd->log_tail->next = ib; /* follows existing messages */
pd->log_tail = ib; /* new tail */
i = pd->del_lock++; /* get lock state */
spin_unlock_irqrestore(&card->hysdn_lock, flags);
/* delete old entries */
if (!i)
while (pd->log_head->next) {
if ((pd->log_head->usage_cnt <= 0) &&
(pd->log_head->next->usage_cnt <= 0)) {
ib = pd->log_head;
pd->log_head = pd->log_head->next;
kfree(ib);
} else
break;
} /* pd->log_head->next */
pd->del_lock--; /* release lock level */
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
} /* put_log_buffer */
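/*
 * Lifetime sketch: with two log readers open (if_used == 2) each new
 * entry is queued with usage_cnt == 2; every hysdn_log_read() that
 * consumes the entry decrements usage_cnt, and once both readers have
 * moved past it a later put_log_buffer() call frees it (a head entry
 * is only released while a newer entry still follows it).
 */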
/******************************/
/* file operations and tables */
/******************************/
/****************************************/
/* write log file -> set log level bits */
/****************************************/
static ssize_t
hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
int rc;
unsigned char valbuf[128];
hysdn_card *card = file->private_data;
if (count > (sizeof(valbuf) - 1))
count = sizeof(valbuf) - 1; /* limit length */
if (copy_from_user(valbuf, buf, count))
return (-EFAULT); /* copy failed */
valbuf[count] = 0; /* terminating 0 */
rc = kstrtoul(valbuf, 0, &card->debug_flags);
if (rc < 0)
return rc;
hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
return (count);
} /* hysdn_log_write */
/******************/
/* read log file */
/******************/
static ssize_t
hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
struct log_data *inf;
int len;
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
struct procdata *pd = NULL;
hysdn_card *card;
if (!*((struct log_data **) file->private_data)) {
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
/* sorry, but we need to search the card */
card = card_root;
while (card) {
pd = card->proclog;
if (pd->log == pde)
break;
card = card->next; /* search next entry */
}
if (card)
interruptible_sleep_on(&(pd->rd_queue));
else
return (-EAGAIN);
}
if (!(inf = *((struct log_data **) file->private_data)))
return (0);
inf->usage_cnt--; /* new usage count */
file->private_data = &inf->next; /* next structure */
if ((len = strlen(inf->log_start)) <= count) {
if (copy_to_user(buf, inf->log_start, len))
return -EFAULT;
*off += len;
return (len);
}
return (0);
} /* hysdn_log_read */
/******************/
/* open log file */
/******************/
static int
hysdn_log_open(struct inode *ino, struct file *filep)
{
hysdn_card *card;
struct procdata *pd = NULL;
unsigned long flags;
mutex_lock(&hysdn_log_mutex);
card = card_root;
while (card) {
pd = card->proclog;
if (pd->log == PDE(ino))
break;
card = card->next; /* search next entry */
}
if (!card) {
mutex_unlock(&hysdn_log_mutex);
return (-ENODEV); /* device is unknown/invalid */
}
filep->private_data = card; /* remember our own card */
if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
/* write only access -> write log level only */
} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
/* read access -> log/debug read */
spin_lock_irqsave(&card->hysdn_lock, flags);
pd->if_used++;
if (pd->log_head)
filep->private_data = &pd->log_tail->next;
else
filep->private_data = &pd->log_head;
spin_unlock_irqrestore(&card->hysdn_lock, flags);
} else { /* simultaneous read/write access forbidden ! */
mutex_unlock(&hysdn_log_mutex);
return (-EPERM); /* no permission this time */
}
mutex_unlock(&hysdn_log_mutex);
return nonseekable_open(ino, filep);
} /* hysdn_log_open */
/*******************************************************************************/
/* close a cardlog file. If the file has been opened for exclusive write it is */
/* treated as POF data input and the POF loader is notified. */
/* Otherwise the file is handled as log output. In this case the interface */
/* usage count is decremented and all buffers are notified of the close. If */
/* this file was the last one to be closed, all buffers are freed. */
/*******************************************************************************/
static int
hysdn_log_close(struct inode *ino, struct file *filep)
{
struct log_data *inf;
struct procdata *pd;
hysdn_card *card;
int retval = 0;
mutex_lock(&hysdn_log_mutex);
if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
/* write only access -> write debug level written */
retval = 0; /* success */
} else {
/* read access -> log/debug read, mark one further file as closed */
pd = NULL;
inf = *((struct log_data **) filep->private_data); /* get first log entry */
if (inf)
pd = (struct procdata *) inf->proc_ctrl; /* still entries there */
else {
/* no info available -> search card */
card = card_root;
while (card) {
pd = card->proclog;
if (pd->log == PDE(ino))
break;
card = card->next; /* search next entry */
}
if (card)
pd = card->proclog; /* pointer to procfs log */
}
if (pd)
pd->if_used--; /* decrement interface usage count by one */
while (inf) {
inf->usage_cnt--; /* decrement usage count for buffers */
inf = inf->next;
}
if (pd)
if (pd->if_used <= 0) /* delete buffers if last file closed */
while (pd->log_head) {
inf = pd->log_head;
pd->log_head = pd->log_head->next;
kfree(inf);
}
} /* read access */
mutex_unlock(&hysdn_log_mutex);
return (retval);
} /* hysdn_log_close */
/*************************************************/
/* select/poll routine to be able using select() */
/*************************************************/
static unsigned int
hysdn_log_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
hysdn_card *card;
struct procdata *pd = NULL;
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE)
return (mask); /* no polling for write supported */
/* we need to search the card */
card = card_root;
while (card) {
pd = card->proclog;
if (pd->log == pde)
break;
card = card->next; /* search next entry */
}
if (!card)
return (mask); /* card not found */
poll_wait(file, &(pd->rd_queue), wait);
if (*((struct log_data **) file->private_data))
mask |= POLLIN | POLLRDNORM;
return mask;
} /* hysdn_log_poll */
/**************************************************/
/* table for log filesystem functions defined above. */
/**************************************************/
static const struct file_operations log_fops =
{
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = hysdn_log_read,
.write = hysdn_log_write,
.poll = hysdn_log_poll,
.open = hysdn_log_open,
.release = hysdn_log_close,
};
/***********************************************************************************/
/* hysdn_proclog_init is called when the module is loaded after creating the cards' */
/* conf files. */
/***********************************************************************************/
int
hysdn_proclog_init(hysdn_card *card)
{
struct procdata *pd;
/* create a cardlog proc entry */
if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
pd->log = proc_create(pd->log_name,
S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
&log_fops);
init_waitqueue_head(&(pd->rd_queue));
card->proclog = (void *) pd; /* remember procfs structure */
}
return (0);
} /* hysdn_proclog_init */
/************************************************************************************/
/* hysdn_proclog_release is called when the module is unloaded and before the cards' */
/* conf file is released */
/* The module counter is assumed to be 0! */
/************************************************************************************/
void
hysdn_proclog_release(hysdn_card *card)
{
struct procdata *pd;
if ((pd = (struct procdata *) card->proclog) != NULL) {
if (pd->log)
remove_proc_entry(pd->log_name, hysdn_proc_entry);
kfree(pd); /* release memory */
card->proclog = NULL;
}
} /* hysdn_proclog_release */
| gpl-2.0 |
InfinitiveOS-Devices/android_kernel_motorola_msm8610 | fs/gfs2/bmap.c | 4787 | 32886 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
* block is 512, so __u16 is fine for that. It saves stack space to
* keep it small.
*/
struct metapath {
struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
__u16 mp_list[GFS2_MAX_META_HEIGHT];
};
struct strip_mine {
int sm_first;
unsigned int sm_height;
};
/**
* gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
* @ip: the inode
* @dibh: the dinode buffer
* @block: the block number that was allocated
* @page: The (optional) page. This is looked up if @page is NULL
*
* Returns: errno
*/
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
u64 block, struct page *page)
{
struct inode *inode = &ip->i_inode;
struct buffer_head *bh;
int release = 0;
if (!page || page->index) {
page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
if (!page)
return -ENOMEM;
release = 1;
}
if (!PageUptodate(page)) {
void *kaddr = kmap(page);
u64 dsize = i_size_read(inode);
if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
dsize = dibh->b_size - sizeof(struct gfs2_dinode);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
kunmap(page);
SetPageUptodate(page);
}
if (!page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits,
(1 << BH_Uptodate));
bh = page_buffers(page);
if (!buffer_mapped(bh))
map_bh(bh, inode->i_sb, block);
set_buffer_uptodate(bh);
if (!gfs2_is_jdata(ip))
mark_buffer_dirty(bh);
if (!gfs2_is_writeback(ip))
gfs2_trans_add_bh(ip->i_gl, bh, 0);
if (release) {
unlock_page(page);
page_cache_release(page);
}
return 0;
}
/**
* gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
* @ip: The GFS2 inode to unstuff
* @page: The (optional) page. This is looked up if the @page is NULL
*
* This routine unstuffs a dinode and returns it to a "normal" state such
* that the height can be grown in the traditional way.
*
* Returns: errno
*/
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
struct buffer_head *bh, *dibh;
struct gfs2_dinode *di;
u64 block = 0;
int isdir = gfs2_is_dir(ip);
int error;
down_write(&ip->i_rw_mutex);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
if (i_size_read(&ip->i_inode)) {
/* Get a free block, fill it with the stuffed data,
and write it out to disk */
unsigned int n = 1;
error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
if (error)
goto out_brelse;
if (isdir) {
gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
error = gfs2_dir_get_new_buffer(ip, block, &bh);
if (error)
goto out_brelse;
gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
dibh, sizeof(struct gfs2_dinode));
brelse(bh);
} else {
error = gfs2_unstuffer_page(ip, dibh, block, page);
if (error)
goto out_brelse;
}
}
/* Set up the pointer to the new block */
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
di = (struct gfs2_dinode *)dibh->b_data;
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
if (i_size_read(&ip->i_inode)) {
*(__be64 *)(di + 1) = cpu_to_be64(block);
gfs2_add_inode_blocks(&ip->i_inode, 1);
di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
}
ip->i_height = 1;
di->di_height = cpu_to_be16(1);
out_brelse:
brelse(dibh);
out:
up_write(&ip->i_rw_mutex);
return error;
}
/**
* find_metapath - Find path through the metadata tree
* @sdp: The superblock
* @mp: The metapath to return the result in
* @block: The disk block to look up
* @height: The pre-calculated height of the metadata tree
*
* This routine returns a struct metapath structure that defines a path
* through the metadata of inode "ip" to get to block "block".
*
* Example:
* Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
* filesystem with a blocksize of 4096.
*
* find_metapath() would return a struct metapath structure set to:
 * mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
*
* That means that in order to get to the block containing the byte at
* offset 101342453, we would load the indirect block pointed to by pointer
* 0 in the dinode. We would then load the indirect block pointed to by
* pointer 48 in that indirect block. We would then load the data block
* pointed to by pointer 165 in that indirect block.
*
* ----------------------------------------
* | Dinode | |
* | | 4|
* | |0 1 2 3 4 5 9|
* | | 6|
* ----------------------------------------
* |
* |
* V
* ----------------------------------------
* | Indirect Block |
* | 5|
* | 4 4 4 4 4 5 5 1|
* |0 5 6 7 8 9 0 1 2|
* ----------------------------------------
* |
* |
* V
* ----------------------------------------
* | Indirect Block |
* | 1 1 1 1 1 5|
* | 6 6 6 6 6 1|
* |0 3 4 5 6 7 2|
* ----------------------------------------
* |
* |
* V
* ----------------------------------------
* | Data block containing offset |
* | 101342453 |
* | |
* | |
* ----------------------------------------
*
*/
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
struct metapath *mp, unsigned int height)
{
unsigned int i;
for (i = height; i--;)
mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}
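/*
 * Worked check of the example above, assuming for illustration that
 * sd_inptrs == 512 (the real value is derived from the block size
 * minus the metadata header size):
 *
 *	lblock = 101342453 / 4096 = 24741
 *	mp_list[2] = 24741 % 512 = 165,  24741 / 512 = 48
 *	mp_list[1] =    48 % 512 =  48,     48 / 512 =  0
 *	mp_list[0] =     0 % 512 =   0
 */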
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
if (mp->mp_list[0] == 0)
return 2;
return 1;
}
/**
* metapointer - Return pointer to start of metadata in a buffer
* @height: The metadata height (0 = dinode)
* @mp: The metapath
*
* Return a pointer to the block number of the next height of the metadata
* tree given a buffer containing the pointer to the current height of the
* metadata tree.
*/
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
struct buffer_head *bh = mp->mp_bh[height];
unsigned int head_size = (height > 0) ?
sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
}
static void gfs2_metapath_ra(struct gfs2_glock *gl,
const struct buffer_head *bh, const __be64 *pos)
{
struct buffer_head *rabh;
const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
const __be64 *t;
for (t = pos; t < endp; t++) {
if (!*t)
continue;
rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
if (trylock_buffer(rabh)) {
if (!buffer_uptodate(rabh)) {
rabh->b_end_io = end_buffer_read_sync;
submit_bh(READA | REQ_META, rabh);
continue;
}
unlock_buffer(rabh);
}
brelse(rabh);
}
}
/**
* lookup_metapath - Walk the metadata tree to a specific point
* @ip: The inode
* @mp: The metapath
*
* Assumes that the inode's buffer has already been looked up and
* hooked onto mp->mp_bh[0] and that the metapath has been initialised
* by find_metapath().
*
* If this function encounters part of the tree which has not been
* allocated, it returns the current height of the tree at the point
* at which it found the unallocated block. Blocks which are found are
* added to the mp->mp_bh[] list.
*
* Returns: error or height of metadata tree
*/
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
unsigned int end_of_metadata = ip->i_height - 1;
unsigned int x;
__be64 *ptr;
u64 dblock;
int ret;
for (x = 0; x < end_of_metadata; x++) {
ptr = metapointer(x, mp);
dblock = be64_to_cpu(*ptr);
if (!dblock)
return x + 1;
ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, 0, &mp->mp_bh[x+1]);
if (ret)
return ret;
}
return ip->i_height;
}
static inline void release_metapath(struct metapath *mp)
{
int i;
for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
if (mp->mp_bh[i] == NULL)
break;
brelse(mp->mp_bh[i]);
}
}
/**
* gfs2_extent_length - Returns length of an extent of blocks
* @start: Start of the buffer
* @len: Length of the buffer in bytes
* @ptr: Current position in the buffer
* @limit: Max extent length to return (0 = unlimited)
* @eob: Set to 1 if we hit "end of block"
*
* If the first block is zero (unallocated) it will return the number of
* unallocated blocks in the extent, otherwise it will return the number
* of contiguous blocks in the extent.
*
* Returns: The length of the extent (minimum of one block)
*/
static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
{
const __be64 *end = (start + len);
const __be64 *first = ptr;
u64 d = be64_to_cpu(*ptr);
*eob = 0;
do {
ptr++;
if (ptr >= end)
break;
if (limit && --limit == 0)
break;
if (d)
d++;
} while(be64_to_cpu(*ptr) == d);
if (ptr >= end)
*eob = 1;
return (ptr - first);
}
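/*
 * For example, scanning the pointer run { 100, 101, 102, 0, ... } from
 * its first slot returns 3 (three contiguous allocated blocks), and
 * scanning { 0, 0, 0, 5, ... } likewise returns 3 (three unallocated
 * blocks); the walk stops at the first pointer breaking the pattern.
 */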
static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
if (create)
down_write(&ip->i_rw_mutex);
else
down_read(&ip->i_rw_mutex);
}
static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
if (create)
up_write(&ip->i_rw_mutex);
else
up_read(&ip->i_rw_mutex);
}
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
struct gfs2_glock *gl, unsigned int i,
unsigned offset, u64 bn)
{
__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
((i > 1) ? sizeof(struct gfs2_meta_header) :
sizeof(struct gfs2_dinode)));
BUG_ON(i < 1);
BUG_ON(mp->mp_bh[i] != NULL);
mp->mp_bh[i] = gfs2_meta_new(gl, bn);
gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
ptr += offset;
*ptr = cpu_to_be64(bn);
return ptr;
}
enum alloc_state {
ALLOC_DATA = 0,
ALLOC_GROW_DEPTH = 1,
ALLOC_GROW_HEIGHT = 2,
/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};
/**
* gfs2_bmap_alloc - Build a metadata tree of the requested height
* @inode: The GFS2 inode
* @lblock: The logical starting block of the extent
* @bh_map: This is used to return the mapping details
* @mp: The metapath
 * @sheight: The starting height (i.e. what's already mapped)
* @height: The height to build to
* @maxlen: The max number of data blocks to alloc
*
* In this routine we may have to alloc:
* i) Indirect blocks to grow the metadata tree height
* ii) Indirect blocks to fill in lower part of the metadata tree
* iii) Data blocks
*
* The function is in two parts. The first part works out the total
* number of blocks which we need. The second part does the actual
* allocation asking for an extent at a time (if enough contiguous free
* blocks are available, there will only be one request per bmap call)
* and uses the state machine to initialise the blocks in order.
*
* Returns: errno on error
*/
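/*
 * Illustrative progression: growing a height 1 file to height 3 starts
 * in ALLOC_GROW_HEIGHT to add the new top indirect blocks, falls
 * through to ALLOC_GROW_DEPTH for any missing intermediate blocks down
 * the branch, and finishes in ALLOC_DATA filling in the data block
 * pointers themselves.
 */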
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
struct buffer_head *bh_map, struct metapath *mp,
const unsigned int sheight,
const unsigned int height,
const unsigned int maxlen)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct super_block *sb = sdp->sd_vfs;
struct buffer_head *dibh = mp->mp_bh[0];
u64 bn, dblock = 0;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
unsigned dblks = 0;
unsigned ptrs_per_blk;
const unsigned end_of_metadata = height - 1;
int ret;
int eob = 0;
enum alloc_state state;
__be64 *ptr;
__be64 zero_bn = 0;
BUG_ON(sheight < 1);
BUG_ON(dibh == NULL);
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
if (height == sheight) {
struct buffer_head *bh;
/* Bottom indirect block exists, find unalloced extent size */
ptr = metapointer(end_of_metadata, mp);
bh = mp->mp_bh[end_of_metadata];
dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
&eob);
BUG_ON(dblks < 1);
state = ALLOC_DATA;
} else {
/* Need to allocate indirect blocks */
ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
if (height == ip->i_height) {
/* Writing into existing tree, extend tree down */
iblks = height - sheight;
state = ALLOC_GROW_DEPTH;
} else {
/* Building up tree height */
state = ALLOC_GROW_HEIGHT;
iblks = height - ip->i_height;
branch_start = metapath_branch_start(mp);
iblks += (height - branch_start);
}
}
/* start of the second part of the function (state machine) */
blks = dblks + iblks;
i = sheight;
do {
int error;
n = blks - alloced;
error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
if (error)
return error;
alloced += n;
if (state != ALLOC_DATA || gfs2_is_jdata(ip))
gfs2_trans_add_unrevoke(sdp, bn, n);
switch (state) {
/* Growing height of tree */
case ALLOC_GROW_HEIGHT:
if (i == 1) {
ptr = (__be64 *)(dibh->b_data +
sizeof(struct gfs2_dinode));
zero_bn = *ptr;
}
for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
if (i - 1 == height - ip->i_height) {
i--;
gfs2_buffer_copy_tail(mp->mp_bh[i],
sizeof(struct gfs2_meta_header),
dibh, sizeof(struct gfs2_dinode));
gfs2_buffer_clear_tail(dibh,
sizeof(struct gfs2_dinode) +
sizeof(__be64));
ptr = (__be64 *)(mp->mp_bh[i]->b_data +
sizeof(struct gfs2_meta_header));
*ptr = zero_bn;
state = ALLOC_GROW_DEPTH;
for(i = branch_start; i < height; i++) {
if (mp->mp_bh[i] == NULL)
break;
brelse(mp->mp_bh[i]);
mp->mp_bh[i] = NULL;
}
i = branch_start;
}
if (n == 0)
break;
/* Branching from existing tree */
case ALLOC_GROW_DEPTH:
if (i > 1 && i < height)
gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
for (; i < height && n > 0; i++, n--)
gfs2_indirect_init(mp, ip->i_gl, i,
mp->mp_list[i-1], bn++);
if (i == height)
state = ALLOC_DATA;
if (n == 0)
break;
/* Tree complete, adding data blocks */
case ALLOC_DATA:
BUG_ON(n > dblks);
BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
dblks = n;
ptr = metapointer(end_of_metadata, mp);
dblock = bn;
while (n-- > 0)
*ptr++ = cpu_to_be64(bn++);
if (buffer_zeronew(bh_map)) {
ret = sb_issue_zeroout(sb, dblock, dblks,
GFP_NOFS);
if (ret) {
fs_err(sdp,
"Failed to zero data buffers\n");
clear_buffer_zeronew(bh_map);
}
}
break;
}
} while ((state != ALLOC_DATA) || !dblock);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
map_bh(bh_map, inode->i_sb, dblock);
bh_map->b_size = dblks << inode->i_blkbits;
set_buffer_new(bh_map);
return 0;
}
/**
* gfs2_block_map - Map a block from an inode to a disk block
* @inode: The inode
* @lblock: The logical block number
* @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
*
* Sets buffer_mapped() if successful, sets buffer_boundary() if a
* read of metadata will be required before the next block can be
* mapped. Sets buffer_new() if new blocks were allocated.
*
* Returns: errno
*/
int gfs2_block_map(struct inode *inode, sector_t lblock,
struct buffer_head *bh_map, int create)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
unsigned int bsize = sdp->sd_sb.sb_bsize;
const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
const u64 *arr = sdp->sd_heightsize;
__be64 *ptr;
u64 size;
struct metapath mp;
int ret;
int eob;
unsigned int len;
struct buffer_head *bh;
u8 height;
BUG_ON(maxlen == 0);
memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
bmap_lock(ip, create);
clear_buffer_mapped(bh_map);
clear_buffer_new(bh_map);
clear_buffer_boundary(bh_map);
trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
if (gfs2_is_dir(ip)) {
bsize = sdp->sd_jbsize;
arr = sdp->sd_jheightsize;
}
ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
if (ret)
goto out;
height = ip->i_height;
size = (lblock + 1) * bsize;
while (size > arr[height])
height++;
find_metapath(sdp, lblock, &mp, height);
ret = 1;
if (height > ip->i_height || gfs2_is_stuffed(ip))
goto do_alloc;
ret = lookup_metapath(ip, &mp);
if (ret < 0)
goto out;
if (ret != ip->i_height)
goto do_alloc;
ptr = metapointer(ip->i_height - 1, &mp);
if (*ptr == 0)
goto do_alloc;
map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
bh = mp.mp_bh[ip->i_height - 1];
len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
bh_map->b_size = (len << inode->i_blkbits);
if (eob)
set_buffer_boundary(bh_map);
ret = 0;
out:
release_metapath(&mp);
trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
bmap_unlock(ip, create);
return ret;
do_alloc:
/* All allocations are done here, firstly check create flag */
if (!create) {
BUG_ON(gfs2_is_stuffed(ip));
ret = 0;
goto out;
}
/* At this point ret is the tree depth of already allocated blocks */
ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
goto out;
}
/*
* Deprecated: do not use in new code
*/
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
int ret;
int create = *new;
BUG_ON(!extlen);
BUG_ON(!dblock);
BUG_ON(!new);
bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
ret = gfs2_block_map(inode, lblock, &bh, create);
*extlen = bh.b_size >> inode->i_blkbits;
*dblock = bh.b_blocknr;
if (buffer_new(&bh))
*new = 1;
else
*new = 0;
return ret;
}
/**
 * do_strip - Look for a particular layer of the file and strip it off
* @ip: the inode
* @dibh: the dinode buffer
* @bh: A buffer of pointers
* @top: The first pointer in the buffer
* @bottom: One more than the last pointer
* @height: the height this buffer is at
 * @sm: a pointer to a struct strip_mine
*
* Returns: errno
*/
static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
struct buffer_head *bh, __be64 *top, __be64 *bottom,
unsigned int height, struct strip_mine *sm)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrp_list rlist;
u64 bn, bstart;
u32 blen, btotal;
__be64 *p;
unsigned int rg_blocks = 0;
int metadata;
unsigned int revokes = 0;
int x;
int error;
error = gfs2_rindex_update(sdp);
if (error)
return error;
if (!*top)
sm->sm_first = 0;
if (height != sm->sm_height)
return 0;
if (sm->sm_first) {
top++;
sm->sm_first = 0;
}
metadata = (height != ip->i_height - 1);
if (metadata)
revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
else if (ip->i_depth)
revokes = sdp->sd_inptrs;
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
bstart = 0;
blen = 0;
for (p = top; p < bottom; p++) {
if (!*p)
continue;
bn = be64_to_cpu(*p);
if (bstart + blen == bn)
blen++;
else {
if (bstart)
gfs2_rlist_add(ip, &rlist, bstart);
bstart = bn;
blen = 1;
}
}
if (bstart)
gfs2_rlist_add(ip, &rlist, bstart);
else
goto out; /* Nothing to do */
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd;
rgd = rlist.rl_ghs[x].gh_gl->gl_object;
rg_blocks += rgd->rd_length;
}
error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
if (error)
goto out_rlist;
error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
RES_INDIRECT + RES_STATFS + RES_QUOTA,
revokes);
if (error)
goto out_rg_gunlock;
down_write(&ip->i_rw_mutex);
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
bstart = 0;
blen = 0;
btotal = 0;
for (p = top; p < bottom; p++) {
if (!*p)
continue;
bn = be64_to_cpu(*p);
if (bstart + blen == bn)
blen++;
else {
if (bstart) {
__gfs2_free_blocks(ip, bstart, blen, metadata);
btotal += blen;
}
bstart = bn;
blen = 1;
}
*p = 0;
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart) {
__gfs2_free_blocks(ip, bstart, blen, metadata);
btotal += blen;
}
gfs2_statfs_change(sdp, 0, +btotal, 0);
gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
ip->i_inode.i_gid);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
out_rg_gunlock:
gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
gfs2_rlist_free(&rlist);
out:
return error;
}
/**
* recursive_scan - recursively scan through the end of a file
* @ip: the inode
* @dibh: the dinode buffer
* @mp: the path through the metadata to the point to start
* @height: the height the recursion is at
* @block: the indirect block to look at
* @first: 1 if this is the first block
 * @sm: the strip_mine control data, passed through to do_strip()
*
* When this is first called @height and @block should be zero and
* @first should be 1.
*
* Returns: errno
*/
static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
struct metapath *mp, unsigned int height,
u64 block, int first, struct strip_mine *sm)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *bh = NULL;
__be64 *top, *bottom;
u64 bn;
int error;
int mh_size = sizeof(struct gfs2_meta_header);
if (!height) {
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
return error;
dibh = bh;
top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
} else {
error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
if (error)
return error;
top = (__be64 *)(bh->b_data + mh_size) +
(first ? mp->mp_list[height] : 0);
bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
}
error = do_strip(ip, dibh, bh, top, bottom, height, sm);
if (error)
goto out;
if (height < ip->i_height - 1) {
gfs2_metapath_ra(ip->i_gl, bh, top);
for (; top < bottom; top++, first = 0) {
if (!*top)
continue;
bn = be64_to_cpu(*top);
error = recursive_scan(ip, dibh, mp, height + 1, bn,
first, sm);
if (error)
break;
}
}
out:
brelse(bh);
return error;
}
/**
* gfs2_block_truncate_page - Deal with zeroing out data for truncate
*
* This is partly borrowed from ext3.
*/
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
struct inode *inode = mapping->host;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned long index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize, iblock, length, pos;
struct buffer_head *bh;
struct page *page;
int err;
page = find_or_create_page(mapping, index, GFP_NOFS);
if (!page)
return 0;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
pos = blocksize;
while (offset >= pos) {
bh = bh->b_this_page;
iblock++;
pos += blocksize;
}
err = 0;
if (!buffer_mapped(bh)) {
gfs2_block_map(inode, iblock, bh, 0);
/* unmapped? It's a hole - nothing to do */
if (!buffer_mapped(bh))
goto unlock;
}
/* Ok, it's mapped. Make sure it's up-to-date */
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
err = -EIO;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
err = 0;
}
if (!gfs2_is_writeback(ip))
gfs2_trans_add_bh(ip->i_gl, bh, 0);
zero_user(page, offset, length);
mark_buffer_dirty(bh);
unlock:
unlock_page(page);
page_cache_release(page);
return err;
}
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *mapping = inode->i_mapping;
struct buffer_head *dibh;
int journaled = gfs2_is_jdata(ip);
int error;
error = gfs2_trans_begin(sdp,
RES_DINODE + (journaled ? RES_JDATA : 0), 0);
if (error)
return error;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
if (gfs2_is_stuffed(ip)) {
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
} else {
if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
error = gfs2_block_truncate_page(mapping, newsize);
if (error)
goto out_brelse;
}
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
}
i_size_write(inode, newsize);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
truncate_pagecache(inode, oldsize, newsize);
out_brelse:
brelse(dibh);
out:
gfs2_trans_end(sdp);
return error;
}
static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
unsigned int height = ip->i_height;
u64 lblock;
struct metapath mp;
int error;
if (!size)
lblock = 0;
else
lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
find_metapath(sdp, lblock, &mp, ip->i_height);
if (!gfs2_qadata_get(ip))
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
goto out;
while (height--) {
struct strip_mine sm;
sm.sm_first = !!size;
sm.sm_height = height;
error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
if (error)
break;
}
gfs2_quota_unhold(ip);
out:
gfs2_qadata_put(ip);
return error;
}
static int trunc_end(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
int error;
error = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (error)
return error;
down_write(&ip->i_rw_mutex);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
if (!i_size_read(&ip->i_inode)) {
ip->i_height = 0;
ip->i_goal = ip->i_no_addr;
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
}
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
out:
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
return error;
}
/**
* do_shrink - make a file smaller
* @inode: the inode
* @oldsize: the current inode size
* @newsize: the size to make the file
*
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
*
* Returns: errno
*/
static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int error;
error = trunc_start(inode, oldsize, newsize);
if (error < 0)
return error;
if (gfs2_is_stuffed(ip))
return 0;
error = trunc_dealloc(ip, newsize);
if (error == 0)
error = trunc_end(ip);
return error;
}
void gfs2_trim_blocks(struct inode *inode)
{
u64 size = inode->i_size;
int ret;
ret = do_shrink(inode, size, size);
WARN_ON(ret != 0);
}
/**
* do_grow - Touch and update inode size
* @inode: The inode
* @size: The new size
*
* This function updates the timestamps on the inode and
* may also increase the size of the inode. This function
* must not be called with @size any smaller than the current
* inode size.
*
* Although it is not strictly required to unstuff files here,
* earlier versions of GFS2 have a bug in the stuffed file reading
* code which will result in a buffer overrun if the size is larger
* than the max stuffed file size. In order to prevent this from
* occurring, such files are unstuffed, but in other cases we can
* just update the inode size directly.
*
* Returns: 0 on success, or -ve on error
*/
static int do_grow(struct inode *inode, u64 size)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
struct gfs2_qadata *qa = NULL;
int error;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
qa = gfs2_qadata_get(ip);
if (qa == NULL)
return -ENOMEM;
error = gfs2_quota_lock_check(ip);
if (error)
goto do_grow_alloc_put;
error = gfs2_inplace_reserve(ip, 1);
if (error)
goto do_grow_qunlock;
}
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
if (error)
goto do_grow_release;
if (qa) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
goto do_end_trans;
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto do_end_trans;
i_size_write(inode, size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
do_end_trans:
gfs2_trans_end(sdp);
do_grow_release:
if (qa) {
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
do_grow_alloc_put:
gfs2_qadata_put(ip);
}
return error;
}
/**
* gfs2_setattr_size - make a file a given size
* @inode: the inode
* @newsize: the size to make the file
*
* The file size can grow, shrink, or stay the same size. This
* is called holding i_mutex and an exclusive glock on the inode
* in question.
*
* Returns: errno
*/
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
int ret;
u64 oldsize;
BUG_ON(!S_ISREG(inode->i_mode));
ret = inode_newsize_ok(inode, newsize);
if (ret)
return ret;
inode_dio_wait(inode);
oldsize = inode->i_size;
if (newsize >= oldsize)
return do_grow(inode, newsize);
return do_shrink(inode, oldsize, newsize);
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
int error;
error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
if (!error)
error = trunc_end(ip);
return error;
}
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
return trunc_dealloc(ip, 0);
}
/**
* gfs2_write_alloc_required - figure out if a write will require an allocation
* @ip: the file being written to
* @offset: the offset to write to
* @len: the number of bytes being written
*
* Returns: 1 if an alloc is required, 0 otherwise
*/
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
unsigned int len)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head bh;
unsigned int shift;
u64 lblock, lblock_stop, size;
u64 end_of_file;
if (!len)
return 0;
if (gfs2_is_stuffed(ip)) {
if (offset + len >
sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
return 1;
return 0;
}
shift = sdp->sd_sb.sb_bsize_shift;
BUG_ON(gfs2_is_dir(ip));
end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
if (lblock_stop > end_of_file)
return 1;
size = (lblock_stop - lblock) << shift;
do {
bh.b_state = 0;
bh.b_size = size;
gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
if (!buffer_mapped(&bh))
return 1;
size -= bh.b_size;
lblock += (bh.b_size >> ip->i_inode.i_blkbits);
} while(size > 0);
return 0;
}
| gpl-2.0 |
theapant/ZTE_N800-kernel | arch/powerpc/sysdev/mpic_msi.c | 6835 | 2450 | /*
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2 of the
* License.
*
*/
#include <linux/irq.h>
#include <linux/bitmap.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
#include <sysdev/mpic.h>
void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
{
/* The mpic calls this even when there is no allocator setup */
if (!mpic->msi_bitmap.bitmap)
return;
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
irq_hw_number_t hwirq;
const struct irq_domain_ops *ops = mpic->irqhost->ops;
struct device_node *np;
int flags, index, i;
struct of_irq oirq;
pr_debug("mpic: found U3, guessing msi allocator setup\n");
/* Reserve source numbers we know are reserved in the HW.
*
* This is a bit of a mix of U3 and U4 reserves but that's going
 * to work fine, we have plenty of numbers left so let's just
* mark anything we don't like reserved.
*/
for (i = 0; i < 8; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 42; i < 46; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 100; i < 105; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 124; i < mpic->num_sources; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
np = NULL;
while ((np = of_find_all_nodes(np))) {
pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
index = 0;
while (of_irq_map_one(np, index++, &oirq) == 0) {
ops->xlate(mpic->irqhost, NULL, oirq.specifier,
oirq.size, &hwirq, &flags);
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}
}
return 0;
}
#else
static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
return -1;
}
#endif
int mpic_msi_init_allocator(struct mpic *mpic)
{
int rc;
rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
mpic->irqhost->of_node);
if (rc)
return rc;
rc = msi_bitmap_reserve_dt_hwirqs(&mpic->msi_bitmap);
if (rc > 0) {
if (mpic->flags & MPIC_U3_HT_IRQS)
rc = mpic_msi_reserve_u3_hwirqs(mpic);
if (rc) {
msi_bitmap_free(&mpic->msi_bitmap);
return rc;
}
}
return 0;
}
| gpl-2.0 |
mandfx/android_kernel_huawei_c8816 | fs/jbd/revoke.c | 7859 | 21797 | /*
* linux/fs/jbd/revoke.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 2000
*
* Copyright 2000 Red Hat corp --- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Journal revoke routines for the generic filesystem journaling code;
* part of the ext2fs journaling system.
*
* Revoke is the mechanism used to prevent old log records for deleted
* metadata from being replayed on top of newer data using the same
* blocks. The revoke mechanism is used in two separate places:
*
* + Commit: during commit we write the entire list of the current
* transaction's revoked blocks to the journal
*
* + Recovery: during recovery we record the transaction ID of all
* revoked blocks. If there are multiple revoke records in the log
* for a single block, only the last one counts, and if there is a log
* entry for a block beyond the last revoke, then that log entry still
* gets replayed.
*
* We can get interactions between revokes and new log data within a
* single transaction:
*
* Block is revoked and then journaled:
* The desired end result is the journaling of the new block, so we
* cancel the revoke before the transaction commits.
*
* Block is journaled and then revoked:
* The revoke must take precedence over the write of the block, so we
* need either to cancel the journal entry or to write the revoke
* later in the log than the log block. In this case, we choose the
* latter: journaling a block cancels any revoke record for that block
* in the current transaction, so any revoke for that block in the
* transaction must have happened after the block was journaled and so
* the revoke must take precedence.
*
* Block is revoked and then written as data:
* The data write is allowed to succeed, but the revoke is _not_
* cancelled. We still need to prevent old log records from
* overwriting the new data. We don't even need to clear the revoke
* bit here.
*
* We cache revoke status of a buffer in the current transaction in b_states
* bits. As the name says, revokevalid flag indicates that the cached revoke
* status of a buffer is valid and we can rely on the cached status.
*
* Revoke information on buffers is a tri-state value:
*
* RevokeValid clear: no cached revoke status, need to look it up
* RevokeValid set, Revoked clear:
* buffer has not been revoked, and cancel_revoke
* need do nothing.
* RevokeValid set, Revoked set:
* buffer has been revoked.
*
* Locking rules:
* We keep two hash tables of revoke records. One hashtable belongs to the
* running transaction (is pointed to by journal->j_revoke), the other one
* belongs to the committing transaction. Accesses to the second hash table
* happen only from the kjournald and no other thread touches this table. Also
* journal_switch_revoke_table() which switches which hashtable belongs to the
* running and which to the committing transaction is called only from
* kjournald. Therefore we need no locks when accessing the hashtable belonging
* to the committing transaction.
*
* All users operating on the hash table belonging to the running transaction
* have a handle to the transaction. Therefore they are safe from kjournald
* switching hash tables under them. For operations on the lists of entries in
* the hash table j_revoke_lock is used.
*
* Finally, also replay code uses the hash tables but at this moment no one else
* can touch them (filesystem isn't mounted yet) and hence no locking is
* needed.
*/
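/*
 * The tri-state in caller terms (a sketch, assuming the
 * buffer_revokevalid()/buffer_revoked() b_state helpers declared with
 * the other jbd buffer bit tests):
 *
 *	if (!buffer_revokevalid(bh))
 *		... status unknown, search the revoke hash table ...
 *	else if (!buffer_revoked(bh))
 *		... not revoked, cancel_revoke need do nothing ...
 *	else
 *		... revoked in the current transaction ...
 */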
#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#endif
#include <linux/log2.h>
static struct kmem_cache *revoke_record_cache;
static struct kmem_cache *revoke_table_cache;
/* Each revoke record represents one single revoked block. During
journal replay, this involves recording the transaction ID of the
last transaction to revoke this block. */
struct jbd_revoke_record_s
{
struct list_head hash;
tid_t sequence; /* Used for recovery only */
unsigned int blocknr;
};
/* The revoke table is just a simple hash table of revoke records. */
struct jbd_revoke_table_s
{
/* It is conceivable that we might want a larger hash table
* for recovery. Must be a power of two. */
int hash_size;
int hash_shift;
struct list_head *hash_table;
};
#ifdef __KERNEL__
static void write_one_revoke_record(journal_t *, transaction_t *,
struct journal_head **, int *,
struct jbd_revoke_record_s *, int);
static void flush_descriptor(journal_t *, struct journal_head *, int, int);
#endif
/* Utility functions to maintain the revoke table */
/* Borrowed from buffer.c: this is a tried and tested block hash function */
static inline int hash(journal_t *journal, unsigned int block)
{
struct jbd_revoke_table_s *table = journal->j_revoke;
int hash_shift = table->hash_shift;
return ((block << (hash_shift - 6)) ^
(block >> 13) ^
(block << (hash_shift - 12))) & (table->hash_size - 1);
}
static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
tid_t seq)
{
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
repeat:
record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
if (!record)
goto oom;
record->sequence = seq;
record->blocknr = blocknr;
hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
spin_lock(&journal->j_revoke_lock);
list_add(&record->hash, hash_list);
spin_unlock(&journal->j_revoke_lock);
return 0;
oom:
if (!journal_oom_retry)
return -ENOMEM;
jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
yield();
goto repeat;
}
/* Find a revoke record in the journal's hash table. */
static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
unsigned int blocknr)
{
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
spin_lock(&journal->j_revoke_lock);
record = (struct jbd_revoke_record_s *) hash_list->next;
while (&(record->hash) != hash_list) {
if (record->blocknr == blocknr) {
spin_unlock(&journal->j_revoke_lock);
return record;
}
record = (struct jbd_revoke_record_s *) record->hash.next;
}
spin_unlock(&journal->j_revoke_lock);
return NULL;
}
void journal_destroy_revoke_caches(void)
{
if (revoke_record_cache) {
kmem_cache_destroy(revoke_record_cache);
revoke_record_cache = NULL;
}
if (revoke_table_cache) {
kmem_cache_destroy(revoke_table_cache);
revoke_table_cache = NULL;
}
}
int __init journal_init_revoke_caches(void)
{
J_ASSERT(!revoke_record_cache);
J_ASSERT(!revoke_table_cache);
revoke_record_cache = kmem_cache_create("revoke_record",
sizeof(struct jbd_revoke_record_s),
0,
SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
NULL);
if (!revoke_record_cache)
goto record_cache_failure;
revoke_table_cache = kmem_cache_create("revoke_table",
sizeof(struct jbd_revoke_table_s),
0, SLAB_TEMPORARY, NULL);
if (!revoke_table_cache)
goto table_cache_failure;
return 0;
table_cache_failure:
journal_destroy_revoke_caches();
record_cache_failure:
return -ENOMEM;
}
static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
{
int shift = 0;
int tmp = hash_size;
struct jbd_revoke_table_s *table;
table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
if (!table)
goto out;
while ((tmp >>= 1UL) != 0UL)
shift++;
table->hash_size = hash_size;
table->hash_shift = shift;
table->hash_table =
kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
if (!table->hash_table) {
kmem_cache_free(revoke_table_cache, table);
table = NULL;
goto out;
}
for (tmp = 0; tmp < hash_size; tmp++)
INIT_LIST_HEAD(&table->hash_table[tmp]);
out:
return table;
}
static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
{
int i;
struct list_head *hash_list;
for (i = 0; i < table->hash_size; i++) {
hash_list = &table->hash_table[i];
J_ASSERT(list_empty(hash_list));
}
kfree(table->hash_table);
kmem_cache_free(revoke_table_cache, table);
}
/* Initialise the revoke table for a given journal to a given size. */
int journal_init_revoke(journal_t *journal, int hash_size)
{
J_ASSERT(journal->j_revoke_table[0] == NULL);
J_ASSERT(is_power_of_2(hash_size));
journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
if (!journal->j_revoke_table[0])
goto fail0;
journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
if (!journal->j_revoke_table[1])
goto fail1;
journal->j_revoke = journal->j_revoke_table[1];
spin_lock_init(&journal->j_revoke_lock);
return 0;
fail1:
journal_destroy_revoke_table(journal->j_revoke_table[0]);
fail0:
return -ENOMEM;
}
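/*
 * Illustrative usage sketch (assumptions: the hash size of 256 matches
 * jbd's usual default but is only an example value; any power of two is
 * accepted).  A typical caller sets up the revoke machinery at
 * journal-load time and tears it down, empty, at unload:
 *
 *	if (journal_init_revoke(journal, 256))
 *		goto fail;			// -ENOMEM
 *	...
 *	journal_destroy_revoke(journal);	// tables must be empty
 */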
/* Destroy a journal's revoke table. The table must already be empty! */
void journal_destroy_revoke(journal_t *journal)
{
journal->j_revoke = NULL;
if (journal->j_revoke_table[0])
journal_destroy_revoke_table(journal->j_revoke_table[0]);
if (journal->j_revoke_table[1])
journal_destroy_revoke_table(journal->j_revoke_table[1]);
}
#ifdef __KERNEL__
/*
* journal_revoke: revoke a given buffer_head from the journal. This
* prevents the block from being replayed during recovery if we take a
* crash after this current transaction commits. Any subsequent
* metadata writes of the buffer in this transaction cancel the
* revoke.
*
* Note that this call may block --- it is up to the caller to make
* sure that there are no further calls to journal_write_metadata
* before the revoke is complete. In ext3, this implies calling the
* revoke before clearing the block bitmap when we are deleting
* metadata.
*
* Revoke performs a journal_forget on any buffer_head passed in as a
* parameter, but does _not_ forget the buffer_head if the bh was only
* found implicitly.
*
* bh_in may not be a journalled buffer - it may have come off
* the hash tables without an attached journal_head.
*
* If bh_in is non-NULL, journal_revoke() will decrement its b_count
* by one.
*/
int journal_revoke(handle_t *handle, unsigned int blocknr,
struct buffer_head *bh_in)
{
struct buffer_head *bh = NULL;
journal_t *journal;
struct block_device *bdev;
int err;
might_sleep();
if (bh_in)
BUFFER_TRACE(bh_in, "enter");
journal = handle->h_transaction->t_journal;
if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
J_ASSERT(!"Cannot set revoke feature!");
return -EINVAL;
}
bdev = journal->j_fs_dev;
bh = bh_in;
if (!bh) {
bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
if (bh)
BUFFER_TRACE(bh, "found on hash");
}
#ifdef JBD_EXPENSIVE_CHECKING
else {
struct buffer_head *bh2;
/* If there is a different buffer_head lying around in
* memory anywhere... */
bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
if (bh2) {
/* ... and it has RevokeValid status... */
if (bh2 != bh && buffer_revokevalid(bh2))
/* ...then it better be revoked too,
* since it's illegal to create a revoke
* record against a buffer_head which is
* not marked revoked --- that would
* risk missing a subsequent revoke
* cancel. */
J_ASSERT_BH(bh2, buffer_revoked(bh2));
put_bh(bh2);
}
}
#endif
/* We really ought not ever to revoke twice in a row without
first having the revoke cancelled: it's illegal to free a
block twice without allocating it in between! */
if (bh) {
if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
"inconsistent data on disk")) {
if (!bh_in)
brelse(bh);
return -EIO;
}
set_buffer_revoked(bh);
set_buffer_revokevalid(bh);
if (bh_in) {
BUFFER_TRACE(bh_in, "call journal_forget");
journal_forget(handle, bh_in);
} else {
BUFFER_TRACE(bh, "call brelse");
__brelse(bh);
}
}
jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in);
err = insert_revoke_hash(journal, blocknr,
handle->h_transaction->t_tid);
BUFFER_TRACE(bh_in, "exit");
return err;
}
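/*
 * Illustrative calling sketch, following the rule in the comment above:
 * an ext3-like filesystem revokes a metadata block *before* releasing it
 * in the block bitmap.  The free routine named here is only a stand-in,
 * not code from this file:
 *
 *	err = journal_revoke(handle, bh->b_blocknr, bh);
 *	if (err)
 *		return err;
 *	ext3_free_blocks(handle, inode, bh->b_blocknr, 1);	// stand-in
 */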
/*
* Cancel an outstanding revoke. For use only internally by the
* journaling code (called from journal_get_write_access).
*
* We trust buffer_revoked() on the buffer if the buffer is already
* being journaled: if there is no revoke pending on the buffer, then we
* don't do anything here.
*
* This would break if it were possible for a buffer to be revoked and
* discarded, and then reallocated within the same transaction. In such
* a case we would have lost the revoked bit, but when we arrived here
* the second time we would still have a pending revoke to cancel. So,
* do not trust the Revoked bit on buffers unless RevokeValid is also
* set.
*/
int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
{
struct jbd_revoke_record_s *record;
journal_t *journal = handle->h_transaction->t_journal;
int need_cancel;
int did_revoke = 0; /* akpm: debug */
struct buffer_head *bh = jh2bh(jh);
jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
/* Is the existing Revoke bit valid? If so, we trust it, and
* only perform the full cancel if the revoke bit is set. If
* not, we can't trust the revoke bit, and we need to do the
* full search for a revoke record. */
if (test_set_buffer_revokevalid(bh)) {
need_cancel = test_clear_buffer_revoked(bh);
} else {
need_cancel = 1;
clear_buffer_revoked(bh);
}
if (need_cancel) {
record = find_revoke_record(journal, bh->b_blocknr);
if (record) {
jbd_debug(4, "cancelled existing revoke on "
"blocknr %llu\n", (unsigned long long)bh->b_blocknr);
spin_lock(&journal->j_revoke_lock);
list_del(&record->hash);
spin_unlock(&journal->j_revoke_lock);
kmem_cache_free(revoke_record_cache, record);
did_revoke = 1;
}
}
#ifdef JBD_EXPENSIVE_CHECKING
/* There better not be one left behind by now! */
record = find_revoke_record(journal, bh->b_blocknr);
J_ASSERT_JH(jh, record == NULL);
#endif
/* Finally, have we just cleared revoke on an unhashed
* buffer_head? If so, we'd better make sure we clear the
* revoked status on any hashed alias too, otherwise the revoke
* state machine will get very upset later on. */
if (need_cancel) {
struct buffer_head *bh2;
bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
if (bh2) {
if (bh2 != bh)
clear_buffer_revoked(bh2);
__brelse(bh2);
}
}
return did_revoke;
}
/*
* journal_clear_buffer_revoked_flags clears the revoked flag of buffers in
* the revoke table, to reflect that there are no revoked buffers in the
* next transaction which is going to be started.
*/
void journal_clear_buffer_revoked_flags(journal_t *journal)
{
struct jbd_revoke_table_s *revoke = journal->j_revoke;
int i = 0;
for (i = 0; i < revoke->hash_size; i++) {
struct list_head *hash_list;
struct list_head *list_entry;
hash_list = &revoke->hash_table[i];
list_for_each(list_entry, hash_list) {
struct jbd_revoke_record_s *record;
struct buffer_head *bh;
record = (struct jbd_revoke_record_s *)list_entry;
bh = __find_get_block(journal->j_fs_dev,
record->blocknr,
journal->j_blocksize);
if (bh) {
clear_buffer_revoked(bh);
__brelse(bh);
}
}
}
}
/* journal_switch_revoke_table: select j_revoke for the next transaction.
* We do not want to suspend any processing until all revokes are
* written -bzzz
*/
void journal_switch_revoke_table(journal_t *journal)
{
int i;
if (journal->j_revoke == journal->j_revoke_table[0])
journal->j_revoke = journal->j_revoke_table[1];
else
journal->j_revoke = journal->j_revoke_table[0];
for (i = 0; i < journal->j_revoke->hash_size; i++)
INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
}
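/*
 * Illustrative commit-time sequence (a sketch only; the real flow lives
 * in the commit code, not in this file).  New handles hash into the
 * fresh table while the committing transaction's revokes are written
 * out from the old one:
 *
 *	journal_switch_revoke_table(journal);
 *	journal_write_revoke_records(journal, commit_transaction, write_op);
 */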
/*
* Write revoke records to the journal for all entries in the current
* revoke hash, deleting the entries as we go.
*/
void journal_write_revoke_records(journal_t *journal,
transaction_t *transaction, int write_op)
{
struct journal_head *descriptor;
struct jbd_revoke_record_s *record;
struct jbd_revoke_table_s *revoke;
struct list_head *hash_list;
int i, offset, count;
descriptor = NULL;
offset = 0;
count = 0;
/* select revoke table for committing transaction */
revoke = journal->j_revoke == journal->j_revoke_table[0] ?
journal->j_revoke_table[1] : journal->j_revoke_table[0];
for (i = 0; i < revoke->hash_size; i++) {
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
record = (struct jbd_revoke_record_s *)
hash_list->next;
write_one_revoke_record(journal, transaction,
&descriptor, &offset,
record, write_op);
count++;
list_del(&record->hash);
kmem_cache_free(revoke_record_cache, record);
}
}
if (descriptor)
flush_descriptor(journal, descriptor, offset, write_op);
jbd_debug(1, "Wrote %d revoke records\n", count);
}
/*
* Write out one revoke record. We need to create a new descriptor
* block if the old one is full or if we have not already created one.
*/
static void write_one_revoke_record(journal_t *journal,
transaction_t *transaction,
struct journal_head **descriptorp,
int *offsetp,
struct jbd_revoke_record_s *record,
int write_op)
{
struct journal_head *descriptor;
int offset;
journal_header_t *header;
/* If we are already aborting, this all becomes a noop. We
still need to go round the loop in
journal_write_revoke_records in order to free all of the
revoke records: only the IO to the journal is omitted. */
if (is_journal_aborted(journal))
return;
descriptor = *descriptorp;
offset = *offsetp;
/* Make sure we have a descriptor with space left for the record */
if (descriptor) {
if (offset == journal->j_blocksize) {
flush_descriptor(journal, descriptor, offset, write_op);
descriptor = NULL;
}
}
if (!descriptor) {
descriptor = journal_get_descriptor_buffer(journal);
if (!descriptor)
return;
header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
header->h_sequence = cpu_to_be32(transaction->t_tid);
/* Record it so that we can wait for IO completion later */
JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
journal_file_buffer(descriptor, transaction, BJ_LogCtl);
offset = sizeof(journal_revoke_header_t);
*descriptorp = descriptor;
}
* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
cpu_to_be32(record->blocknr);
offset += 4;
*offsetp = offset;
}
/*
* Flush a revoke descriptor out to the journal. If we are aborting,
* this is a noop; otherwise we are generating a buffer which needs to
* be waited for during commit, so it has to go onto the appropriate
* journal buffer list.
*/
static void flush_descriptor(journal_t *journal,
struct journal_head *descriptor,
int offset, int write_op)
{
journal_revoke_header_t *header;
struct buffer_head *bh = jh2bh(descriptor);
if (is_journal_aborted(journal)) {
put_bh(bh);
return;
}
header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
header->r_count = cpu_to_be32(offset);
set_buffer_jwrite(bh);
BUFFER_TRACE(bh, "write");
set_buffer_dirty(bh);
write_dirty_buffer(bh, write_op);
}
#endif
/*
* Revoke support for recovery.
*
* Recovery needs to be able to:
*
* record all revoke records, including the tid of the latest instance
* of each revoke in the journal
*
* check whether a given block in a given transaction should be replayed
* (ie. has not been revoked by a revoke record in that or a subsequent
* transaction)
*
* empty the revoke table after recovery.
*/
/*
* First, setting revoke records. We create a new revoke record for
* every block ever revoked in the log as we scan it for recovery, and
* we update the existing records if we find multiple revokes for a
* single block.
*/
int journal_set_revoke(journal_t *journal,
unsigned int blocknr,
tid_t sequence)
{
struct jbd_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (record) {
/* If we have multiple occurrences, only record the
* latest sequence number in the hashed record */
if (tid_gt(sequence, record->sequence))
record->sequence = sequence;
return 0;
}
return insert_revoke_hash(journal, blocknr, sequence);
}
/*
* Test revoke records. For a given block referenced in the log, has
* that block been revoked? A revoke record with a given transaction
* sequence number revokes all blocks in that transaction and earlier
* ones, but later transactions still need to be replayed.
*/
int journal_test_revoke(journal_t *journal,
unsigned int blocknr,
tid_t sequence)
{
struct jbd_revoke_record_s *record;
record = find_revoke_record(journal, blocknr);
if (!record)
return 0;
if (tid_gt(sequence, record->sequence))
return 0;
return 1;
}
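/*
 * Illustrative recovery sketch (the loop structure and replay_block()
 * are assumptions, not code from this file).  A first scan records
 * revokes; a second scan consults them before replaying:
 *
 *	// scan pass: for each revoke record (blocknr, tid) in the log
 *	journal_set_revoke(journal, blocknr, tid);
 *
 *	// replay pass: for each data block in transaction tid
 *	if (!journal_test_revoke(journal, blocknr, tid))
 *		replay_block(blocknr);
 */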
/*
* Finally, once recovery is over, we need to clear the revoke table so
* that it can be reused by the running filesystem.
*/
void journal_clear_revoke(journal_t *journal)
{
int i;
struct list_head *hash_list;
struct jbd_revoke_record_s *record;
struct jbd_revoke_table_s *revoke;
revoke = journal->j_revoke;
for (i = 0; i < revoke->hash_size; i++) {
hash_list = &revoke->hash_table[i];
while (!list_empty(hash_list)) {
record = (struct jbd_revoke_record_s*) hash_list->next;
list_del(&record->hash);
kmem_cache_free(revoke_record_cache, record);
}
}
}
| gpl-2.0 |
ganjafuzz/PureKernel-v2-CAF | arch/x86/kernel/cpu/rdrand.c | 8115 | 2013 | /*
* This file is part of the Linux kernel.
*
* Copyright (c) 2011, Intel Corporation
* Authors: Fenghua Yu <fenghua.yu@intel.com>,
* H. Peter Anvin <hpa@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <asm/processor.h>
#include <asm/archrandom.h>
#include <asm/sections.h>
static int __init x86_rdrand_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_RDRAND);
return 1;
}
__setup("nordrand", x86_rdrand_setup);
/* We can't use arch_get_random_long() here since alternatives haven't run */
static inline int rdrand_long(unsigned long *v)
{
int ok;
asm volatile("1: " RDRAND_LONG "\n\t"
"jc 2f\n\t"
"decl %0\n\t"
"jnz 1b\n\t"
"2:"
: "=r" (ok), "=a" (*v)
: "0" (RDRAND_RETRY_LOOPS));
return ok;
}
/*
* Force a reseed cycle; we are architecturally guaranteed a reseed
* after no more than 512 128-bit chunks of random data. This also
* acts as a test of the CPU capability.
*/
#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
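/*
 * Worked numbers for the macro above: 512 chunks * 128 bits = 65536.  On
 * x86-64, sizeof(unsigned long) is 8, so RESEED_LOOP is 8192 iterations;
 * each rdrand_long() pulls 64 bits, for 524288 bits in total -- several
 * times past the architectural reseed interval, so at least one reseed
 * is guaranteed to have occurred during the loop.
 */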
void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_ARCH_RANDOM
unsigned long tmp;
int i, count, ok;
if (!cpu_has(c, X86_FEATURE_RDRAND))
return; /* Nothing to do */
for (count = i = 0; i < RESEED_LOOP; i++) {
ok = rdrand_long(&tmp);
if (ok)
count++;
}
if (count != RESEED_LOOP)
clear_cpu_cap(c, X86_FEATURE_RDRAND);
#endif
}
| gpl-2.0 |
xtreamerdev/linux-mips | drivers/scsi/aic94xx/aic94xx_hwi.c | 9395 | 38899 | /*
* Aic94xx SAS/SATA driver hardware interface.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
*
* This file is part of the aic94xx driver.
*
* The aic94xx driver is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* The aic94xx driver is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the aic94xx driver; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include "aic94xx.h"
#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_dump.h"
u32 MBAR0_SWB_SIZE;
/* ---------- Initialization ---------- */
static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
{
/* adapter came with a sas address */
if (asd_ha->hw_prof.sas_addr[0])
return 0;
return sas_request_addr(asd_ha->sas_ha.core.shost,
asd_ha->hw_prof.sas_addr);
}
static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
{
int i;
for (i = 0; i < ASD_MAX_PHYS; i++) {
if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
continue;
/* Set a phy's address only if it has none.
*/
ASD_DPRINTK("setting phy%d addr to %llx\n", i,
SAS_ADDR(asd_ha->hw_prof.sas_addr));
memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
}
}
/* ---------- PHY initialization ---------- */
static void asd_init_phy_identify(struct asd_phy *phy)
{
phy->identify_frame = phy->id_frm_tok->vaddr;
memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
phy->identify_frame->dev_type = SAS_END_DEV;
if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
if (phy->sas_phy.role & PHY_ROLE_TARGET)
phy->identify_frame->target_bits = phy->sas_phy.tproto;
memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
SAS_ADDR_SIZE);
phy->identify_frame->phy_id = phy->sas_phy.id;
}
static int asd_init_phy(struct asd_phy *phy)
{
struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
sas_phy->enabled = 1;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
phy->id_frm_tok = asd_alloc_coherent(asd_ha,
sizeof(*phy->identify_frame),
GFP_KERNEL);
if (!phy->id_frm_tok) {
asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
return -ENOMEM;
} else
asd_init_phy_identify(phy);
memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
return 0;
}
static void asd_init_ports(struct asd_ha_struct *asd_ha)
{
int i;
spin_lock_init(&asd_ha->asd_ports_lock);
for (i = 0; i < ASD_MAX_PHYS; i++) {
struct asd_port *asd_port = &asd_ha->asd_ports[i];
memset(asd_port->sas_addr, 0, SAS_ADDR_SIZE);
memset(asd_port->attached_sas_addr, 0, SAS_ADDR_SIZE);
asd_port->phy_mask = 0;
asd_port->num_phys = 0;
}
}
static int asd_init_phys(struct asd_ha_struct *asd_ha)
{
u8 i;
u8 phy_mask = asd_ha->hw_prof.enabled_phys;
for (i = 0; i < ASD_MAX_PHYS; i++) {
struct asd_phy *phy = &asd_ha->phys[i];
phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
phy->asd_port = NULL;
phy->sas_phy.enabled = 0;
phy->sas_phy.id = i;
phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
phy->sas_phy.ha = &asd_ha->sas_ha;
phy->sas_phy.lldd_phy = phy;
}
/* Now enable and initialize only the enabled phys. */
for_each_phy(phy_mask, phy_mask, i) {
int err = asd_init_phy(&asd_ha->phys[i]);
if (err)
return err;
}
return 0;
}
/* ---------- Sliding windows ---------- */
static int asd_init_sw(struct asd_ha_struct *asd_ha)
{
struct pci_dev *pcidev = asd_ha->pcidev;
int err;
u32 v;
/* Unlock MBARs */
err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
if (err) {
asd_printk("couldn't access conf. space of %s\n",
pci_name(pcidev));
goto Err;
}
if (v)
err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
if (err) {
asd_printk("couldn't write to MBAR_KEY of %s\n",
pci_name(pcidev));
goto Err;
}
/* Set sliding windows A, B and C to point to proper internal
* memory regions.
*/
pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
REG_BASE_ADDR_CSEQCIO);
pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
if (!asd_ha->iospace) {
/* MBAR1 will point to OCM (On Chip Memory) */
pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
}
spin_lock_init(&asd_ha->iolock);
Err:
return err;
}
/* ---------- SCB initialization ---------- */
/**
* asd_init_scbs -- manually allocate the first SCB.
* @asd_ha: pointer to host adapter structure
*
* This allocates the very first SCB which would be sent to the
* sequencer for execution. Its bus address is written to
* CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of
* the _next_ scb to be DMA-ed to the host adapter is read from the last
* SCB DMA-ed to the host adapter, we have to always stay one step
* ahead of the sequencer and keep one SCB already allocated.
*/
static int asd_init_scbs(struct asd_ha_struct *asd_ha)
{
struct asd_seq_data *seq = &asd_ha->seq;
int bitmap_bytes;
/* allocate the index array and bitmap */
asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
sizeof(void *), GFP_KERNEL);
if (!asd_ha->seq.tc_index_array)
return -ENOMEM;
bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
if (!asd_ha->seq.tc_index_bitmap)
return -ENOMEM;
spin_lock_init(&seq->tc_index_lock);
seq->next_scb.size = sizeof(struct scb);
seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
&seq->next_scb.dma_handle);
if (!seq->next_scb.vaddr) {
kfree(asd_ha->seq.tc_index_bitmap);
kfree(asd_ha->seq.tc_index_array);
asd_ha->seq.tc_index_bitmap = NULL;
asd_ha->seq.tc_index_array = NULL;
return -ENOMEM;
}
seq->pending = 0;
spin_lock_init(&seq->pend_q_lock);
INIT_LIST_HEAD(&seq->pend_q);
return 0;
}
static void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
{
asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
asd_ha->hw_prof.max_scbs,
asd_ha->hw_prof.max_ddbs);
}
/* ---------- Done List initialization ---------- */
static void asd_dl_tasklet_handler(unsigned long);
static int asd_init_dl(struct asd_ha_struct *asd_ha)
{
asd_ha->seq.actual_dl
= asd_alloc_coherent(asd_ha,
ASD_DL_SIZE * sizeof(struct done_list_struct),
GFP_KERNEL);
if (!asd_ha->seq.actual_dl)
return -ENOMEM;
asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
asd_ha->seq.dl_next = 0;
tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
(unsigned long) asd_ha);
return 0;
}
/* ---------- EDB and ESCB init ---------- */
static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, gfp_t gfp_flags)
{
struct asd_seq_data *seq = &asd_ha->seq;
int i;
seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
if (!seq->edb_arr)
return -ENOMEM;
for (i = 0; i < seq->num_edbs; i++) {
seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
gfp_flags);
if (!seq->edb_arr[i])
goto Err_unroll;
memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
}
ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
return 0;
Err_unroll:
for (i-- ; i >= 0; i--)
asd_free_coherent(asd_ha, seq->edb_arr[i]);
kfree(seq->edb_arr);
seq->edb_arr = NULL;
return -ENOMEM;
}
static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
gfp_t gfp_flags)
{
struct asd_seq_data *seq = &asd_ha->seq;
struct asd_ascb *escb;
int i, escbs;
seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
gfp_flags);
if (!seq->escb_arr)
return -ENOMEM;
escbs = seq->num_escbs;
escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
if (!escb) {
asd_printk("couldn't allocate list of escbs\n");
goto Err;
}
seq->num_escbs -= escbs; /* subtract what was not allocated */
ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);
for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
struct asd_ascb,
list)) {
seq->escb_arr[i] = escb;
escb->scb->header.opcode = EMPTY_SCB;
}
return 0;
Err:
kfree(seq->escb_arr);
seq->escb_arr = NULL;
return -ENOMEM;
}
static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
{
struct asd_seq_data *seq = &asd_ha->seq;
int i, k, z = 0;
for (i = 0; i < seq->num_escbs; i++) {
struct asd_ascb *ascb = seq->escb_arr[i];
struct empty_scb *escb = &ascb->scb->escb;
ascb->edb_index = z;
escb->num_valid = ASD_EDBS_PER_SCB;
for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
struct sg_el *eb = &escb->eb[k];
struct asd_dma_tok *edb = seq->edb_arr[z++];
memset(eb, 0, sizeof(*eb));
eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
eb->size = cpu_to_le32(((u32) edb->size));
}
}
}
/**
* asd_init_escbs -- allocate and initialize empty scbs
* @asd_ha: pointer to host adapter structure
*
* An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers.
* They transport sense data, etc.
*/
static int asd_init_escbs(struct asd_ha_struct *asd_ha)
{
struct asd_seq_data *seq = &asd_ha->seq;
int err = 0;
/* Allocate two empty data buffers (edb) per sequencer. */
int edbs = 2*(1+asd_ha->hw_prof.num_phys);
seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;
err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
if (err) {
asd_printk("couldn't allocate edbs\n");
return err;
}
err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
if (err) {
asd_printk("couldn't allocate escbs\n");
return err;
}
asd_assign_edbs2escbs(asd_ha);
/* In order to ensure that normal SCBs do not overfill sequencer
* memory and leave no space for escbs (halting condition),
* we increment pending here by the number of escbs. However,
* escbs are never pending.
*/
seq->pending = seq->num_escbs;
seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;
return 0;
}
/* ---------- HW initialization ---------- */
/**
* asd_chip_hardrst -- hard reset the chip
* @asd_ha: pointer to host adapter structure
*
* This takes 16 cycles and is synchronous to CFCLK, which runs
* at 200 MHz, so this should take at most 80 nanoseconds.
*/
int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
{
int i;
int count = 100;
u32 reg;
for (i = 0 ; i < 4 ; i++) {
asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
}
do {
udelay(1);
reg = asd_read_reg_dword(asd_ha, CHIMINT);
if (reg & HARDRSTDET) {
asd_write_reg_dword(asd_ha, CHIMINT,
HARDRSTDET|PORRSTDET);
return 0;
}
} while (--count > 0);
return -ENODEV;
}
/**
* asd_init_chip -- initialize the chip
* @asd_ha: pointer to host adapter structure
*
* Hard resets the chip, disables HA interrupts, downloads the sequencer
* microcode and starts the sequencers. The caller has to explicitly
* enable HA interrupts with asd_enable_ints(asd_ha).
*/
static int asd_init_chip(struct asd_ha_struct *asd_ha)
{
int err;
err = asd_chip_hardrst(asd_ha);
if (err) {
asd_printk("couldn't hard reset %s\n",
pci_name(asd_ha->pcidev));
goto out;
}
asd_disable_ints(asd_ha);
err = asd_init_seqs(asd_ha);
if (err) {
asd_printk("couldn't init seqs for %s\n",
pci_name(asd_ha->pcidev));
goto out;
}
err = asd_start_seqs(asd_ha);
if (err) {
asd_printk("coudln't start seqs for %s\n",
pci_name(asd_ha->pcidev));
goto out;
}
out:
return err;
}
#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
static int max_devs = 0;
module_param_named(max_devs, max_devs, int, S_IRUGO);
MODULE_PARM_DESC(max_devs, "\n"
"\tMaximum number of SAS devices to support (not LUs).\n"
"\tDefault: 2176, Maximum: 65663.\n");
static int max_cmnds = 0;
module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
MODULE_PARM_DESC(max_cmnds, "\n"
"\tMaximum number of commands queuable.\n"
"\tDefault: 512, Maximum: 66047.\n");
static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
{
unsigned long dma_addr = OCM_BASE_ADDR;
u32 d;
dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
d |= 4;
asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
asd_ha->hw_prof.max_ddbs += MAX_DEVS;
}
static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
{
dma_addr_t dma_handle;
unsigned long dma_addr;
u32 d;
int size;
asd_extend_devctx_ocm(asd_ha);
asd_ha->hw_prof.ddb_ext = NULL;
if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
max_devs = asd_ha->hw_prof.max_ddbs;
return 0;
}
size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
if (!asd_ha->hw_prof.ddb_ext) {
asd_printk("couldn't allocate memory for %d devices\n",
max_devs);
max_devs = asd_ha->hw_prof.max_ddbs;
return -ENOMEM;
}
dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
dma_handle = (dma_addr_t) dma_addr;
asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
d &= ~4;
asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
asd_ha->hw_prof.max_ddbs = max_devs;
return 0;
}
static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
{
dma_addr_t dma_handle;
unsigned long dma_addr;
u32 d;
int size;
asd_ha->hw_prof.scb_ext = NULL;
if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
max_cmnds = asd_ha->hw_prof.max_scbs;
return 0;
}
size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
if (!asd_ha->hw_prof.scb_ext) {
asd_printk("couldn't allocate memory for %d commands\n",
max_cmnds);
max_cmnds = asd_ha->hw_prof.max_scbs;
return -ENOMEM;
}
dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
dma_handle = (dma_addr_t) dma_addr;
asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
d &= ~1;
asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
asd_ha->hw_prof.max_scbs = max_cmnds;
return 0;
}
/**
* asd_init_ctxmem -- initialize context memory
* @asd_ha: pointer to host adapter structure
*
* This function sets the maximum number of SCBs and
* DDBs which can be used by the sequencer. This is normally
* 512 and 128 respectively. If support for more SCBs or more DDBs
* is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
* initialized here to extend context memory to point to host memory,
* thus allowing unlimited support for SCBs and DDBs -- only limited
* by host memory.
*/
static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
{
int bitmap_bytes;
asd_get_max_scb_ddb(asd_ha);
asd_extend_devctx(asd_ha);
asd_extend_cmdctx(asd_ha);
/* The kernel wants bitmaps to be unsigned long sized. */
bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
if (!asd_ha->hw_prof.ddb_bitmap)
return -ENOMEM;
spin_lock_init(&asd_ha->hw_prof.ddb_lock);
return 0;
}
int asd_init_hw(struct asd_ha_struct *asd_ha)
{
int err;
u32 v;
err = asd_init_sw(asd_ha);
if (err)
return err;
err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
if (err) {
asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
pci_name(asd_ha->pcidev));
return err;
}
err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
v | SC_TMR_DIS);
if (err) {
asd_printk("couldn't disable split completion timer of %s\n",
pci_name(asd_ha->pcidev));
return err;
}
err = asd_read_ocm(asd_ha);
if (err) {
asd_printk("couldn't read ocm(%d)\n", err);
/* While suspicious, it is not an error that we
* couldn't read the OCM. */
}
err = asd_read_flash(asd_ha);
if (err) {
asd_printk("couldn't read flash(%d)\n", err);
/* While suspicious, it is not an error that we
* couldn't read FLASH memory.
*/
}
asd_init_ctxmem(asd_ha);
if (asd_get_user_sas_addr(asd_ha)) {
asd_printk("No SAS Address provided for %s\n",
pci_name(asd_ha->pcidev));
err = -ENODEV;
goto Out;
}
asd_propagate_sas_addr(asd_ha);
err = asd_init_phys(asd_ha);
if (err) {
asd_printk("couldn't initialize phys for %s\n",
pci_name(asd_ha->pcidev));
goto Out;
}
asd_init_ports(asd_ha);
err = asd_init_scbs(asd_ha);
if (err) {
asd_printk("couldn't initialize scbs for %s\n",
pci_name(asd_ha->pcidev));
goto Out;
}
err = asd_init_dl(asd_ha);
if (err) {
asd_printk("couldn't initialize the done list:%d\n",
err);
goto Out;
}
err = asd_init_escbs(asd_ha);
if (err) {
asd_printk("couldn't initialize escbs\n");
goto Out;
}
err = asd_init_chip(asd_ha);
if (err) {
asd_printk("couldn't init the chip\n");
goto Out;
}
Out:
return err;
}
/* ---------- Chip reset ---------- */
/**
* asd_chip_reset -- reset the host adapter, etc
* @asd_ha: pointer to host adapter structure of interest
*
* Called from the ISR. Hard reset the chip. Let everything
* timeout. This should be no different than hot-unplugging the
* host adapter. Once everything times out we'll init the chip with
* a call to asd_init_chip() and enable interrupts with asd_enable_ints().
* XXX finish.
*/
static void asd_chip_reset(struct asd_ha_struct *asd_ha)
{
struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
asd_chip_hardrst(asd_ha);
sas_ha->notify_ha_event(sas_ha, HAE_RESET);
}
/* ---------- Done List Routines ---------- */
static void asd_dl_tasklet_handler(unsigned long data)
{
struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
struct asd_seq_data *seq = &asd_ha->seq;
unsigned long flags;
while (1) {
struct done_list_struct *dl = &seq->dl[seq->dl_next];
struct asd_ascb *ascb;
if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
break;
/* find the aSCB */
spin_lock_irqsave(&seq->tc_index_lock, flags);
ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
spin_unlock_irqrestore(&seq->tc_index_lock, flags);
if (unlikely(!ascb)) {
ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
goto next_1;
} else if (ascb->scb->header.opcode == EMPTY_SCB) {
goto out;
} else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
goto next_1;
}
spin_lock_irqsave(&seq->pend_q_lock, flags);
list_del_init(&ascb->list);
seq->pending--;
spin_unlock_irqrestore(&seq->pend_q_lock, flags);
out:
ascb->tasklet_complete(ascb, dl);
next_1:
seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
if (!seq->dl_next)
seq->dl_toggle ^= DL_TOGGLE_MASK;
}
}
/* ---------- Interrupt Service Routines ---------- */
/**
* asd_process_donelist_isr -- schedule processing of done list entries
* @asd_ha: pointer to host adapter structure
*/
static void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
{
tasklet_schedule(&asd_ha->seq.dl_tasklet);
}
/**
* asd_com_sas_isr -- process device communication interrupt (COMINT)
* @asd_ha: pointer to host adapter structure
*/
static void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
{
u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
/* clear COMSTAT int */
asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);
if (comstat & CSBUFPERR) {
asd_printk("%s: command/status buffer dma parity error\n",
pci_name(asd_ha->pcidev));
} else if (comstat & CSERR) {
int i;
u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
dmaerr &= 0xFF;
asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
"CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
pci_name(asd_ha->pcidev),
dmaerr,
asd_read_reg_dword(asd_ha, CSDMAADR),
asd_read_reg_dword(asd_ha, CSDMAADR+4));
asd_printk("CSBUFFER:\n");
for (i = 0; i < 8; i++) {
asd_printk("%08x %08x %08x %08x\n",
asd_read_reg_dword(asd_ha, CSBUFFER),
asd_read_reg_dword(asd_ha, CSBUFFER+4),
asd_read_reg_dword(asd_ha, CSBUFFER+8),
asd_read_reg_dword(asd_ha, CSBUFFER+12));
}
asd_dump_seq_state(asd_ha, 0);
} else if (comstat & OVLYERR) {
u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
dmaerr = (dmaerr >> 8) & 0xFF;
asd_printk("%s: overlay dma error:0x%x\n",
pci_name(asd_ha->pcidev),
dmaerr);
}
asd_chip_reset(asd_ha);
}
static void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
{
static const char *halt_code[256] = {
"UNEXPECTED_INTERRUPT0",
"UNEXPECTED_INTERRUPT1",
"UNEXPECTED_INTERRUPT2",
"UNEXPECTED_INTERRUPT3",
"UNEXPECTED_INTERRUPT4",
"UNEXPECTED_INTERRUPT5",
"UNEXPECTED_INTERRUPT6",
"UNEXPECTED_INTERRUPT7",
"UNEXPECTED_INTERRUPT8",
"UNEXPECTED_INTERRUPT9",
"UNEXPECTED_INTERRUPT10",
[11 ... 19] = "unknown[11,19]",
"NO_FREE_SCB_AVAILABLE",
"INVALID_SCB_OPCODE",
"INVALID_MBX_OPCODE",
"INVALID_ATA_STATE",
"ATA_QUEUE_FULL",
"ATA_TAG_TABLE_FAULT",
"ATA_TAG_MASK_FAULT",
"BAD_LINK_QUEUE_STATE",
"DMA2CHIM_QUEUE_ERROR",
"EMPTY_SCB_LIST_FULL",
"unknown[30]",
"IN_USE_SCB_ON_FREE_LIST",
"BAD_OPEN_WAIT_STATE",
"INVALID_STP_AFFILIATION",
"unknown[34]",
"EXEC_QUEUE_ERROR",
"TOO_MANY_EMPTIES_NEEDED",
"EMPTY_REQ_QUEUE_ERROR",
"Q_MONIRTT_MGMT_ERROR",
"TARGET_MODE_FLOW_ERROR",
"DEVICE_QUEUE_NOT_FOUND",
"START_IRTT_TIMER_ERROR",
"ABORT_TASK_ILLEGAL_REQ",
[43 ... 255] = "unknown[43,255]"
};
if (dchstatus & CSEQINT) {
u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
asd_printk("%s: CSEQ arp2int:0x%x\n",
pci_name(asd_ha->pcidev),
arp2int);
} else if (arp2int & ARP2HALTC)
asd_printk("%s: CSEQ halted: %s\n",
pci_name(asd_ha->pcidev),
halt_code[(arp2int>>16)&0xFF]);
else
asd_printk("%s: CARP2INT:0x%x\n",
pci_name(asd_ha->pcidev),
arp2int);
}
if (dchstatus & LSEQINT_MASK) {
int lseq;
u8 lseq_mask = dchstatus & LSEQINT_MASK;
for_each_sequencer(lseq_mask, lseq_mask, lseq) {
u32 arp2int = asd_read_reg_dword(asd_ha,
LmARP2INT(lseq));
if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
| ARP2CIOPERR)) {
asd_printk("%s: LSEQ%d arp2int:0x%x\n",
pci_name(asd_ha->pcidev),
lseq, arp2int);
/* XXX we should only do lseq reset */
} else if (arp2int & ARP2HALTC)
asd_printk("%s: LSEQ%d halted: %s\n",
pci_name(asd_ha->pcidev),
lseq, halt_code[(arp2int>>16)&0xFF]);
else
asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
pci_name(asd_ha->pcidev), lseq,
arp2int);
}
}
asd_chip_reset(asd_ha);
}
/**
* asd_dch_sas_isr -- process device channel interrupt (DEVINT)
* @asd_ha: pointer to host adapter structure
*/
static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
{
u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
if (dchstatus & CFIFTOERR) {
asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
asd_chip_reset(asd_ha);
} else
asd_arp2_err(asd_ha, dchstatus);
}
/**
* asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
* @asd_ha: pointer to host adapter structure
*/
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
{
u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
if (!(stat0r & ASIERR)) {
asd_printk("hmm, EXSI interrupted but no error?\n");
return;
}
if (stat0r & ASIFMTERR) {
asd_printk("ASI SEEPROM format error for %s\n",
pci_name(asd_ha->pcidev));
} else if (stat0r & ASISEECHKERR) {
u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
stat1r & CHECKSUM_MASK,
pci_name(asd_ha->pcidev));
} else {
u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
if (!(statr & CPI2ASIMSTERR_MASK)) {
ASD_DPRINTK("hmm, ASIERR?\n");
return;
} else {
u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
"count: 0x%x, byteen: 0x%x, targerr: 0x%x "
"master id: 0x%x, master err: 0x%x\n",
pci_name(asd_ha->pcidev),
addr, data,
(statr & CPI2ASIBYTECNT_MASK) >> 16,
(statr & CPI2ASIBYTEEN_MASK) >> 12,
(statr & CPI2ASITARGERR_MASK) >> 8,
(statr & CPI2ASITARGMID_MASK) >> 4,
(statr & CPI2ASIMSTERR_MASK));
}
}
asd_chip_reset(asd_ha);
}
/**
* asd_hst_pcix_isr -- process host interface interrupts
* @asd_ha: pointer to host adapter structure
*
* Asserted on PCIX errors: target abort, etc.
*/
static void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
{
u16 status;
u32 pcix_status;
u32 ecc_status;
pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
if (status & PCI_STATUS_DETECTED_PARITY)
asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
else if (status & PCI_STATUS_REC_MASTER_ABORT)
asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
else if (status & PCI_STATUS_REC_TARGET_ABORT)
asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
else if (status & PCI_STATUS_PARITY)
asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
else if (pcix_status & RCV_SCE) {
asd_printk("received split completion error for %s\n",
pci_name(asd_ha->pcidev));
pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
/* XXX: Abort task? */
return;
} else if (pcix_status & UNEXP_SC) {
asd_printk("unexpected split completion for %s\n",
pci_name(asd_ha->pcidev));
pci_write_config_dword(asd_ha->pcidev, PCIX_STATUS, pcix_status);
/* ignore */
return;
} else if (pcix_status & SC_DISCARD)
asd_printk("split completion discarded for %s\n",
pci_name(asd_ha->pcidev));
else if (ecc_status & UNCOR_ECCERR)
asd_printk("uncorrectable ECC error for %s\n",
pci_name(asd_ha->pcidev));
asd_chip_reset(asd_ha);
}
/**
* asd_hw_isr -- host adapter interrupt service routine
* @irq: ignored
* @dev_id: pointer to host adapter structure
*
* The ISR processes done list entries and level 3 error handling.
*/
irqreturn_t asd_hw_isr(int irq, void *dev_id)
{
struct asd_ha_struct *asd_ha = dev_id;
u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
if (!chimint)
return IRQ_NONE;
asd_write_reg_dword(asd_ha, CHIMINT, chimint);
(void) asd_read_reg_dword(asd_ha, CHIMINT);
if (chimint & DLAVAIL)
asd_process_donelist_isr(asd_ha);
if (chimint & COMINT)
asd_com_sas_isr(asd_ha);
if (chimint & DEVINT)
asd_dch_sas_isr(asd_ha);
if (chimint & INITERR)
asd_rbi_exsi_isr(asd_ha);
if (chimint & HOSTERR)
asd_hst_pcix_isr(asd_ha);
return IRQ_HANDLED;
}
/* ---------- SCB handling ---------- */
static struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
gfp_t gfp_flags)
{
extern struct kmem_cache *asd_ascb_cache;
struct asd_seq_data *seq = &asd_ha->seq;
struct asd_ascb *ascb;
unsigned long flags;
ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags);
if (ascb) {
ascb->dma_scb.size = sizeof(struct scb);
ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
gfp_flags,
&ascb->dma_scb.dma_handle);
if (!ascb->dma_scb.vaddr) {
kmem_cache_free(asd_ascb_cache, ascb);
return NULL;
}
memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
asd_init_ascb(asd_ha, ascb);
spin_lock_irqsave(&seq->tc_index_lock, flags);
ascb->tc_index = asd_tc_index_get(seq, ascb);
spin_unlock_irqrestore(&seq->tc_index_lock, flags);
if (ascb->tc_index == -1)
goto undo;
ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
}
return ascb;
undo:
dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
ascb->dma_scb.dma_handle);
kmem_cache_free(asd_ascb_cache, ascb);
ASD_DPRINTK("no index for ascb\n");
return NULL;
}
/**
* asd_ascb_alloc_list -- allocate a list of aSCBs
* @asd_ha: pointer to host adapter structure
* @num: pointer to integer number of aSCBs
* @gfp_flags: GFP_ flags.
*
* This is the only function which is used to allocate aSCBs.
* It can allocate one or many. If more than one, then they form
* a linked list in two ways: by their list field of the ascb struct
* and by the next_scb field of the scb_header.
*
* Returns NULL if no memory was available, else pointer to a list
* of ascbs. When this function returns, @num is the number of SCBs
* that could not be allocated; it is 0 if all of the requested SCBs
* were allocated.
*/
struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
*asd_ha, int *num,
gfp_t gfp_flags)
{
struct asd_ascb *first = NULL;
for ( ; *num > 0; --*num) {
struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
if (!ascb)
break;
else if (!first)
first = ascb;
else {
struct asd_ascb *last = list_entry(first->list.prev,
struct asd_ascb,
list);
list_add_tail(&ascb->list, &first->list);
last->scb->header.next_scb =
cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
}
}
return first;
}
/**
* asd_swap_head_scb -- swap the head scb
* @asd_ha: pointer to host adapter structure
* @ascb: pointer to the head of an ascb list
*
* The sequencer knows the DMA address of the next SCB to be DMAed to
* the host adapter, from initialization or from the last list DMAed.
* seq->next_scb keeps the address of this SCB. The sequencer will
* DMA to the host adapter this list of SCBs. But the head (first
* element) of this list is not known to the sequencer. Here we swap
* the head of the list with the known SCB (memcpy()).
* Only one memcpy() is required per list so it is in our interest
* to keep the list of SCB as long as possible so that the ratio
* of number of memcpy calls to the number of SCB DMA-ed is as small
* as possible.
*
* LOCKING: called with the pending list lock held.
*/
static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
struct asd_ascb *ascb)
{
struct asd_seq_data *seq = &asd_ha->seq;
struct asd_ascb *last = list_entry(ascb->list.prev,
struct asd_ascb,
list);
struct asd_dma_tok t = ascb->dma_scb;
memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
ascb->dma_scb = seq->next_scb;
ascb->scb = ascb->dma_scb.vaddr;
seq->next_scb = t;
last->scb->header.next_scb =
cpu_to_le64(((u64)seq->next_scb.dma_handle));
}
/**
* asd_start_scb_timers -- (add and) start timers of SCBs
* @list: pointer to struct list_head of the scbs
* @to: timeout in jiffies
*
* If an SCB in the @list has no timer function, assign the default
* one, then start the timer of the SCB. This function is
* intended to be called from asd_post_ascb_list(), just prior to
* posting the SCBs to the sequencer.
*/
static void asd_start_scb_timers(struct list_head *list)
{
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
ascb->timer.data = (unsigned long) ascb;
ascb->timer.function = asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
}
}
}
/**
* asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
* @asd_ha: pointer to a host adapter structure
* @ascb: pointer to the first aSCB in the list
* @num: number of aSCBs in the list (to be posted)
*
* See queueing comment in asd_post_escb_list().
*
* Additional note on queuing: In order to minimize the ratio of memcpy()
* to the number of ascbs sent, we try to batch-send as many ascbs as possible
* in one go.
* Two cases are possible:
* A) can_queue >= num,
* B) can_queue < num.
* Case A: we can send the whole batch at once. Increment "pending"
* in the beginning of this function, when it is checked, in order to
* eliminate races when this function is called by multiple processes.
* Case B: should never happen if the managing layer considers
* lldd_queue_size.
*/
int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
int num)
{
unsigned long flags;
LIST_HEAD(list);
int can_queue;
spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
if (can_queue >= num)
asd_ha->seq.pending += num;
else
can_queue = 0;
if (!can_queue) {
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
return -SAS_QUEUE_FULL;
}
asd_swap_head_scb(asd_ha, ascb);
__list_add(&list, ascb->list.prev, &ascb->list);
asd_start_scb_timers(&list);
asd_ha->seq.scbpro += num;
list_splice_init(&list, asd_ha->seq.pend_q.prev);
asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
return 0;
}
/**
* asd_post_escb_list -- post a list of 1 or more empty scb
* @asd_ha: pointer to a host adapter structure
* @ascb: pointer to the first empty SCB in the list
* @num: number of aSCBs in the list (to be posted)
*
* This is essentially the same as asd_post_ascb_list, but we do not
* increment pending, add those to the pending list or get indexes.
* See asd_init_escbs() and asd_init_post_escbs().
*
* Since sending a list of ascbs is a superset of sending a single
* ascb, this function exists to generalize this. More specifically,
* when sending a list of those, we want to do only a _single_
* memcpy() at swap head, as opposed to for each ascb sent (in the
* case of sending them one by one). That is, we want to minimize the
* ratio of memcpy() operations to the number of ascbs sent. The same
* logic applies to asd_post_ascb_list().
*/
int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
int num)
{
unsigned long flags;
spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
asd_swap_head_scb(asd_ha, ascb);
asd_ha->seq.scbpro += num;
asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
return 0;
}
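/*
 * Illustrative batch-posting sketch (error handling elided; the counts
 * are example values).  This is the pattern both post routines are tuned
 * for -- allocate a list, then post it with a single head swap:
 *
 *	int n = 4;
 *	struct asd_ascb *list = asd_ascb_alloc_list(asd_ha, &n, GFP_KERNEL);
 *	// n now holds how many of the 4 could NOT be allocated
 *	if (list && asd_post_ascb_list(asd_ha, list, 4 - n))
 *		asd_ascb_free_list(list);	// queue was full
 */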
/* ---------- LED ---------- */
/**
* asd_turn_led -- turn on/off an LED
* @asd_ha: pointer to host adapter structure
* @phy_id: the PHY id whose LED we want to manipulate
* @op: 1 to turn on, 0 to turn off
*/
void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
{
if (phy_id < ASD_MAX_PHYS) {
u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
if (op)
v |= LEDPOL;
else
v &= ~LEDPOL;
asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
}
}
/**
* asd_control_led -- enable/disable an LED on the board
* @asd_ha: pointer to host adapter structure
* @phy_id: integer, the phy id
* @op: integer, 1 to enable, 0 to disable the LED
*
* First we output enable the LED, then we set the source
* to be an external module.
*/
void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
{
if (phy_id < ASD_MAX_PHYS) {
u32 v;
v = asd_read_reg_dword(asd_ha, GPIOOER);
if (op)
v |= (1 << phy_id);
else
v &= ~(1 << phy_id);
asd_write_reg_dword(asd_ha, GPIOOER, v);
v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
if (op)
v |= (1 << phy_id);
else
v &= ~(1 << phy_id);
asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
}
}
/* ---------- PHY enable ---------- */
static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
{
struct asd_phy *phy = &asd_ha->phys[phy_id];
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
HOTPLUG_DELAY_TIMEOUT);
/* Get defaults from manuf. sector */
/* XXX we need defaults for those in case MS is broken. */
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
phy->phy_desc->phy_control_0);
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
phy->phy_desc->phy_control_1);
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
phy->phy_desc->phy_control_2);
asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
phy->phy_desc->phy_control_3);
asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
ASD_COMINIT_TIMEOUT);
asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
phy->id_frm_tok->dma_handle);
asd_control_led(asd_ha, phy_id, 1);
return 0;
}
int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
{
u8 phy_m;
u8 i;
int num = 0, k;
struct asd_ascb *ascb;
struct asd_ascb *ascb_list;
if (!phy_mask) {
asd_printk("%s called with phy_mask of 0!?\n", __func__);
return 0;
}
for_each_phy(phy_mask, phy_m, i) {
num++;
asd_enable_phy(asd_ha, i);
}
k = num;
ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
if (!ascb_list) {
asd_printk("no memory for control phy ascb list\n");
return -ENOMEM;
}
num -= k;
ascb = ascb_list;
for_each_phy(phy_mask, phy_m, i) {
asd_build_control_phy(ascb, i, ENABLE_PHY);
ascb = list_entry(ascb->list.next, struct asd_ascb, list);
}
ASD_DPRINTK("posting %d control phy scbs\n", num);
k = asd_post_ascb_list(asd_ha, ascb_list, num);
if (k)
asd_ascb_free_list(ascb_list);
return k;
}
| gpl-2.0 |
theboleslaw/hammerhead_kernel | drivers/infiniband/hw/ehca/ehca_av.c | 13491 | 8046 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* address vector functions
*
* Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Khadija Souissi <souissik@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
static struct kmem_cache *av_cache;
int ehca_calc_ipd(struct ehca_shca *shca, int port,
enum ib_rate path_rate, u32 *ipd)
{
int path = ib_rate_to_mult(path_rate);
int link, ret;
struct ib_port_attr pa;
if (path_rate == IB_RATE_PORT_CURRENT) {
*ipd = 0;
return 0;
}
if (unlikely(path < 0)) {
ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
path_rate);
return -EINVAL;
}
ret = ehca_query_port(&shca->ib_device, port, &pa);
if (unlikely(ret < 0)) {
ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
return ret;
}
link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
if (path >= link)
/* no need to throttle if path faster than link */
*ipd = 0;
else
/* IPD = round((link / path) - 1) */
*ipd = ((link + (path >> 1)) / path) - 1;
return 0;
}
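/*
 * Worked example for the math above (values assumed): a static path rate
 * of IB_RATE_2_5_GBPS (multiplier 1) over an active 4X SDR port gives
 * link = ib_width_enum_to_int(IB_WIDTH_4X) * 1 = 4 and path = 1, so
 * *ipd = ((4 + 0) / 1) - 1 = 3 inter-packet delay units to throttle the
 * faster link down to the requested static rate.
 */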
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
int ret;
struct ehca_av *av;
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
av = kmem_cache_alloc(av_cache, GFP_KERNEL);
if (!av) {
ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
pd, ah_attr);
return ERR_PTR(-ENOMEM);
}
av->av.sl = ah_attr->sl;
av->av.dlid = ah_attr->dlid;
av->av.slid_path_bits = ah_attr->src_path_bits;
if (ehca_static_rate < 0) {
u32 ipd;
if (ehca_calc_ipd(shca, ah_attr->port_num,
ah_attr->static_rate, &ipd)) {
ret = -EINVAL;
goto create_ah_exit1;
}
av->av.ipd = ipd;
} else
av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(pd->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ret = -EINVAL;
ehca_err(pd->device, "Invalid port number "
"ehca_query_port() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(pd->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ret = -EINVAL;
ehca_err(pd->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
}
av->av.pmtu = shca->max_mtu;
/* dgid comes in grh.word_3 */
memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
return &av->ib_ah;
create_ah_exit1:
kmem_cache_free(av_cache, av);
return ERR_PTR(ret);
}
int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av;
struct ehca_ud_av new_ehca_av;
struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
ib_device);
memset(&new_ehca_av, 0, sizeof(new_ehca_av));
new_ehca_av.sl = ah_attr->sl;
new_ehca_av.dlid = ah_attr->dlid;
new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
new_ehca_av.ipd = ah_attr->static_rate;
new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
(ah_attr->ah_flags & IB_AH_GRH) > 0);
new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(ah->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ehca_err(ah->device, "Invalid port number "
"ehca_query_port() returned %x "
"ah=%p ah_attr=%p port_num=%x",
rc, ah, ah_attr, ah_attr->port_num);
return -EINVAL;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(ah->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ehca_err(ah->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"ah=%p ah_attr=%p port_num=%x "
"sgid_index=%x",
rc, ah, ah_attr, ah_attr->port_num,
ah_attr->grh.sgid_index);
return -EINVAL;
}
memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
}
new_ehca_av.pmtu = shca->max_mtu;
memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
av = container_of(ah, struct ehca_av, ib_ah);
av->av = new_ehca_av;
return 0;
}
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
sizeof(ah_attr->grh.dgid));
ah_attr->sl = av->av.sl;
ah_attr->dlid = av->av.dlid;
ah_attr->src_path_bits = av->av.slid_path_bits;
ah_attr->static_rate = av->av.ipd;
ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
av->av.grh.word_0);
ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
av->av.grh.word_0);
ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
av->av.grh.word_0);
return 0;
}
int ehca_destroy_ah(struct ib_ah *ah)
{
kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
return 0;
}
int ehca_init_av_cache(void)
{
av_cache = kmem_cache_create("ehca_cache_av",
sizeof(struct ehca_av), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!av_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_av_cache(void)
{
if (av_cache)
kmem_cache_destroy(av_cache);
}
| gpl-2.0 |
hroark13/prevail_kernel_fixed | drivers/infiniband/hw/ehca/ehca_av.c | 13491 | 8046 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* address vector functions
*
* Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Khadija Souissi <souissik@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
static struct kmem_cache *av_cache;
int ehca_calc_ipd(struct ehca_shca *shca, int port,
enum ib_rate path_rate, u32 *ipd)
{
int path = ib_rate_to_mult(path_rate);
int link, ret;
struct ib_port_attr pa;
if (path_rate == IB_RATE_PORT_CURRENT) {
*ipd = 0;
return 0;
}
if (unlikely(path < 0)) {
ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
path_rate);
return -EINVAL;
}
ret = ehca_query_port(&shca->ib_device, port, &pa);
if (unlikely(ret < 0)) {
ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
return ret;
}
link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
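	/*
	 * Both values are multiples of the 2.5 Gb/s SDR lane rate:
	 * ib_rate_to_mult() converts the requested static rate, and
	 * width * speed gives the link rate (e.g. 4X DDR = 4 * 2 = 8).
	 * The "+ (path >> 1)" below rounds link/path to the nearest
	 * integer, e.g. link = 12, path = 8: (12 + 4) / 8 - 1 -> IPD 1.
	 */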
if (path >= link)
/* no need to throttle if path faster than link */
*ipd = 0;
else
/* IPD = round((link / path) - 1) */
*ipd = ((link + (path >> 1)) / path) - 1;
return 0;
}
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
int ret;
struct ehca_av *av;
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
av = kmem_cache_alloc(av_cache, GFP_KERNEL);
if (!av) {
ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
pd, ah_attr);
return ERR_PTR(-ENOMEM);
}
av->av.sl = ah_attr->sl;
av->av.dlid = ah_attr->dlid;
av->av.slid_path_bits = ah_attr->src_path_bits;
if (ehca_static_rate < 0) {
u32 ipd;
if (ehca_calc_ipd(shca, ah_attr->port_num,
ah_attr->static_rate, &ipd)) {
ret = -EINVAL;
goto create_ah_exit1;
}
av->av.ipd = ipd;
} else
av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
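	/* NextHdr 0x1B marks the payload as IBA transport (BTH follows) */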
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(pd->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ret = -EINVAL;
ehca_err(pd->device, "Invalid port number "
"ehca_query_port() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(pd->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ret = -EINVAL;
ehca_err(pd->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
}
av->av.pmtu = shca->max_mtu;
/* dgid comes in grh.word_3 */
memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
return &av->ib_ah;
create_ah_exit1:
kmem_cache_free(av_cache, av);
return ERR_PTR(ret);
}
int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av;
struct ehca_ud_av new_ehca_av;
struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
ib_device);
memset(&new_ehca_av, 0, sizeof(new_ehca_av));
new_ehca_av.sl = ah_attr->sl;
new_ehca_av.dlid = ah_attr->dlid;
new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
new_ehca_av.ipd = ah_attr->static_rate;
new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
(ah_attr->ah_flags & IB_AH_GRH) > 0);
new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(ah->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ehca_err(ah->device, "Invalid port number "
"ehca_query_port() returned %x "
"ah=%p ah_attr=%p port_num=%x",
rc, ah, ah_attr, ah_attr->port_num);
return -EINVAL;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(ah->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ehca_err(ah->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"ah=%p ah_attr=%p port_num=%x "
"sgid_index=%x",
rc, ah, ah_attr, ah_attr->port_num,
ah_attr->grh.sgid_index);
return -EINVAL;
}
memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
}
new_ehca_av.pmtu = shca->max_mtu;
memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
av = container_of(ah, struct ehca_av, ib_ah);
av->av = new_ehca_av;
return 0;
}
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
sizeof(ah_attr->grh.dgid));
ah_attr->sl = av->av.sl;
ah_attr->dlid = av->av.dlid;
ah_attr->src_path_bits = av->av.slid_path_bits;
ah_attr->static_rate = av->av.ipd;
ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
av->av.grh.word_0);
ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
av->av.grh.word_0);
ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
av->av.grh.word_0);
return 0;
}
int ehca_destroy_ah(struct ib_ah *ah)
{
kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
return 0;
}
int ehca_init_av_cache(void)
{
av_cache = kmem_cache_create("ehca_cache_av",
sizeof(struct ehca_av), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!av_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_av_cache(void)
{
if (av_cache)
kmem_cache_destroy(av_cache);
}
| gpl-2.0 |
bju2000/android_kernel_samsung_hlte | drivers/message/i2o/config-osm.c | 13491 | 2170 | /*
* Configuration OSM
*
* Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Fixes/additions:
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* initial version.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#define OSM_NAME "config-osm"
#define OSM_VERSION "1.323"
#define OSM_DESCRIPTION "I2O Configuration OSM"
/* access mode user rw */
#define S_IWRSR (S_IRUSR | S_IWUSR)
static struct i2o_driver i2o_config_driver;
/* Config OSM driver struct */
static struct i2o_driver i2o_config_driver = {
.name = OSM_NAME,
};
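/* The legacy ioctl interface lives in i2o_config.c and is compiled
 * straight into this OSM when CONFIG_I2O_CONFIG_OLD_IOCTL is set. */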
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
#include "i2o_config.c"
#endif
/**
* i2o_config_init - Configuration OSM initialization function
*
 * Registers the Configuration OSM with the I2O core and, if the old
 * ioctls are compiled in, initializes them.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_config_init(void)
{
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
if (i2o_driver_register(&i2o_config_driver)) {
osm_err("handler register failed.\n");
return -EBUSY;
}
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
if (i2o_config_old_init()) {
osm_err("old config handler initialization failed\n");
i2o_driver_unregister(&i2o_config_driver);
return -EBUSY;
}
#endif
return 0;
}
/**
* i2o_config_exit - Configuration OSM exit function
*
 * If the old ioctls are compiled in, removes them, then unregisters the
 * Configuration OSM from the I2O core.
*/
static void i2o_config_exit(void)
{
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
i2o_config_old_exit();
#endif
i2o_driver_unregister(&i2o_config_driver);
}
MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_config_init);
module_exit(i2o_config_exit);
| gpl-2.0 |
synel/synergy2416-linux-kernel | net/ipv4/inet_diag.c | 180 | 23196 | /*
 * inet_diag.c Module for monitoring INET transport protocol sockets.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>
#include <linux/inet.h>
#include <linux/stddef.h>
#include <linux/inet_diag.h>
static const struct inet_diag_handler **inet_diag_table;
struct inet_diag_entry {
__be32 *saddr;
__be32 *daddr;
u16 sport;
u16 dport;
u16 family;
u16 userlocks;
};
static struct sock *idiagnl;
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
static DEFINE_MUTEX(inet_diag_table_mutex);
static const struct inet_diag_handler *inet_diag_lock_handler(int type)
{
if (!inet_diag_table[type])
request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
NETLINK_INET_DIAG, type);
mutex_lock(&inet_diag_table_mutex);
if (!inet_diag_table[type])
return ERR_PTR(-ENOENT);
return inet_diag_table[type];
}
static inline void inet_diag_unlock_handler(
const struct inet_diag_handler *handler)
{
mutex_unlock(&inet_diag_table_mutex);
}
static int inet_csk_diag_fill(struct sock *sk,
struct sk_buff *skb,
int ext, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
void *info = NULL;
struct inet_diag_meminfo *minfo = NULL;
unsigned char *b = skb_tail_pointer(skb);
const struct inet_diag_handler *handler;
handler = inet_diag_table[unlh->nlmsg_type];
BUG_ON(handler == NULL);
nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
nlh->nlmsg_flags = nlmsg_flags;
r = NLMSG_DATA(nlh);
BUG_ON(sk->sk_state == TCP_TIME_WAIT);
if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
if (ext & (1 << (INET_DIAG_INFO - 1)))
info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
handler->idiag_info_size);
if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
const size_t len = strlen(icsk->icsk_ca_ops->name);
strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
icsk->icsk_ca_ops->name);
}
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
r->id.idiag_if = sk->sk_bound_dev_if;
r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
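	/* the socket pointer is split across two u32 cookie words; the
	 * double shift (>> 31 >> 1) avoids an undefined shift by 32 when
	 * sizeof(long) == 4 */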
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&np->rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&np->daddr);
}
#endif
#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
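	/* idiag_timer codes: 1 = retransmit, 2 = keepalive (sk_timer),
	 * 3 = TIME_WAIT (see inet_twsk_diag_fill), 4 = window probe */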
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
r->idiag_timer = 4;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
} else if (timer_pending(&sk->sk_timer)) {
r->idiag_timer = 2;
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
} else {
r->idiag_timer = 0;
r->idiag_expires = 0;
}
#undef EXPIRES_IN_MS
r->idiag_uid = sock_i_uid(sk);
r->idiag_inode = sock_i_ino(sk);
if (minfo) {
minfo->idiag_rmem = sk_rmem_alloc_get(sk);
minfo->idiag_wmem = sk->sk_wmem_queued;
minfo->idiag_fmem = sk->sk_forward_alloc;
minfo->idiag_tmem = sk_wmem_alloc_get(sk);
}
handler->idiag_get_info(sk, r, info);
if (sk->sk_state < TCP_TIME_WAIT &&
icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
icsk->icsk_ca_ops->get_info(sk, ext, skb);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
rtattr_failure:
nlmsg_failure:
nlmsg_trim(skb, b);
return -EMSGSIZE;
}
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
struct sk_buff *skb, int ext, u32 pid,
u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
long tmo;
struct inet_diag_msg *r;
const unsigned char *previous_tail = skb_tail_pointer(skb);
struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
unlh->nlmsg_type, sizeof(*r));
r = NLMSG_DATA(nlh);
BUG_ON(tw->tw_state != TCP_TIME_WAIT);
nlh->nlmsg_flags = nlmsg_flags;
tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
tmo = 0;
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
r->id.idiag_if = tw->tw_bound_dev_if;
r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = 0;
r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (tw->tw_family == AF_INET6) {
const struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&tw6->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&tw6->tw_v6_daddr);
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
return skb->len;
nlmsg_failure:
nlmsg_trim(skb, previous_tail);
return -EMSGSIZE;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
int ext, u32 pid, u32 seq, u16 nlmsg_flags,
const struct nlmsghdr *unlh)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
skb, ext, pid, seq, nlmsg_flags,
unlh);
return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
}
static int inet_diag_get_exact(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
int err;
struct sock *sk;
struct inet_diag_req *req = NLMSG_DATA(nlh);
struct sk_buff *rep;
struct inet_hashinfo *hashinfo;
const struct inet_diag_handler *handler;
handler = inet_diag_lock_handler(nlh->nlmsg_type);
if (IS_ERR(handler)) {
err = PTR_ERR(handler);
goto unlock;
}
hashinfo = handler->idiag_hashinfo;
err = -EINVAL;
if (req->idiag_family == AF_INET) {
sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
}
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
else if (req->idiag_family == AF_INET6) {
sk = inet6_lookup(&init_net, hashinfo,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
req->id.idiag_if);
}
#endif
else {
goto unlock;
}
err = -ENOENT;
if (sk == NULL)
goto unlock;
err = -ESTALE;
if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
(u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
goto out;
err = -ENOMEM;
rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
sizeof(struct inet_diag_meminfo) +
handler->idiag_info_size + 64)),
GFP_KERNEL);
if (!rep)
goto out;
err = sk_diag_fill(sk, rep, req->idiag_ext,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
goto out;
}
err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
out:
if (sk) {
if (sk->sk_state == TCP_TIME_WAIT)
inet_twsk_put((struct inet_timewait_sock *)sk);
else
sock_put(sk);
}
unlock:
inet_diag_unlock_handler(handler);
return err;
}
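/*
 * Prefix-match two addresses over the first 'bits' bits: whole 32-bit
 * words are compared with memcmp(), the remaining 0-31 bits through a
 * network-order mask, e.g. bits = 20 -> mask = htonl(0xfffff000).
 */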
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
int words = bits >> 5;
bits &= 0x1f;
if (words) {
if (memcmp(a1, a2, words << 2))
return 0;
}
if (bits) {
__be32 w1, w2;
__be32 mask;
w1 = a1[words];
w2 = a2[words];
mask = htonl((0xffffffff) << (32 - bits));
if ((w1 ^ w2) & mask)
return 0;
}
return 1;
}
static int inet_diag_bc_run(const void *bc, int len,
const struct inet_diag_entry *entry)
{
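	/*
	 * The filter is a byte-offset program: each op carries a 'yes'
	 * offset taken when its condition holds and a 'no' offset taken
	 * otherwise.  Running off the end with len == 0 accepts the entry.
	 */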
while (len > 0) {
int yes = 1;
const struct inet_diag_bc_op *op = bc;
switch (op->code) {
case INET_DIAG_BC_NOP:
break;
case INET_DIAG_BC_JMP:
yes = 0;
break;
case INET_DIAG_BC_S_GE:
yes = entry->sport >= op[1].no;
break;
case INET_DIAG_BC_S_LE:
yes = entry->sport <= op[1].no;
break;
case INET_DIAG_BC_D_GE:
yes = entry->dport >= op[1].no;
break;
case INET_DIAG_BC_D_LE:
yes = entry->dport <= op[1].no;
break;
case INET_DIAG_BC_AUTO:
yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
break;
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND: {
struct inet_diag_hostcond *cond;
__be32 *addr;
cond = (struct inet_diag_hostcond *)(op + 1);
if (cond->port != -1 &&
cond->port != (op->code == INET_DIAG_BC_S_COND ?
entry->sport : entry->dport)) {
yes = 0;
break;
}
if (cond->prefix_len == 0)
break;
if (op->code == INET_DIAG_BC_S_COND)
addr = entry->saddr;
else
addr = entry->daddr;
if (bitstring_match(addr, cond->addr,
cond->prefix_len))
break;
if (entry->family == AF_INET6 &&
cond->family == AF_INET) {
if (addr[0] == 0 && addr[1] == 0 &&
addr[2] == htonl(0xffff) &&
bitstring_match(addr + 3, cond->addr,
cond->prefix_len))
break;
}
yes = 0;
break;
}
}
if (yes) {
len -= op->yes;
bc += op->yes;
} else {
len -= op->no;
bc += op->no;
}
}
return len == 0;
}
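/*
 * Walk the program in op->yes steps and accept the remaining-length
 * value 'cc' only if it lands exactly on an instruction boundary, so
 * that a user-supplied 'no' offset cannot jump into the middle of an op.
 */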
static int valid_cc(const void *bc, int len, int cc)
{
while (len >= 0) {
const struct inet_diag_bc_op *op = bc;
if (cc > len)
return 0;
if (cc == len)
return 1;
if (op->yes < 4)
return 0;
len -= op->yes;
bc += op->yes;
}
return 0;
}
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
const unsigned char *bc = bytecode;
int len = bytecode_len;
while (len > 0) {
struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
		/* printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); */
switch (op->code) {
case INET_DIAG_BC_AUTO:
case INET_DIAG_BC_S_COND:
case INET_DIAG_BC_D_COND:
case INET_DIAG_BC_S_GE:
case INET_DIAG_BC_S_LE:
case INET_DIAG_BC_D_GE:
case INET_DIAG_BC_D_LE:
if (op->yes < 4 || op->yes > len + 4)
return -EINVAL;
case INET_DIAG_BC_JMP:
if (op->no < 4 || op->no > len + 4)
return -EINVAL;
if (op->no < len &&
!valid_cc(bytecode, bytecode_len, len - op->no))
return -EINVAL;
break;
case INET_DIAG_BC_NOP:
if (op->yes < 4 || op->yes > len + 4)
return -EINVAL;
break;
default:
return -EINVAL;
}
bc += op->yes;
len -= op->yes;
}
return len == 0 ? 0 : -EINVAL;
}
static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
struct inet_diag_entry entry;
const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
sizeof(*r),
INET_DIAG_REQ_BYTECODE);
struct inet_sock *inet = inet_sk(sk);
entry.family = sk->sk_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (entry.family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
entry.saddr = np->rcv_saddr.s6_addr32;
entry.daddr = np->daddr.s6_addr32;
} else
#endif
{
entry.saddr = &inet->inet_rcv_saddr;
entry.daddr = &inet->inet_daddr;
}
entry.sport = inet->inet_num;
entry.dport = ntohs(inet->inet_dport);
entry.userlocks = sk->sk_userlocks;
if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
return 0;
}
return inet_csk_diag_fill(sk, skb, r->idiag_ext,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
struct sk_buff *skb,
struct netlink_callback *cb)
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
struct inet_diag_entry entry;
const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
sizeof(*r),
INET_DIAG_REQ_BYTECODE);
entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (tw->tw_family == AF_INET6) {
struct inet6_timewait_sock *tw6 =
inet6_twsk((struct sock *)tw);
entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
entry.daddr = tw6->tw_v6_daddr.s6_addr32;
} else
#endif
{
entry.saddr = &tw->tw_rcv_saddr;
entry.daddr = &tw->tw_daddr;
}
entry.sport = tw->tw_num;
entry.dport = ntohs(tw->tw_dport);
entry.userlocks = 0;
if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
return 0;
}
return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
struct request_sock *req, u32 pid, u32 seq,
const struct nlmsghdr *unlh)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *inet = inet_sk(sk);
unsigned char *b = skb_tail_pointer(skb);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
nlh->nlmsg_flags = NLM_F_MULTI;
r = NLMSG_DATA(nlh);
r->idiag_family = sk->sk_family;
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
r->idiag_retrans = req->retrans;
r->id.idiag_if = sk->sk_bound_dev_if;
r->id.idiag_cookie[0] = (u32)(unsigned long)req;
r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
tmo = req->expires - jiffies;
if (tmo < 0)
tmo = 0;
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = ireq->rmt_port;
r->id.idiag_src[0] = ireq->loc_addr;
r->id.idiag_dst[0] = ireq->rmt_addr;
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
r->idiag_uid = sock_i_uid(sk);
r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&inet6_rsk(req)->loc_addr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&inet6_rsk(req)->rmt_addr);
}
#endif
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
nlmsg_failure:
nlmsg_trim(skb, b);
return -1;
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
struct netlink_callback *cb)
{
struct inet_diag_entry entry;
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt;
const struct nlattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
int j, s_j;
int reqnum, s_reqnum;
int err = 0;
s_j = cb->args[3];
s_reqnum = cb->args[4];
if (s_j > 0)
s_j--;
entry.family = sk->sk_family;
read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = icsk->icsk_accept_queue.listen_opt;
if (!lopt || !lopt->qlen)
goto out;
if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
INET_DIAG_REQ_BYTECODE);
entry.sport = inet->inet_num;
entry.userlocks = sk->sk_userlocks;
}
for (j = s_j; j < lopt->nr_table_entries; j++) {
struct request_sock *req, *head = lopt->syn_table[j];
reqnum = 0;
for (req = head; req; reqnum++, req = req->dl_next) {
struct inet_request_sock *ireq = inet_rsk(req);
if (reqnum < s_reqnum)
continue;
if (r->id.idiag_dport != ireq->rmt_port &&
r->id.idiag_dport)
continue;
if (bc) {
entry.saddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
(entry.family == AF_INET6) ?
inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
&ireq->loc_addr;
entry.daddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
(entry.family == AF_INET6) ?
inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
&ireq->rmt_addr;
entry.dport = ntohs(ireq->rmt_port);
if (!inet_diag_bc_run(nla_data(bc),
nla_len(bc), &entry))
continue;
}
err = inet_diag_fill_req(skb, sk, req,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, cb->nlh);
if (err < 0) {
cb->args[3] = j + 1;
cb->args[4] = reqnum;
goto out;
}
}
s_reqnum = 0;
}
out:
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
return err;
}
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int i, num;
int s_i, s_num;
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
const struct inet_diag_handler *handler;
struct inet_hashinfo *hashinfo;
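	/*
	 * Dump resume state lives in cb->args[]: [0] phase (0 = listening
	 * hash, 1 = established/timewait hash), [1] bucket, [2] entry
	 * index, [3]/[4] syn-table bucket and request index for
	 * inet_diag_dump_reqs().
	 */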
handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
if (IS_ERR(handler))
goto unlock;
hashinfo = handler->idiag_hashinfo;
s_i = cb->args[1];
s_num = num = cb->args[2];
if (cb->args[0] == 0) {
if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
goto skip_listen_ht;
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct inet_listen_hashbucket *ilb;
num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock_bh(&ilb->lock);
sk_nulls_for_each(sk, node, &ilb->head) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num) {
num++;
continue;
}
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next_listen;
if (!(r->idiag_states & TCPF_LISTEN) ||
r->id.idiag_dport ||
cb->args[3] > 0)
goto syn_recv;
if (inet_csk_diag_dump(sk, skb, cb) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
syn_recv:
if (!(r->idiag_states & TCPF_SYN_RECV))
goto next_listen;
if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
next_listen:
cb->args[3] = 0;
cb->args[4] = 0;
++num;
}
spin_unlock_bh(&ilb->lock);
s_num = 0;
cb->args[3] = 0;
cb->args[4] = 0;
}
skip_listen_ht:
cb->args[0] = 1;
s_i = num = s_num = 0;
}
if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
goto unlock;
for (i = s_i; i <= hashinfo->ehash_mask; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
struct sock *sk;
struct hlist_nulls_node *node;
num = 0;
if (hlist_nulls_empty(&head->chain) &&
hlist_nulls_empty(&head->twchain))
continue;
if (i > s_i)
s_num = 0;
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
goto next_normal;
if (r->id.idiag_sport != inet->inet_sport &&
r->id.idiag_sport)
goto next_normal;
if (r->id.idiag_dport != inet->inet_dport &&
r->id.idiag_dport)
goto next_normal;
if (inet_csk_diag_dump(sk, skb, cb) < 0) {
spin_unlock_bh(lock);
goto done;
}
next_normal:
++num;
}
if (r->idiag_states & TCPF_TIME_WAIT) {
struct inet_timewait_sock *tw;
inet_twsk_for_each(tw, node,
&head->twchain) {
if (num < s_num)
goto next_dying;
if (r->id.idiag_sport != tw->tw_sport &&
r->id.idiag_sport)
goto next_dying;
if (r->id.idiag_dport != tw->tw_dport &&
r->id.idiag_dport)
goto next_dying;
if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
spin_unlock_bh(lock);
goto done;
}
next_dying:
++num;
}
}
spin_unlock_bh(lock);
}
done:
cb->args[1] = i;
cb->args[2] = num;
unlock:
inet_diag_unlock_handler(handler);
return skb->len;
}
static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int hdrlen = sizeof(struct inet_diag_req);
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
return -EINVAL;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
if (nlmsg_attrlen(nlh, hdrlen)) {
struct nlattr *attr;
attr = nlmsg_find_attr(nlh, hdrlen,
INET_DIAG_REQ_BYTECODE);
if (attr == NULL ||
nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
return -EINVAL;
}
return netlink_dump_start(idiagnl, skb, nlh,
inet_diag_dump, NULL);
}
return inet_diag_get_exact(skb, nlh);
}
static DEFINE_MUTEX(inet_diag_mutex);
static void inet_diag_rcv(struct sk_buff *skb)
{
mutex_lock(&inet_diag_mutex);
netlink_rcv_skb(skb, &inet_diag_rcv_msg);
mutex_unlock(&inet_diag_mutex);
}
int inet_diag_register(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
int err = -EINVAL;
if (type >= INET_DIAG_GETSOCK_MAX)
goto out;
mutex_lock(&inet_diag_table_mutex);
err = -EEXIST;
if (inet_diag_table[type] == NULL) {
inet_diag_table[type] = h;
err = 0;
}
mutex_unlock(&inet_diag_table_mutex);
out:
return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);
void inet_diag_unregister(const struct inet_diag_handler *h)
{
const __u16 type = h->idiag_type;
if (type >= INET_DIAG_GETSOCK_MAX)
return;
mutex_lock(&inet_diag_table_mutex);
inet_diag_table[type] = NULL;
mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
static int __init inet_diag_init(void)
{
const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
sizeof(struct inet_diag_handler *));
int err = -ENOMEM;
inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
if (!inet_diag_table)
goto out;
idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
inet_diag_rcv, NULL, THIS_MODULE);
if (idiagnl == NULL)
goto out_free_table;
err = 0;
out:
return err;
out_free_table:
kfree(inet_diag_table);
goto out;
}
static void __exit inet_diag_exit(void)
{
netlink_kernel_release(idiagnl);
kfree(inet_diag_table);
}
module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);
| gpl-2.0 |
JustBeYou/linux | drivers/media/i2c/saa7115.c | 436 | 56116 | /* saa711x - Philips SAA711x video decoder driver
* This driver can work with saa7111, saa7111a, saa7113, saa7114,
* saa7115 and saa7118.
*
* Based on saa7114 driver by Maxim Yevtyushkin, which is based on
* the saa7111 driver by Dave Perks.
*
* Copyright (C) 1998 Dave Perks <dperks@ibm.net>
* Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com>
*
* Slight changes for video timing and attachment output by
* Wolfgang Scherr <scherr@net4you.net>
*
* Moved over to the linux >= 2.4.x i2c protocol (1/1/2003)
* by Ronald Bultje <rbultje@ronald.bitfreak.net>
*
* Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com>
* (2/17/2003)
*
* VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
*
* Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
* SAA7111, SAA7113 and SAA7118 support
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "saa711x_regs.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/saa7115.h>
#include <asm/div64.h>
#define VRES_60HZ (480+16)
MODULE_DESCRIPTION("Philips SAA7111/SAA7113/SAA7114/SAA7115/SAA7118 video decoder driver");
MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, "
"Hans Verkuil, Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
enum saa711x_model {
SAA7111A,
SAA7111,
SAA7113,
GM7113C,
SAA7114,
SAA7115,
SAA7118,
};
struct saa711x_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct {
/* chroma gain control cluster */
struct v4l2_ctrl *agc;
struct v4l2_ctrl *gain;
};
v4l2_std_id std;
int input;
int output;
int enable;
int radio;
int width;
int height;
enum saa711x_model ident;
u32 audclk_freq;
u32 crystal_freq;
bool ucgc;
u8 cgcdiv;
bool apll;
bool double_asclk;
};
static inline struct saa711x_state *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct saa711x_state, sd);
}
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
return &container_of(ctrl->handler, struct saa711x_state, hdl)->sd;
}
/* ----------------------------------------------------------------------- */
static inline int saa711x_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return i2c_smbus_write_byte_data(client, reg, value);
}
/* Sanity routine to check if a register is present */
static int saa711x_has_reg(const int id, const u8 reg)
{
if (id == SAA7111)
return reg < 0x20 && reg != 0x01 && reg != 0x0f &&
(reg < 0x13 || reg > 0x19) && reg != 0x1d && reg != 0x1e;
if (id == SAA7111A)
return reg < 0x20 && reg != 0x01 && reg != 0x0f &&
reg != 0x14 && reg != 0x18 && reg != 0x19 &&
reg != 0x1d && reg != 0x1e;
/* common for saa7113/4/5/8 */
if (unlikely((reg >= 0x3b && reg <= 0x3f) || reg == 0x5c || reg == 0x5f ||
reg == 0xa3 || reg == 0xa7 || reg == 0xab || reg == 0xaf || (reg >= 0xb5 && reg <= 0xb7) ||
reg == 0xd3 || reg == 0xd7 || reg == 0xdb || reg == 0xdf || (reg >= 0xe5 && reg <= 0xe7) ||
reg == 0x82 || (reg >= 0x89 && reg <= 0x8e)))
return 0;
switch (id) {
case GM7113C:
return reg != 0x14 && (reg < 0x18 || reg > 0x1e) && reg < 0x20;
case SAA7113:
return reg != 0x14 && (reg < 0x18 || reg > 0x1e) && (reg < 0x20 || reg > 0x3f) &&
reg != 0x5d && reg < 0x63;
case SAA7114:
return (reg < 0x1a || reg > 0x1e) && (reg < 0x20 || reg > 0x2f) &&
(reg < 0x63 || reg > 0x7f) && reg != 0x33 && reg != 0x37 &&
reg != 0x81 && reg < 0xf0;
case SAA7115:
return (reg < 0x20 || reg > 0x2f) && reg != 0x65 && (reg < 0xfc || reg > 0xfe);
case SAA7118:
return (reg < 0x1a || reg > 0x1d) && (reg < 0x20 || reg > 0x22) &&
(reg < 0x26 || reg > 0x28) && reg != 0x33 && reg != 0x37 &&
(reg < 0x63 || reg > 0x7f) && reg != 0x81 && reg < 0xf0;
}
return 1;
}
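/*
 * Write a 0x00-terminated table of (register, value) pairs, skipping
 * (with a debug message) registers that the detected chip variant
 * does not implement.
 */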
static int saa711x_writeregs(struct v4l2_subdev *sd, const unsigned char *regs)
{
struct saa711x_state *state = to_state(sd);
unsigned char reg, data;
while (*regs != 0x00) {
reg = *(regs++);
data = *(regs++);
		/* According to the datasheets, reserved regs should be
		   filled with 0; it seems better not to touch them. */
if (saa711x_has_reg(state->ident, reg)) {
if (saa711x_write(sd, reg, data) < 0)
return -1;
} else {
v4l2_dbg(1, debug, sd, "tried to access reserved reg 0x%02x\n", reg);
}
}
return 0;
}
static inline int saa711x_read(struct v4l2_subdev *sd, u8 reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return i2c_smbus_read_byte_data(client, reg);
}
/* ----------------------------------------------------------------------- */
/* SAA7111 initialization table */
static const unsigned char saa7111_init[] = {
R_01_INC_DELAY, 0x00, /* reserved */
/*front end */
R_02_INPUT_CNTL_1, 0xd0, /* FUSE=3, GUDL=2, MODE=0 */
R_03_INPUT_CNTL_2, 0x23, /* HLNRS=0, VBSL=1, WPOFF=0, HOLDG=0,
* GAFIX=0, GAI1=256, GAI2=256 */
R_04_INPUT_CNTL_3, 0x00, /* GAI1=256 */
R_05_INPUT_CNTL_4, 0x00, /* GAI2=256 */
/* decoder */
R_06_H_SYNC_START, 0xf3, /* HSB at 13(50Hz) / 17(60Hz)
* pixels after end of last line */
R_07_H_SYNC_STOP, 0xe8, /* HSS seems to be needed to
* work with NTSC, too */
R_08_SYNC_CNTL, 0xc8, /* AUFD=1, FSEL=1, EXFIL=0,
* VTRC=1, HPLL=0, VNOI=0 */
R_09_LUMA_CNTL, 0x01, /* BYPS=0, PREF=0, BPSS=0,
* VBLB=0, UPTCV=0, APER=1 */
R_0A_LUMA_BRIGHT_CNTL, 0x80,
R_0B_LUMA_CONTRAST_CNTL, 0x47, /* 0b - CONT=1.109 */
R_0C_CHROMA_SAT_CNTL, 0x40,
R_0D_CHROMA_HUE_CNTL, 0x00,
R_0E_CHROMA_CNTL_1, 0x01, /* 0e - CDTO=0, CSTD=0, DCCF=0,
* FCTC=0, CHBW=1 */
R_0F_CHROMA_GAIN_CNTL, 0x00, /* reserved */
R_10_CHROMA_CNTL_2, 0x48, /* 10 - OFTS=1, HDEL=0, VRLN=1, YDEL=0 */
R_11_MODE_DELAY_CNTL, 0x1c, /* 11 - GPSW=0, CM99=0, FECO=0, COMPO=1,
* OEYC=1, OEHV=1, VIPB=0, COLO=0 */
R_12_RT_SIGNAL_CNTL, 0x00, /* 12 - output control 2 */
R_13_RT_X_PORT_OUT_CNTL, 0x00, /* 13 - output control 3 */
R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
R_15_VGATE_START_FID_CHG, 0x00,
R_16_VGATE_STOP, 0x00,
R_17_MISC_VGATE_CONF_AND_MSB, 0x00,
0x00, 0x00
};
/*
* This table has one illegal value, and some values that are not
* correct according to the datasheet initialization table.
*
* If you need a table with legal/default values tell the driver in
* i2c_board_info.platform_data, and you will get the gm7113c_init
* table instead.
*/
/* SAA7113 Init codes */
static const unsigned char saa7113_init[] = {
R_01_INC_DELAY, 0x08,
R_02_INPUT_CNTL_1, 0xc2,
R_03_INPUT_CNTL_2, 0x30,
R_04_INPUT_CNTL_3, 0x00,
R_05_INPUT_CNTL_4, 0x00,
R_06_H_SYNC_START, 0x89, /* Illegal value -119,
* min. value = -108 (0x94) */
R_07_H_SYNC_STOP, 0x0d,
R_08_SYNC_CNTL, 0x88, /* Not datasheet default.
* HTC = VTR mode, should be 0x98 */
R_09_LUMA_CNTL, 0x01,
R_0A_LUMA_BRIGHT_CNTL, 0x80,
R_0B_LUMA_CONTRAST_CNTL, 0x47,
R_0C_CHROMA_SAT_CNTL, 0x40,
R_0D_CHROMA_HUE_CNTL, 0x00,
R_0E_CHROMA_CNTL_1, 0x01,
R_0F_CHROMA_GAIN_CNTL, 0x2a,
	R_10_CHROMA_CNTL_2, 0x08, /* Not datasheet default.
* VRLN enabled, should be 0x00 */
R_11_MODE_DELAY_CNTL, 0x0c,
R_12_RT_SIGNAL_CNTL, 0x07, /* Not datasheet default,
* should be 0x01 */
R_13_RT_X_PORT_OUT_CNTL, 0x00,
R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
R_15_VGATE_START_FID_CHG, 0x00,
R_16_VGATE_STOP, 0x00,
R_17_MISC_VGATE_CONF_AND_MSB, 0x00,
0x00, 0x00
};
/*
* GM7113C is a clone of the SAA7113 chip
* This init table is copied out of the saa7113 datasheet.
 * In R_08 we enable "Automatic Field Detection" [AUFD];
 * this is disabled when saa711x_set_v4lstd is called.
*/
static const unsigned char gm7113c_init[] = {
R_01_INC_DELAY, 0x08,
R_02_INPUT_CNTL_1, 0xc0,
R_03_INPUT_CNTL_2, 0x33,
R_04_INPUT_CNTL_3, 0x00,
R_05_INPUT_CNTL_4, 0x00,
R_06_H_SYNC_START, 0xe9,
R_07_H_SYNC_STOP, 0x0d,
R_08_SYNC_CNTL, 0x98,
R_09_LUMA_CNTL, 0x01,
R_0A_LUMA_BRIGHT_CNTL, 0x80,
R_0B_LUMA_CONTRAST_CNTL, 0x47,
R_0C_CHROMA_SAT_CNTL, 0x40,
R_0D_CHROMA_HUE_CNTL, 0x00,
R_0E_CHROMA_CNTL_1, 0x01,
R_0F_CHROMA_GAIN_CNTL, 0x2a,
R_10_CHROMA_CNTL_2, 0x00,
R_11_MODE_DELAY_CNTL, 0x0c,
R_12_RT_SIGNAL_CNTL, 0x01,
R_13_RT_X_PORT_OUT_CNTL, 0x00,
R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
R_15_VGATE_START_FID_CHG, 0x00,
R_16_VGATE_STOP, 0x00,
R_17_MISC_VGATE_CONF_AND_MSB, 0x00,
0x00, 0x00
};
/* If a value differs from the Hauppauge driver values, then the comment starts with
'was 0xXX' to denote the Hauppauge value. Otherwise the value is identical to what the
Hauppauge driver sets. */
/* SAA7114 and SAA7115 initialization table */
static const unsigned char saa7115_init_auto_input[] = {
/* Front-End Part */
R_01_INC_DELAY, 0x48, /* white peak control disabled */
R_03_INPUT_CNTL_2, 0x20, /* was 0x30. 0x20: long vertical blanking */
R_04_INPUT_CNTL_3, 0x90, /* analog gain set to 0 */
R_05_INPUT_CNTL_4, 0x90, /* analog gain set to 0 */
/* Decoder Part */
R_06_H_SYNC_START, 0xeb, /* horiz sync begin = -21 */
R_07_H_SYNC_STOP, 0xe0, /* horiz sync stop = -17 */
R_09_LUMA_CNTL, 0x53, /* 0x53, was 0x56 for 60hz. luminance control */
R_0A_LUMA_BRIGHT_CNTL, 0x80, /* was 0x88. decoder brightness, 0x80 is itu standard */
R_0B_LUMA_CONTRAST_CNTL, 0x44, /* was 0x48. decoder contrast, 0x44 is itu standard */
R_0C_CHROMA_SAT_CNTL, 0x40, /* was 0x47. decoder saturation, 0x40 is itu standard */
R_0D_CHROMA_HUE_CNTL, 0x00,
R_0F_CHROMA_GAIN_CNTL, 0x00, /* use automatic gain */
R_10_CHROMA_CNTL_2, 0x06, /* chroma: active adaptive combfilter */
R_11_MODE_DELAY_CNTL, 0x00,
R_12_RT_SIGNAL_CNTL, 0x9d, /* RTS0 output control: VGATE */
R_13_RT_X_PORT_OUT_CNTL, 0x80, /* ITU656 standard mode, RTCO output enable RTCE */
R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
R_18_RAW_DATA_GAIN_CNTL, 0x40, /* gain 0x00 = nominal */
R_19_RAW_DATA_OFF_CNTL, 0x80,
R_1A_COLOR_KILL_LVL_CNTL, 0x77, /* recommended value */
R_1B_MISC_TVVCRDET, 0x42, /* recommended value */
R_1C_ENHAN_COMB_CTRL1, 0xa9, /* recommended value */
R_1D_ENHAN_COMB_CTRL2, 0x01, /* recommended value */
R_80_GLOBAL_CNTL_1, 0x0, /* No tasks enabled at init */
/* Power Device Control */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset device */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* set device programmed, all in operational mode */
0x00, 0x00
};
/* Used to reset saa7113, saa7114 and saa7115 */
static const unsigned char saa7115_cfg_reset_scaler[] = {
R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x00, /* disable I-port output */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */
R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* enable I-port output */
0x00, 0x00
};
/* ============== SAA7115 VIDEO templates ============= */
static const unsigned char saa7115_cfg_60hz_video[] = {
R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
R_15_VGATE_START_FID_CHG, 0x03,
R_16_VGATE_STOP, 0x11,
R_17_MISC_VGATE_CONF_AND_MSB, 0x9c,
	R_08_SYNC_CNTL, 0x68, /* 0xB0: auto detection, 0x68 = NTSC */
R_0E_CHROMA_CNTL_1, 0x07, /* video autodetection is on */
R_5A_V_OFF_FOR_SLICER, 0x06, /* standard 60hz value for ITU656 line counting */
/* Task A */
R_90_A_TASK_HANDLING_CNTL, 0x80,
R_91_A_X_PORT_FORMATS_AND_CONF, 0x48,
R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40,
R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84,
/* hoffset low (input), 0x0002 is minimum */
R_94_A_HORIZ_INPUT_WINDOW_START, 0x01,
R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
/* hsize low (input), 0x02d0 = 720 */
R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,
R_98_A_VERT_INPUT_WINDOW_START, 0x05,
R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00,
R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x0c,
R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00,
R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0,
R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05,
R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x0c,
R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00,
/* Task B */
R_C0_B_TASK_HANDLING_CNTL, 0x00,
R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08,
R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00,
R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80,
/* 0x0002 is minimum */
R_C4_B_HORIZ_INPUT_WINDOW_START, 0x02,
R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
/* 0x02d0 = 720 */
R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,
/* vwindow start 0x12 = 18 */
R_C8_B_VERT_INPUT_WINDOW_START, 0x12,
R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00,
/* vwindow length 0xf8 = 248 */
R_CA_B_VERT_INPUT_WINDOW_LENGTH, VRES_60HZ>>1,
R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, VRES_60HZ>>9,
/* hwindow 0x02d0 = 720 */
R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0,
R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02,
R_F0_LFCO_PER_LINE, 0xad, /* Set PLL Register. 60hz 525 lines per frame, 27 MHz */
R_F1_P_I_PARAM_SELECT, 0x05, /* low bit with 0xF0 */
R_F5_PULSGEN_LINE_LENGTH, 0xad,
R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01,
0x00, 0x00
};
static const unsigned char saa7115_cfg_50hz_video[] = {
R_80_GLOBAL_CNTL_1, 0x00,
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
R_15_VGATE_START_FID_CHG, 0x37, /* VGATE start */
R_16_VGATE_STOP, 0x16,
R_17_MISC_VGATE_CONF_AND_MSB, 0x99,
R_08_SYNC_CNTL, 0x28, /* 0x28 = PAL */
R_0E_CHROMA_CNTL_1, 0x07,
R_5A_V_OFF_FOR_SLICER, 0x03, /* standard 50hz value */
/* Task A */
R_90_A_TASK_HANDLING_CNTL, 0x81,
R_91_A_X_PORT_FORMATS_AND_CONF, 0x48,
R_92_A_X_PORT_INPUT_REFERENCE_SIGNAL, 0x40,
R_93_A_I_PORT_OUTPUT_FORMATS_AND_CONF, 0x84,
/* This is weird: the datasheet says that you should use 2 as the minimum value, */
	/* but Hauppauge uses 0, and changing it to 2 does indeed cause problems (for 50hz) */
/* hoffset low (input), 0x0002 is minimum */
R_94_A_HORIZ_INPUT_WINDOW_START, 0x00,
R_95_A_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
/* hsize low (input), 0x02d0 = 720 */
R_96_A_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
R_97_A_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,
R_98_A_VERT_INPUT_WINDOW_START, 0x03,
R_99_A_VERT_INPUT_WINDOW_START_MSB, 0x00,
/* vsize 0x12 = 18 */
R_9A_A_VERT_INPUT_WINDOW_LENGTH, 0x12,
R_9B_A_VERT_INPUT_WINDOW_LENGTH_MSB, 0x00,
/* hsize 0x05a0 = 1440 */
R_9C_A_HORIZ_OUTPUT_WINDOW_LENGTH, 0xa0,
R_9D_A_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x05, /* hsize hi (output) */
R_9E_A_VERT_OUTPUT_WINDOW_LENGTH, 0x12, /* vsize low (output), 0x12 = 18 */
R_9F_A_VERT_OUTPUT_WINDOW_LENGTH_MSB, 0x00, /* vsize hi (output) */
/* Task B */
R_C0_B_TASK_HANDLING_CNTL, 0x00,
R_C1_B_X_PORT_FORMATS_AND_CONF, 0x08,
R_C2_B_INPUT_REFERENCE_SIGNAL_DEFINITION, 0x00,
R_C3_B_I_PORT_FORMATS_AND_CONF, 0x80,
/* This is weird: the datasheet says that you should use 2 as the minimum value, */
	/* but Hauppauge uses 0, and changing it to 2 does indeed cause problems (for 50hz) */
/* hoffset low (input), 0x0002 is minimum. See comment above. */
R_C4_B_HORIZ_INPUT_WINDOW_START, 0x00,
R_C5_B_HORIZ_INPUT_WINDOW_START_MSB, 0x00,
/* hsize 0x02d0 = 720 */
R_C6_B_HORIZ_INPUT_WINDOW_LENGTH, 0xd0,
R_C7_B_HORIZ_INPUT_WINDOW_LENGTH_MSB, 0x02,
/* voffset 0x16 = 22 */
R_C8_B_VERT_INPUT_WINDOW_START, 0x16,
R_C9_B_VERT_INPUT_WINDOW_START_MSB, 0x00,
/* vsize 0x0120 = 288 */
R_CA_B_VERT_INPUT_WINDOW_LENGTH, 0x20,
R_CB_B_VERT_INPUT_WINDOW_LENGTH_MSB, 0x01,
/* hsize 0x02d0 = 720 */
R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH, 0xd0,
R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB, 0x02,
R_F0_LFCO_PER_LINE, 0xb0, /* Set PLL Register. 50hz 625 lines per frame, 27 MHz */
R_F1_P_I_PARAM_SELECT, 0x05, /* low bit with 0xF0, (was 0x05) */
R_F5_PULSGEN_LINE_LENGTH, 0xb0,
R_F6_PULSE_A_POS_LSB_AND_PULSEGEN_CONFIG, 0x01,
0x00, 0x00
};
/* ============== SAA7115 VIDEO templates (end) ======= */
static const unsigned char saa7115_cfg_vbi_on[] = {
R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
R_80_GLOBAL_CNTL_1, 0x30, /* Activate both tasks */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */
R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* Enable I-port output */
0x00, 0x00
};
static const unsigned char saa7115_cfg_vbi_off[] = {
R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
R_80_GLOBAL_CNTL_1, 0x20, /* Activate only task "B" */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0, /* activate scaler */
R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01, /* Enable I-port output */
0x00, 0x00
};
static const unsigned char saa7115_init_misc[] = {
R_81_V_SYNC_FLD_ID_SRC_SEL_AND_RETIMED_V_F, 0x01,
R_83_X_PORT_I_O_ENA_AND_OUT_CLK, 0x01,
R_84_I_PORT_SIGNAL_DEF, 0x20,
R_85_I_PORT_SIGNAL_POLAR, 0x21,
R_86_I_PORT_FIFO_FLAG_CNTL_AND_ARBIT, 0xc5,
R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, 0x01,
/* Task A */
R_A0_A_HORIZ_PRESCALING, 0x01,
R_A1_A_ACCUMULATION_LENGTH, 0x00,
R_A2_A_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00,
/* Configure controls at nominal value*/
R_A4_A_LUMA_BRIGHTNESS_CNTL, 0x80,
R_A5_A_LUMA_CONTRAST_CNTL, 0x40,
R_A6_A_CHROMA_SATURATION_CNTL, 0x40,
/* note: 2 x zoom ensures that VBI lines have same length as video lines. */
R_A8_A_HORIZ_LUMA_SCALING_INC, 0x00,
R_A9_A_HORIZ_LUMA_SCALING_INC_MSB, 0x02,
R_AA_A_HORIZ_LUMA_PHASE_OFF, 0x00,
/* must be horiz lum scaling / 2 */
R_AC_A_HORIZ_CHROMA_SCALING_INC, 0x00,
R_AD_A_HORIZ_CHROMA_SCALING_INC_MSB, 0x01,
/* must be offset luma / 2 */
R_AE_A_HORIZ_CHROMA_PHASE_OFF, 0x00,
R_B0_A_VERT_LUMA_SCALING_INC, 0x00,
R_B1_A_VERT_LUMA_SCALING_INC_MSB, 0x04,
R_B2_A_VERT_CHROMA_SCALING_INC, 0x00,
R_B3_A_VERT_CHROMA_SCALING_INC_MSB, 0x04,
R_B4_A_VERT_SCALING_MODE_CNTL, 0x01,
R_B8_A_VERT_CHROMA_PHASE_OFF_00, 0x00,
R_B9_A_VERT_CHROMA_PHASE_OFF_01, 0x00,
R_BA_A_VERT_CHROMA_PHASE_OFF_10, 0x00,
R_BB_A_VERT_CHROMA_PHASE_OFF_11, 0x00,
R_BC_A_VERT_LUMA_PHASE_OFF_00, 0x00,
R_BD_A_VERT_LUMA_PHASE_OFF_01, 0x00,
R_BE_A_VERT_LUMA_PHASE_OFF_10, 0x00,
R_BF_A_VERT_LUMA_PHASE_OFF_11, 0x00,
/* Task B */
R_D0_B_HORIZ_PRESCALING, 0x01,
R_D1_B_ACCUMULATION_LENGTH, 0x00,
R_D2_B_PRESCALER_DC_GAIN_AND_FIR_PREFILTER, 0x00,
/* Configure controls at nominal value*/
R_D4_B_LUMA_BRIGHTNESS_CNTL, 0x80,
R_D5_B_LUMA_CONTRAST_CNTL, 0x40,
R_D6_B_CHROMA_SATURATION_CNTL, 0x40,
/* hor lum scaling 0x0400 = 1 */
R_D8_B_HORIZ_LUMA_SCALING_INC, 0x00,
R_D9_B_HORIZ_LUMA_SCALING_INC_MSB, 0x04,
R_DA_B_HORIZ_LUMA_PHASE_OFF, 0x00,
/* must be hor lum scaling / 2 */
R_DC_B_HORIZ_CHROMA_SCALING, 0x00,
R_DD_B_HORIZ_CHROMA_SCALING_MSB, 0x02,
/* must be offset luma / 2 */
R_DE_B_HORIZ_PHASE_OFFSET_CRHOMA, 0x00,
R_E0_B_VERT_LUMA_SCALING_INC, 0x00,
R_E1_B_VERT_LUMA_SCALING_INC_MSB, 0x04,
R_E2_B_VERT_CHROMA_SCALING_INC, 0x00,
R_E3_B_VERT_CHROMA_SCALING_INC_MSB, 0x04,
R_E4_B_VERT_SCALING_MODE_CNTL, 0x01,
R_E8_B_VERT_CHROMA_PHASE_OFF_00, 0x00,
R_E9_B_VERT_CHROMA_PHASE_OFF_01, 0x00,
R_EA_B_VERT_CHROMA_PHASE_OFF_10, 0x00,
R_EB_B_VERT_CHROMA_PHASE_OFF_11, 0x00,
R_EC_B_VERT_LUMA_PHASE_OFF_00, 0x00,
R_ED_B_VERT_LUMA_PHASE_OFF_01, 0x00,
R_EE_B_VERT_LUMA_PHASE_OFF_10, 0x00,
R_EF_B_VERT_LUMA_PHASE_OFF_11, 0x00,
R_F2_NOMINAL_PLL2_DTO, 0x50, /* crystal clock = 24.576 MHz, target = 27MHz */
R_F3_PLL_INCREMENT, 0x46,
R_F4_PLL2_STATUS, 0x00,
R_F7_PULSE_A_POS_MSB, 0x4b, /* not the recommended settings! */
R_F8_PULSE_B_POS, 0x00,
R_F9_PULSE_B_POS_MSB, 0x4b,
R_FA_PULSE_C_POS, 0x00,
R_FB_PULSE_C_POS_MSB, 0x4b,
/* PLL2 lock detection settings: 71 lines 50% phase error */
R_FF_S_PLL_MAX_PHASE_ERR_THRESH_NUM_LINES, 0x88,
/* Turn off VBI */
R_40_SLICER_CNTL_1, 0x20, /* No framing code errors allowed. */
R_41_LCR_BASE, 0xff,
R_41_LCR_BASE+1, 0xff,
R_41_LCR_BASE+2, 0xff,
R_41_LCR_BASE+3, 0xff,
R_41_LCR_BASE+4, 0xff,
R_41_LCR_BASE+5, 0xff,
R_41_LCR_BASE+6, 0xff,
R_41_LCR_BASE+7, 0xff,
R_41_LCR_BASE+8, 0xff,
R_41_LCR_BASE+9, 0xff,
R_41_LCR_BASE+10, 0xff,
R_41_LCR_BASE+11, 0xff,
R_41_LCR_BASE+12, 0xff,
R_41_LCR_BASE+13, 0xff,
R_41_LCR_BASE+14, 0xff,
R_41_LCR_BASE+15, 0xff,
R_41_LCR_BASE+16, 0xff,
R_41_LCR_BASE+17, 0xff,
R_41_LCR_BASE+18, 0xff,
R_41_LCR_BASE+19, 0xff,
R_41_LCR_BASE+20, 0xff,
R_41_LCR_BASE+21, 0xff,
R_41_LCR_BASE+22, 0xff,
R_58_PROGRAM_FRAMING_CODE, 0x40,
R_59_H_OFF_FOR_SLICER, 0x47,
R_5B_FLD_OFF_AND_MSB_FOR_H_AND_V_OFF, 0x83,
R_5D_DID, 0xbd,
R_5E_SDID, 0x35,
R_02_INPUT_CNTL_1, 0xc4, /* input tuner -> input 4, amplifier active */
R_80_GLOBAL_CNTL_1, 0x20, /* enable task B */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0,
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xf0,
0x00, 0x00
};
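/* Fold the byte's bits together: after the three xor-shifts the LSB
 * holds the parity, i.e. 1 iff the byte has an odd number of set bits. */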
static int saa711x_odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
c ^= (c >> 1);
return c & 1;
}
static int saa711x_decode_vps(u8 *dst, u8 *p)
{
static const u8 biphase_tbl[] = {
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87,
0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3,
0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85,
0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1,
0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86,
0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2,
0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84,
0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0,
0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
};
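	/* Each source byte carries eight samples of four biphase-coded
	 * bits; the table maps it to the four decoded bits in the low
	 * nibble and sets high bits on coding errors, so two source bytes
	 * yield one VPS byte while err accumulates the error flags. */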
int i;
u8 c, err = 0;
for (i = 0; i < 2 * 13; i += 2) {
err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
c = (biphase_tbl[p[i + 1]] & 0xf) | ((biphase_tbl[p[i]] & 0xf) << 4);
dst[i / 2] = c;
}
return err & 0xf0;
}
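/*
 * Decode the Wide Screen Signalling word: each payload bit arrives as
 * two 3-bit sample groups that wss_bits[] majority-decodes; the groups
 * must be complementary (biphase), and the low 4 bits of the result
 * must have odd parity, otherwise -1 is returned.
 */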
static int saa711x_decode_wss(u8 *p)
{
static const int wss_bits[8] = {
0, 0, 0, 1, 0, 1, 1, 1
};
unsigned char parity;
int wss = 0;
int i;
for (i = 0; i < 16; i++) {
int b1 = wss_bits[p[i] & 7];
int b2 = wss_bits[(p[i] >> 3) & 7];
if (b1 == b2)
return -1;
wss |= b2 << i;
}
parity = wss & 15;
parity ^= parity >> 2;
parity ^= parity >> 1;
if (!(parity & 1))
return -1;
return wss;
}
static int saa711x_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
struct saa711x_state *state = to_state(sd);
u32 acpf;
u32 acni;
u32 hz;
u64 f;
u8 acc = 0; /* reg 0x3a, audio clock control */
/* Checks for chips that don't have audio clock (saa7111, saa7113) */
if (!saa711x_has_reg(state->ident, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD))
return 0;
v4l2_dbg(1, debug, sd, "set audio clock freq: %d\n", freq);
/* sanity check */
if (freq < 32000 || freq > 48000)
return -EINVAL;
/* hz is the refresh rate times 100 */
hz = (state->std & V4L2_STD_525_60) ? 5994 : 5000;
/* acpf = (256 * freq) / field_frequency == (256 * 100 * freq) / hz */
acpf = (25600 * freq) / hz;
/* acni = (256 * freq * 2^23) / crystal_frequency =
(freq * 2^(8+23)) / crystal_frequency =
(freq << 31) / crystal_frequency */
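/* Illustrative arithmetic (added example): for freq = 48000 on a 50 Hz
standard, acpf = 25600 * 48000 / 5000 = 245760; with the 32.11 MHz
crystal, acni = (48000 << 31) / 32110000, roughly 3210190. Both values
are then split into byte-wide register writes below. */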
f = freq;
f = f << 31;
do_div(f, state->crystal_freq);
acni = f;
if (state->ucgc) {
acpf = acpf * state->cgcdiv / 16;
acni = acni * state->cgcdiv / 16;
acc = 0x80;
if (state->cgcdiv == 3)
acc |= 0x40;
}
if (state->apll)
acc |= 0x08;
if (state->double_asclk) {
acpf <<= 1;
acni <<= 1;
}
saa711x_write(sd, R_38_CLK_RATIO_AMXCLK_TO_ASCLK, 0x03);
saa711x_write(sd, R_39_CLK_RATIO_ASCLK_TO_ALRCLK, 0x10 << state->double_asclk);
saa711x_write(sd, R_3A_AUD_CLK_GEN_BASIC_SETUP, acc);
saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD, acpf & 0xff);
saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+1,
(acpf >> 8) & 0xff);
saa711x_write(sd, R_30_AUD_MAST_CLK_CYCLES_PER_FIELD+2,
(acpf >> 16) & 0x03);
saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC, acni & 0xff);
saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+1, (acni >> 8) & 0xff);
saa711x_write(sd, R_34_AUD_MAST_CLK_NOMINAL_INC+2, (acni >> 16) & 0x3f);
state->audclk_freq = freq;
return 0;
}
static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct saa711x_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_CHROMA_AGC:
/* chroma gain cluster */
if (state->agc->val)
state->gain->val =
saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
break;
}
return 0;
}
static int saa711x_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct saa711x_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
saa711x_write(sd, R_0A_LUMA_BRIGHT_CNTL, ctrl->val);
break;
case V4L2_CID_CONTRAST:
saa711x_write(sd, R_0B_LUMA_CONTRAST_CNTL, ctrl->val);
break;
case V4L2_CID_SATURATION:
saa711x_write(sd, R_0C_CHROMA_SAT_CNTL, ctrl->val);
break;
case V4L2_CID_HUE:
saa711x_write(sd, R_0D_CHROMA_HUE_CNTL, ctrl->val);
break;
case V4L2_CID_CHROMA_AGC:
/* chroma gain cluster */
if (state->agc->val)
saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val);
else
saa711x_write(sd, R_0F_CHROMA_GAIN_CNTL, state->gain->val | 0x80);
break;
default:
return -EINVAL;
}
return 0;
}
static int saa711x_set_size(struct v4l2_subdev *sd, int width, int height)
{
struct saa711x_state *state = to_state(sd);
int HPSC, HFSC;
int VSCY;
int res;
int is_50hz = state->std & V4L2_STD_625_50;
int Vsrc = is_50hz ? 576 : 480;
v4l2_dbg(1, debug, sd, "decoder set size to %ix%i\n", width, height);
/* FIXME need better bounds checking here */
if ((width < 1) || (width > 1440))
return -EINVAL;
if ((height < 1) || (height > Vsrc))
return -EINVAL;
if (!saa711x_has_reg(state->ident, R_D0_B_HORIZ_PRESCALING)) {
/* Decoder only supports 720 columns and 480 or 576 lines */
if (width != 720)
return -EINVAL;
if (height != Vsrc)
return -EINVAL;
}
state->width = width;
state->height = height;
if (!saa711x_has_reg(state->ident, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH))
return 0;
/* probably have a valid size, let's set it */
/* Set output width/height */
/* width */
saa711x_write(sd, R_CC_B_HORIZ_OUTPUT_WINDOW_LENGTH,
(u8) (width & 0xff));
saa711x_write(sd, R_CD_B_HORIZ_OUTPUT_WINDOW_LENGTH_MSB,
(u8) ((width >> 8) & 0xff));
/* Vertical Scaling uses height/2 */
res = height / 2;
/* On 60 Hz, a higher vertical output size is used */
if (!is_50hz)
res += (VRES_60HZ - 480) >> 1;
/* height */
saa711x_write(sd, R_CE_B_VERT_OUTPUT_WINDOW_LENGTH,
(u8) (res & 0xff));
saa711x_write(sd, R_CF_B_VERT_OUTPUT_WINDOW_LENGTH_MSB,
(u8) ((res >> 8) & 0xff));
/* Scaling settings */
/* Hprescaler is floor(inres/outres) */
HPSC = (int)(720 / width);
/* 0 is not allowed (div. by zero) */
HPSC = HPSC ? HPSC : 1;
HFSC = (int)((1024 * 720) / (HPSC * width));
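/* Example of the two-stage horizontal scaling (added note): for
width = 320, HPSC = 720 / 320 = 2 (integer prescale) and
HFSC = (1024 * 720) / (2 * 320) = 1152, i.e. the fine scaler makes up
the remainder in 1/1024 steps (1024 would mean no fine scaling). */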
/* FIXME hardcodes to "Task B"
* write H prescaler integer */
saa711x_write(sd, R_D0_B_HORIZ_PRESCALING,
(u8) (HPSC & 0x3f));
v4l2_dbg(1, debug, sd, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC);
/* write H fine-scaling (luminance) */
saa711x_write(sd, R_D8_B_HORIZ_LUMA_SCALING_INC,
(u8) (HFSC & 0xff));
saa711x_write(sd, R_D9_B_HORIZ_LUMA_SCALING_INC_MSB,
(u8) ((HFSC >> 8) & 0xff));
/* write H fine-scaling (chrominance)
* must be luma/2, so I'll just bitshift :) */
saa711x_write(sd, R_DC_B_HORIZ_CHROMA_SCALING,
(u8) ((HFSC >> 1) & 0xff));
saa711x_write(sd, R_DD_B_HORIZ_CHROMA_SCALING_MSB,
(u8) ((HFSC >> 9) & 0xff));
VSCY = (int)((1024 * Vsrc) / height);
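/* Worked example (added note): capturing PAL at height = 288 gives
VSCY = (1024 * 576) / 288 = 2048, so the contrast/saturation
correction written below becomes 64 * 1024 / 2048 = 32. */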
v4l2_dbg(1, debug, sd, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY);
/* Correct Contrast and Luminance */
saa711x_write(sd, R_D5_B_LUMA_CONTRAST_CNTL,
(u8) (64 * 1024 / VSCY));
saa711x_write(sd, R_D6_B_CHROMA_SATURATION_CNTL,
(u8) (64 * 1024 / VSCY));
/* write V fine-scaling (luminance) */
saa711x_write(sd, R_E0_B_VERT_LUMA_SCALING_INC,
(u8) (VSCY & 0xff));
saa711x_write(sd, R_E1_B_VERT_LUMA_SCALING_INC_MSB,
(u8) ((VSCY >> 8) & 0xff));
/* write V fine-scaling (chrominance) */
saa711x_write(sd, R_E2_B_VERT_CHROMA_SCALING_INC,
(u8) (VSCY & 0xff));
saa711x_write(sd, R_E3_B_VERT_CHROMA_SCALING_INC_MSB,
(u8) ((VSCY >> 8) & 0xff));
saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
/* Activates task "B" */
saa711x_write(sd, R_80_GLOBAL_CNTL_1,
saa711x_read(sd, R_80_GLOBAL_CNTL_1) | 0x20);
return 0;
}
static void saa711x_set_v4lstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct saa711x_state *state = to_state(sd);
/* Prevent unnecessary standard changes. During a standard
change the I-Port is temporarily disabled. Any devices
reading from that port can get confused.
Note that s_std is also used to switch from
radio to TV mode, so if a s_std is broadcast to
all I2C devices then you do not want to have an unwanted
side-effect here. */
if (std == state->std)
return;
state->std = std;
/* This works for NTSC-M, SECAM-L and the 50 Hz PAL variants. */
if (std & V4L2_STD_525_60) {
v4l2_dbg(1, debug, sd, "decoder set standard 60 Hz\n");
if (state->ident == GM7113C) {
u8 reg = saa711x_read(sd, R_08_SYNC_CNTL);
reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD);
reg |= SAA7113_R_08_FSEL;
saa711x_write(sd, R_08_SYNC_CNTL, reg);
} else {
saa711x_writeregs(sd, saa7115_cfg_60hz_video);
}
saa711x_set_size(sd, 720, 480);
} else {
v4l2_dbg(1, debug, sd, "decoder set standard 50 Hz\n");
if (state->ident == GM7113C) {
u8 reg = saa711x_read(sd, R_08_SYNC_CNTL);
reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD);
saa711x_write(sd, R_08_SYNC_CNTL, reg);
} else {
saa711x_writeregs(sd, saa7115_cfg_50hz_video);
}
saa711x_set_size(sd, 720, 576);
}
/* Register 0E - Bits D6-D4 on NO-AUTO mode
(the SAA7111 and SAA7113 don't have auto mode)
50 Hz / 625 lines 60 Hz / 525 lines
000 PAL BGDHI (4.43MHz) NTSC M (3.58MHz)
001 NTSC 4.43 (50 Hz) PAL 4.43 (60 Hz)
010 Combination-PAL N (3.58MHz) NTSC 4.43 (60 Hz)
011 NTSC N (3.58MHz) PAL M (3.58MHz)
100 reserved NTSC-Japan (3.58MHz)
*/
if (state->ident <= SAA7113 ||
state->ident == GM7113C) {
u8 reg = saa711x_read(sd, R_0E_CHROMA_CNTL_1) & 0x8f;
if (std == V4L2_STD_PAL_M) {
reg |= 0x30;
} else if (std == V4L2_STD_PAL_Nc) {
reg |= 0x20;
} else if (std == V4L2_STD_PAL_60) {
reg |= 0x10;
} else if (std == V4L2_STD_NTSC_M_JP) {
reg |= 0x40;
} else if (std & V4L2_STD_SECAM) {
reg |= 0x50;
}
saa711x_write(sd, R_0E_CHROMA_CNTL_1, reg);
} else {
/* restart task B if needed */
int taskb = saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10;
if (taskb && state->ident == SAA7114)
saa711x_writeregs(sd, saa7115_cfg_vbi_on);
/* switch audio mode too! */
saa711x_s_clock_freq(sd, state->audclk_freq);
}
}
/* setup the sliced VBI lcr registers according to the sliced VBI format */
static void saa711x_set_lcr(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
struct saa711x_state *state = to_state(sd);
int is_50hz = (state->std & V4L2_STD_625_50);
u8 lcr[24];
int i, x;
#if 1
/* saa7113/7114/7118 VBI support are experimental */
if (!saa711x_has_reg(state->ident, R_41_LCR_BASE))
return;
#else
/* SAA7113 and SAA7118 also should support VBI - Need testing */
if (state->ident != SAA7115)
return;
#endif
for (i = 0; i <= 23; i++)
lcr[i] = 0xff;
if (fmt == NULL) {
/* raw VBI */
if (is_50hz)
for (i = 6; i <= 23; i++)
lcr[i] = 0xdd;
else
for (i = 10; i <= 21; i++)
lcr[i] = 0xdd;
} else {
/* sliced VBI */
/* first clear lines that cannot be captured */
if (is_50hz) {
for (i = 0; i <= 5; i++)
fmt->service_lines[0][i] =
fmt->service_lines[1][i] = 0;
}
else {
for (i = 0; i <= 9; i++)
fmt->service_lines[0][i] =
fmt->service_lines[1][i] = 0;
for (i = 22; i <= 23; i++)
fmt->service_lines[0][i] =
fmt->service_lines[1][i] = 0;
}
/* Now set the lcr values according to the specified service */
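/* Added note: each LCR register packs two 4-bit slicer codes for one
video line - the high nibble selects the service for the first field
(service_lines[0]) and the low nibble the second field, with 0xf
meaning "no slicing on this line". The encoding mirrors the lcr2vbi
table used in saa711x_g_sliced_fmt below. */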
for (i = 6; i <= 23; i++) {
lcr[i] = 0;
for (x = 0; x <= 1; x++) {
switch (fmt->service_lines[1-x][i]) {
case 0:
lcr[i] |= 0xf << (4 * x);
break;
case V4L2_SLICED_TELETEXT_B:
lcr[i] |= 1 << (4 * x);
break;
case V4L2_SLICED_CAPTION_525:
lcr[i] |= 4 << (4 * x);
break;
case V4L2_SLICED_WSS_625:
lcr[i] |= 5 << (4 * x);
break;
case V4L2_SLICED_VPS:
lcr[i] |= 7 << (4 * x);
break;
}
}
}
}
/* write the lcr registers */
for (i = 2; i <= 23; i++) {
saa711x_write(sd, i - 2 + R_41_LCR_BASE, lcr[i]);
}
/* enable/disable raw VBI capturing */
saa711x_writeregs(sd, fmt == NULL ?
saa7115_cfg_vbi_on :
saa7115_cfg_vbi_off);
}
static int saa711x_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *sliced)
{
static u16 lcr2vbi[] = {
0, V4L2_SLICED_TELETEXT_B, 0, /* 1 */
0, V4L2_SLICED_CAPTION_525, /* 4 */
V4L2_SLICED_WSS_625, 0, /* 5 */
V4L2_SLICED_VPS, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0
};
int i;
memset(sliced->service_lines, 0, sizeof(sliced->service_lines));
sliced->service_set = 0;
/* done if using raw VBI */
if (saa711x_read(sd, R_80_GLOBAL_CNTL_1) & 0x10)
return 0;
for (i = 2; i <= 23; i++) {
u8 v = saa711x_read(sd, i - 2 + R_41_LCR_BASE);
sliced->service_lines[0][i] = lcr2vbi[v >> 4];
sliced->service_lines[1][i] = lcr2vbi[v & 0xf];
sliced->service_set |=
sliced->service_lines[0][i] | sliced->service_lines[1][i];
}
return 0;
}
static int saa711x_s_raw_fmt(struct v4l2_subdev *sd, struct v4l2_vbi_format *fmt)
{
saa711x_set_lcr(sd, NULL);
return 0;
}
static int saa711x_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
saa711x_set_lcr(sd, fmt);
return 0;
}
static int saa711x_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
if (fmt->code != MEDIA_BUS_FMT_FIXED)
return -EINVAL;
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
return saa711x_set_size(sd, fmt->width, fmt->height);
}
/* Decode the sliced VBI data stream as created by the saa7115.
The format is described in the saa7115 datasheet in Tables 25 and 26
and in Figure 33.
The current implementation uses SAV/EAV codes and not the ancillary data
headers. The vbi->p pointer points to the R_5E_SDID byte right after the SAV
code. */
static int saa711x_decode_vbi_line(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi)
{
struct saa711x_state *state = to_state(sd);
static const char vbi_no_data_pattern[] = {
0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0, 0xa0
};
u8 *p = vbi->p;
u32 wss;
int id1, id2; /* the ID1 and ID2 bytes from the internal header */
vbi->type = 0; /* mark result as a failure */
id1 = p[2];
id2 = p[3];
/* Note: the field bit is inverted for 60 Hz video */
if (state->std & V4L2_STD_525_60)
id1 ^= 0x40;
/* Skip internal header, p now points to the start of the payload */
p += 4;
vbi->p = p;
/* calculate field and line number of the VBI packet (1-23) */
vbi->is_second_field = ((id1 & 0x40) != 0);
vbi->line = (id1 & 0x3f) << 3;
vbi->line |= (id2 & 0x70) >> 4;
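/* Example (added note): once the field bit has been fixed up,
id1 = 0x02 and id2 = 0x54 decode to the first field, line
(2 << 3) | 5 = 21, with data type id2 & 0xf = 4, i.e. line-21
closed captions. */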
/* Obtain data type */
id2 &= 0xf;
/* If the VBI slicer does not detect any signal it will fill up
the payload buffer with 0xa0 bytes. */
if (!memcmp(p, vbi_no_data_pattern, sizeof(vbi_no_data_pattern)))
return 0;
/* decode payloads */
switch (id2) {
case 1:
vbi->type = V4L2_SLICED_TELETEXT_B;
break;
case 4:
if (!saa711x_odd_parity(p[0]) || !saa711x_odd_parity(p[1]))
return 0;
vbi->type = V4L2_SLICED_CAPTION_525;
break;
case 5:
wss = saa711x_decode_wss(p);
if (wss == -1)
return 0;
p[0] = wss & 0xff;
p[1] = wss >> 8;
vbi->type = V4L2_SLICED_WSS_625;
break;
case 7:
if (saa711x_decode_vps(p, p) != 0)
return 0;
vbi->type = V4L2_SLICED_VPS;
break;
default:
break;
}
return 0;
}
/* ============ SAA7115 AUDIO settings (end) ============= */
static int saa711x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct saa711x_state *state = to_state(sd);
int status;
if (state->radio)
return 0;
status = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
v4l2_dbg(1, debug, sd, "status: 0x%02x\n", status);
vt->signal = ((status & (1 << 6)) == 0) ? 0xffff : 0x0;
return 0;
}
static int saa711x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct saa711x_state *state = to_state(sd);
state->radio = 0;
saa711x_set_v4lstd(sd, std);
return 0;
}
static int saa711x_s_radio(struct v4l2_subdev *sd)
{
struct saa711x_state *state = to_state(sd);
state->radio = 1;
return 0;
}
static int saa711x_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct saa711x_state *state = to_state(sd);
u8 mask = (state->ident <= SAA7111A) ? 0xf8 : 0xf0;
v4l2_dbg(1, debug, sd, "decoder set input %d output %d\n",
input, output);
/* saa7111/3 does not have these inputs */
if ((state->ident <= SAA7113 ||
state->ident == GM7113C) &&
(input == SAA7115_COMPOSITE4 ||
input == SAA7115_COMPOSITE5)) {
return -EINVAL;
}
if (input > SAA7115_SVIDEO3)
return -EINVAL;
if (state->input == input && state->output == output)
return 0;
v4l2_dbg(1, debug, sd, "now setting %s input %s output\n",
(input >= SAA7115_SVIDEO0) ? "S-Video" : "Composite",
(output == SAA7115_IPORT_ON) ? "iport on" : "iport off");
state->input = input;
/* saa7111 has slightly different input numbering */
if (state->ident <= SAA7111A) {
if (input >= SAA7115_COMPOSITE4)
input -= 2;
/* saa7111 specific */
saa711x_write(sd, R_10_CHROMA_CNTL_2,
(saa711x_read(sd, R_10_CHROMA_CNTL_2) & 0x3f) |
((output & 0xc0) ^ 0x40));
saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL,
(saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL) & 0xf0) |
((output & 2) ? 0x0a : 0));
}
/* select mode */
saa711x_write(sd, R_02_INPUT_CNTL_1,
(saa711x_read(sd, R_02_INPUT_CNTL_1) & mask) |
input);
/* bypass chrominance trap for S-Video modes */
saa711x_write(sd, R_09_LUMA_CNTL,
(saa711x_read(sd, R_09_LUMA_CNTL) & 0x7f) |
(state->input >= SAA7115_SVIDEO0 ? 0x80 : 0x0));
state->output = output;
if (state->ident == SAA7114 ||
state->ident == SAA7115) {
saa711x_write(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK,
(saa711x_read(sd, R_83_X_PORT_I_O_ENA_AND_OUT_CLK) & 0xfe) |
(state->output & 0x01));
}
if (state->ident > SAA7111A) {
if (config & SAA7115_IDQ_IS_DEFAULT)
saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x20);
else
saa711x_write(sd, R_85_I_PORT_SIGNAL_POLAR, 0x21);
}
return 0;
}
static int saa711x_s_gpio(struct v4l2_subdev *sd, u32 val)
{
struct saa711x_state *state = to_state(sd);
if (state->ident > SAA7111A)
return -EINVAL;
saa711x_write(sd, 0x11, (saa711x_read(sd, 0x11) & 0x7f) |
(val ? 0x80 : 0));
return 0;
}
static int saa711x_s_stream(struct v4l2_subdev *sd, int enable)
{
struct saa711x_state *state = to_state(sd);
v4l2_dbg(1, debug, sd, "%s output\n",
enable ? "enable" : "disable");
if (state->enable == enable)
return 0;
state->enable = enable;
if (!saa711x_has_reg(state->ident, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED))
return 0;
saa711x_write(sd, R_87_I_PORT_I_O_ENA_OUT_CLK_AND_GATED, state->enable);
return 0;
}
static int saa711x_s_crystal_freq(struct v4l2_subdev *sd, u32 freq, u32 flags)
{
struct saa711x_state *state = to_state(sd);
if (freq != SAA7115_FREQ_32_11_MHZ && freq != SAA7115_FREQ_24_576_MHZ)
return -EINVAL;
state->crystal_freq = freq;
state->double_asclk = flags & SAA7115_FREQ_FL_DOUBLE_ASCLK;
state->cgcdiv = (flags & SAA7115_FREQ_FL_CGCDIV) ? 3 : 4;
state->ucgc = flags & SAA7115_FREQ_FL_UCGC;
state->apll = flags & SAA7115_FREQ_FL_APLL;
saa711x_s_clock_freq(sd, state->audclk_freq);
return 0;
}
static int saa711x_reset(struct v4l2_subdev *sd, u32 val)
{
v4l2_dbg(1, debug, sd, "decoder RESET\n");
saa711x_writeregs(sd, saa7115_cfg_reset_scaler);
return 0;
}
static int saa711x_g_vbi_data(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *data)
{
/* Note: the internal field ID is inverted for NTSC,
so data->field 0 maps to the saa7115 even field,
whereas for PAL it maps to the saa7115 odd field. */
switch (data->id) {
case V4L2_SLICED_WSS_625:
if (saa711x_read(sd, 0x6b) & 0xc0)
return -EIO;
data->data[0] = saa711x_read(sd, 0x6c);
data->data[1] = saa711x_read(sd, 0x6d);
return 0;
case V4L2_SLICED_CAPTION_525:
if (data->field == 0) {
/* CC */
if (saa711x_read(sd, 0x66) & 0x30)
return -EIO;
data->data[0] = saa711x_read(sd, 0x69);
data->data[1] = saa711x_read(sd, 0x6a);
return 0;
}
/* XDS */
if (saa711x_read(sd, 0x66) & 0xc0)
return -EIO;
data->data[0] = saa711x_read(sd, 0x67);
data->data[1] = saa711x_read(sd, 0x68);
return 0;
default:
return -EINVAL;
}
}
static int saa711x_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct saa711x_state *state = to_state(sd);
int reg1f, reg1e;
/*
* The V4L2 core already initializes std with all supported
* standards. All the driver needs to do is mask it, removing
* the standards that don't apply from the mask
*/
reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
if (state->ident == SAA7115) {
reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
v4l2_dbg(1, debug, sd, "Status byte 1 (0x1e)=0x%02x\n", reg1e);
switch (reg1e & 0x03) {
case 1:
*std &= V4L2_STD_NTSC;
break;
case 2:
/*
* V4L2_STD_PAL covers just the European PAL standards.
* Using it alone would be wrong, as the device could also
* be using another PAL standard.
*/
*std &= V4L2_STD_PAL | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
V4L2_STD_PAL_M | V4L2_STD_PAL_60;
break;
case 3:
*std &= V4L2_STD_SECAM;
break;
default:
*std = V4L2_STD_UNKNOWN;
/* Can't detect anything */
break;
}
}
v4l2_dbg(1, debug, sd, "Status byte 2 (0x1f)=0x%02x\n", reg1f);
/* horizontal/vertical not locked */
if (reg1f & 0x40) {
*std = V4L2_STD_UNKNOWN;
goto ret;
}
if (reg1f & 0x20)
*std &= V4L2_STD_525_60;
else
*std &= V4L2_STD_625_50;
ret:
v4l2_dbg(1, debug, sd, "detected std mask = %08Lx\n", *std);
return 0;
}
static int saa711x_g_input_status(struct v4l2_subdev *sd, u32 *status)
{
struct saa711x_state *state = to_state(sd);
int reg1e = 0x80;
int reg1f;
*status = V4L2_IN_ST_NO_SIGNAL;
if (state->ident == SAA7115)
reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
if ((reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80)
*status = 0;
return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int saa711x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
reg->val = saa711x_read(sd, reg->reg & 0xff);
reg->size = 1;
return 0;
}
static int saa711x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
{
saa711x_write(sd, reg->reg & 0xff, reg->val & 0xff);
return 0;
}
#endif
static int saa711x_log_status(struct v4l2_subdev *sd)
{
struct saa711x_state *state = to_state(sd);
int reg1e, reg1f;
int signalOk;
int vcr;
v4l2_info(sd, "Audio frequency: %d Hz\n", state->audclk_freq);
if (state->ident != SAA7115) {
/* status for the saa7114 */
reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
signalOk = (reg1f & 0xc1) == 0x81;
v4l2_info(sd, "Video signal: %s\n", signalOk ? "ok" : "bad");
v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
return 0;
}
/* status for the saa7115 */
reg1e = saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC);
reg1f = saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC);
signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80;
vcr = !(reg1f & 0x10);
if (state->input >= 6)
v4l2_info(sd, "Input: S-Video %d\n", state->input - 6);
else
v4l2_info(sd, "Input: Composite %d\n", state->input);
v4l2_info(sd, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
v4l2_info(sd, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
switch (reg1e & 0x03) {
case 1:
v4l2_info(sd, "Detected format: NTSC\n");
break;
case 2:
v4l2_info(sd, "Detected format: PAL\n");
break;
case 3:
v4l2_info(sd, "Detected format: SECAM\n");
break;
default:
v4l2_info(sd, "Detected format: BW/No color\n");
break;
}
v4l2_info(sd, "Width, Height: %d, %d\n", state->width, state->height);
v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
return 0;
}
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops saa711x_ctrl_ops = {
.s_ctrl = saa711x_s_ctrl,
.g_volatile_ctrl = saa711x_g_volatile_ctrl,
};
static const struct v4l2_subdev_core_ops saa711x_core_ops = {
.log_status = saa711x_log_status,
.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
.g_ctrl = v4l2_subdev_g_ctrl,
.s_ctrl = v4l2_subdev_s_ctrl,
.queryctrl = v4l2_subdev_queryctrl,
.querymenu = v4l2_subdev_querymenu,
.reset = saa711x_reset,
.s_gpio = saa711x_s_gpio,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = saa711x_g_register,
.s_register = saa711x_s_register,
#endif
};
static const struct v4l2_subdev_tuner_ops saa711x_tuner_ops = {
.s_radio = saa711x_s_radio,
.g_tuner = saa711x_g_tuner,
};
static const struct v4l2_subdev_audio_ops saa711x_audio_ops = {
.s_clock_freq = saa711x_s_clock_freq,
};
static const struct v4l2_subdev_video_ops saa711x_video_ops = {
.s_std = saa711x_s_std,
.s_routing = saa711x_s_routing,
.s_crystal_freq = saa711x_s_crystal_freq,
.s_mbus_fmt = saa711x_s_mbus_fmt,
.s_stream = saa711x_s_stream,
.querystd = saa711x_querystd,
.g_input_status = saa711x_g_input_status,
};
static const struct v4l2_subdev_vbi_ops saa711x_vbi_ops = {
.g_vbi_data = saa711x_g_vbi_data,
.decode_vbi_line = saa711x_decode_vbi_line,
.g_sliced_fmt = saa711x_g_sliced_fmt,
.s_sliced_fmt = saa711x_s_sliced_fmt,
.s_raw_fmt = saa711x_s_raw_fmt,
};
static const struct v4l2_subdev_ops saa711x_ops = {
.core = &saa711x_core_ops,
.tuner = &saa711x_tuner_ops,
.audio = &saa711x_audio_ops,
.video = &saa711x_video_ops,
.vbi = &saa711x_vbi_ops,
};
#define CHIP_VER_SIZE 16
/* ----------------------------------------------------------------------- */
static void saa711x_write_platform_data(struct saa711x_state *state,
struct saa7115_platform_data *data)
{
struct v4l2_subdev *sd = &state->sd;
u8 work;
if (state->ident != GM7113C &&
state->ident != SAA7113)
return;
if (data->saa7113_r08_htc) {
work = saa711x_read(sd, R_08_SYNC_CNTL);
work &= ~SAA7113_R_08_HTC_MASK;
work |= ((*data->saa7113_r08_htc) << SAA7113_R_08_HTC_OFFSET);
saa711x_write(sd, R_08_SYNC_CNTL, work);
}
if (data->saa7113_r10_vrln) {
work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
work &= ~SAA7113_R_10_VRLN_MASK;
if (*data->saa7113_r10_vrln)
work |= (1 << SAA7113_R_10_VRLN_OFFSET);
saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
}
if (data->saa7113_r10_ofts) {
work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
work &= ~SAA7113_R_10_OFTS_MASK;
work |= (*data->saa7113_r10_ofts << SAA7113_R_10_OFTS_OFFSET);
saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
}
if (data->saa7113_r12_rts0) {
work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
work &= ~SAA7113_R_12_RTS0_MASK;
work |= (*data->saa7113_r12_rts0 << SAA7113_R_12_RTS0_OFFSET);
/* According to the datasheet,
* SAA7113_RTS_DOT_IN should only be used on RTS1 */
WARN_ON(*data->saa7113_r12_rts0 == SAA7113_RTS_DOT_IN);
saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
}
if (data->saa7113_r12_rts1) {
work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
work &= ~SAA7113_R_12_RTS1_MASK;
work |= (*data->saa7113_r12_rts1 << SAA7113_R_12_RTS1_OFFSET);
saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
}
if (data->saa7113_r13_adlsb) {
work = saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL);
work &= ~SAA7113_R_13_ADLSB_MASK;
if (*data->saa7113_r13_adlsb)
work |= (1 << SAA7113_R_13_ADLSB_OFFSET);
saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, work);
}
}
/**
* saa711x_detect_chip - Detects the saa711x (or clone) variant
* @client: I2C client structure.
* @id: I2C device ID structure.
* @name: Name of the device to be filled.
*
* Detects the Philips/NXP saa711x chip, or some clone of it.
* If 'id' is NULL or id->driver_data is equal to 1, it auto-probes
* the analog demod.
* If the chip is not found, it returns -ENODEV.
* If auto-detection is disabled and the chip doesn't match what was
* expected, it returns -EINVAL and fills 'name'.
* If the chip is found, it returns the chip ID and fills 'name'.
*/
static int saa711x_detect_chip(struct i2c_client *client,
const struct i2c_device_id *id,
char *name)
{
char chip_ver[CHIP_VER_SIZE];
char chip_id;
int i;
int autodetect;
autodetect = !id || id->driver_data == 1;
/* Read the chip version register */
for (i = 0; i < CHIP_VER_SIZE; i++) {
i2c_smbus_write_byte_data(client, 0, i);
chip_ver[i] = i2c_smbus_read_byte_data(client, 0);
name[i] = (chip_ver[i] & 0x0f) + '0';
if (name[i] > '9')
name[i] += 'a' - '9' - 1;
}
name[i] = '\0';
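/*
* Added note: the loop above turns the low nibble of each version byte
* into a hex digit, so a genuine Philips part reads back as something
* like "?f7115..."; the memcmp below keys on "f711" at positions 1-4
* and on the chip digit at position 5.
*/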
/* Check if it is a Philips/NXP chip */
if (!memcmp(name + 1, "f711", 4)) {
chip_id = name[5];
snprintf(name, CHIP_VER_SIZE, "saa711%c", chip_id);
if (!autodetect && strcmp(name, id->name))
return -EINVAL;
switch (chip_id) {
case '1':
if (chip_ver[0] & 0xf0) {
snprintf(name, CHIP_VER_SIZE, "saa711%ca", chip_id);
v4l_info(client, "saa7111a variant found\n");
return SAA7111A;
}
return SAA7111;
case '3':
return SAA7113;
case '4':
return SAA7114;
case '5':
return SAA7115;
case '8':
return SAA7118;
default:
v4l2_info(client,
"WARNING: Philips/NXP chip unknown - Falling back to saa7111\n");
return SAA7111;
}
}
/* Check if it is a gm7113c */
if (!memcmp(name, "0000", 4)) {
chip_id = 0;
for (i = 0; i < 4; i++) {
chip_id = chip_id << 1;
chip_id |= (chip_ver[i] & 0x80) ? 1 : 0;
}
/*
* Note: From the datasheet, only versions 1 and 2
* exist. However, tests on a device labeled as:
* "GM7113C 1145" returned "10" on all 16 chip
* version (reg 0x00) reads. So, we need to also
* accept at least version 0. For now, let's just
* assume that a device that returns "0000" for
* the lower nibble is a gm7113c.
*/
strlcpy(name, "gm7113c", CHIP_VER_SIZE);
if (!autodetect && strcmp(name, id->name))
return -EINVAL;
v4l_dbg(1, debug, client,
"It seems to be a %s chip (%*ph) @ 0x%x.\n",
name, 16, chip_ver, client->addr << 1);
return GM7113C;
}
/* Chip was not discovered. Return its ID and don't bind */
v4l_dbg(1, debug, client, "chip %*ph @ 0x%x is unknown.\n",
16, chip_ver, client->addr << 1);
return -ENODEV;
}
static int saa711x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct saa711x_state *state;
struct v4l2_subdev *sd;
struct v4l2_ctrl_handler *hdl;
struct saa7115_platform_data *pdata;
int ident;
char name[CHIP_VER_SIZE + 1];
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
ident = saa711x_detect_chip(client, id, name);
if (ident == -EINVAL) {
/* Chip exists, but doesn't match */
v4l_warn(client, "found %s while %s was expected\n",
name, id->name);
return -ENODEV;
}
if (ident < 0)
return ident;
strlcpy(client->name, name, sizeof(client->name));
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
v4l_info(client, "%s found @ 0x%x (%s)\n", name,
client->addr << 1, client->adapter->name);
hdl = &state->hdl;
v4l2_ctrl_handler_init(hdl, 6);
/* add in ascending ID order */
v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_CONTRAST, 0, 127, 1, 64);
v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_SATURATION, 0, 127, 1, 64);
v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
state->agc = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_CHROMA_AGC, 0, 1, 1, 1);
state->gain = v4l2_ctrl_new_std(hdl, &saa711x_ctrl_ops,
V4L2_CID_CHROMA_GAIN, 0, 127, 1, 40);
sd->ctrl_handler = hdl;
if (hdl->error) {
int err = hdl->error;
v4l2_ctrl_handler_free(hdl);
return err;
}
v4l2_ctrl_auto_cluster(2, &state->agc, 0, true);
state->input = -1;
state->output = SAA7115_IPORT_ON;
state->enable = 1;
state->radio = 0;
state->ident = ident;
state->audclk_freq = 48000;
v4l2_dbg(1, debug, sd, "writing init values\n");
/* init to 60hz/48khz */
state->crystal_freq = SAA7115_FREQ_24_576_MHZ;
pdata = client->dev.platform_data;
switch (state->ident) {
case SAA7111:
case SAA7111A:
saa711x_writeregs(sd, saa7111_init);
break;
case GM7113C:
saa711x_writeregs(sd, gm7113c_init);
break;
case SAA7113:
if (pdata && pdata->saa7113_force_gm7113c_init)
saa711x_writeregs(sd, gm7113c_init);
else
saa711x_writeregs(sd, saa7113_init);
break;
default:
state->crystal_freq = SAA7115_FREQ_32_11_MHZ;
saa711x_writeregs(sd, saa7115_init_auto_input);
}
if (state->ident > SAA7111A && state->ident != GM7113C)
saa711x_writeregs(sd, saa7115_init_misc);
if (pdata)
saa711x_write_platform_data(state, pdata);
saa711x_set_v4lstd(sd, V4L2_STD_NTSC);
v4l2_ctrl_handler_setup(hdl);
v4l2_dbg(1, debug, sd, "status: (1E) 0x%02x, (1F) 0x%02x\n",
saa711x_read(sd, R_1E_STATUS_BYTE_1_VD_DEC),
saa711x_read(sd, R_1F_STATUS_BYTE_2_VD_DEC));
return 0;
}
/* ----------------------------------------------------------------------- */
static int saa711x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
return 0;
}
static const struct i2c_device_id saa711x_id[] = {
{ "saa7115_auto", 1 }, /* autodetect */
{ "saa7111", 0 },
{ "saa7113", 0 },
{ "saa7114", 0 },
{ "saa7115", 0 },
{ "saa7118", 0 },
{ "gm7113c", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, saa711x_id);
static struct i2c_driver saa711x_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "saa7115",
},
.probe = saa711x_probe,
.remove = saa711x_remove,
.id_table = saa711x_id,
};
module_i2c_driver(saa711x_driver);
| gpl-2.0 |
LiYihai/linux-2.6.39.4-notes | fs/jfs/ioctl.c | 2996 | 3267 | /*
* linux/fs/jfs/ioctl.c
*
* Copyright (C) 2006 Herbert Poetzl
* adapted from Remy Card's ext2/ioctl.c
*/
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <linux/mount.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include "jfs_incore.h"
#include "jfs_dinode.h"
#include "jfs_inode.h"
static struct {
long jfs_flag;
long ext2_flag;
} jfs_map[] = {
{JFS_NOATIME_FL, FS_NOATIME_FL},
{JFS_DIRSYNC_FL, FS_DIRSYNC_FL},
{JFS_SYNC_FL, FS_SYNC_FL},
{JFS_SECRM_FL, FS_SECRM_FL},
{JFS_UNRM_FL, FS_UNRM_FL},
{JFS_APPEND_FL, FS_APPEND_FL},
{JFS_IMMUTABLE_FL, FS_IMMUTABLE_FL},
{0, 0},
};
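/*
* Usage sketch (added note): jfs_map_ext2(flags, 1) translates ext2
* flag bits into their JFS equivalents (e.g. FS_SYNC_FL -> JFS_SYNC_FL),
* while jfs_map_ext2(flags, 0) maps in the opposite direction; both
* directions are used by the GETFLAGS/SETFLAGS ioctls below.
*/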
static long jfs_map_ext2(unsigned long flags, int from)
{
int index=0;
long mapped=0;
while (jfs_map[index].jfs_flag) {
if (from) {
if (jfs_map[index].ext2_flag & flags)
mapped |= jfs_map[index].jfs_flag;
} else {
if (jfs_map[index].jfs_flag & flags)
mapped |= jfs_map[index].ext2_flag;
}
index++;
}
return mapped;
}
long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = filp->f_dentry->d_inode;
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
unsigned int flags;
switch (cmd) {
case JFS_IOC_GETFLAGS:
jfs_get_inode_flags(jfs_inode);
flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;
flags = jfs_map_ext2(flags, 0);
return put_user(flags, (int __user *) arg);
case JFS_IOC_SETFLAGS: {
unsigned int oldflags;
int err;
err = mnt_want_write(filp->f_path.mnt);
if (err)
return err;
if (!inode_owner_or_capable(inode)) {
err = -EACCES;
goto setflags_out;
}
if (get_user(flags, (int __user *) arg)) {
err = -EFAULT;
goto setflags_out;
}
flags = jfs_map_ext2(flags, 1);
if (!S_ISDIR(inode->i_mode))
flags &= ~JFS_DIRSYNC_FL;
/* Is it a quota file? Do not allow users to mess with it */
if (IS_NOQUOTA(inode)) {
err = -EPERM;
goto setflags_out;
}
/* Lock against other parallel changes of flags */
mutex_lock(&inode->i_mutex);
jfs_get_inode_flags(jfs_inode);
oldflags = jfs_inode->mode2;
/*
* The IMMUTABLE and APPEND_ONLY flags can only be changed by
* a user with the relevant capability.
*/
if ((oldflags & JFS_IMMUTABLE_FL) ||
((flags ^ oldflags) &
(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
err = -EPERM;
goto setflags_out;
}
}
flags = flags & JFS_FL_USER_MODIFIABLE;
flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
jfs_inode->mode2 = flags;
jfs_set_inode_flags(inode);
mutex_unlock(&inode->i_mutex);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
setflags_out:
mnt_drop_write(filp->f_path.mnt);
return err;
}
default:
return -ENOTTY;
}
}
#ifdef CONFIG_COMPAT
long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
/* While these ioctl numbers are defined with 'long' and have different
* numbers than the 64-bit ABI,
* the actual implementation only deals with ints and is compatible.
*/
switch (cmd) {
case JFS_IOC_GETFLAGS32:
cmd = JFS_IOC_GETFLAGS;
break;
case JFS_IOC_SETFLAGS32:
cmd = JFS_IOC_SETFLAGS;
break;
}
return jfs_ioctl(filp, cmd, arg);
}
#endif
| gpl-2.0 |
carz2/cm-kernel | arch/m68k/hp300/time.c | 4532 | 1989 | /*
* linux/arch/m68k/hp300/time.c
*
* Copyright (C) 1998 Philip Blundell <philb@gnu.org>
*
* This file contains the HP300-specific time handling code.
*/
#include <asm/ptrace.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/blinken.h>
/* Clock hardware definitions */
#define CLOCKBASE 0xf05f8000
#define CLKCR1 0x1
#define CLKCR2 0x3
#define CLKCR3 CLKCR1
#define CLKSR CLKCR2
#define CLKMSB1 0x5
#define CLKMSB2 0x9
#define CLKMSB3 0xD
/* This is for machines which generate the exact clock. */
#define USECS_PER_JIFFY (1000000/HZ)
#define INTVAL ((10000 / 4) - 1)
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
unsigned long tmp;
irq_handler_t vector = dev_id;
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
return vector(irq, NULL);
}
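/*
* Added note: with HZ = 100 (USECS_PER_JIFFY = 10000) and INTVAL = 2499,
* a ticks value of 1250 below converts to roughly
* 10000 * 1250 / 2499 ~= 5002 microseconds since the last timer tick.
*/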
unsigned long hp300_gettimeoffset(void)
{
/* Read current timer 1 value */
unsigned char lsb, msb1, msb2;
unsigned short ticks;
msb1 = in_8(CLOCKBASE + 5);
lsb = in_8(CLOCKBASE + 7);
msb2 = in_8(CLOCKBASE + 5);
if (msb1 != msb2)
/* A carry happened while we were reading. Read it again */
lsb = in_8(CLOCKBASE + 7);
ticks = INTVAL - ((msb2 << 8) | lsb);
return (USECS_PER_JIFFY * ticks) / INTVAL;
}
void __init hp300_sched_init(irq_handler_t vector)
{
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x1); /* reset */
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
}
| gpl-2.0 |
alexey6600/M8_Sense_7.00 | tools/perf/util/cgroup.c | 5044 | 3197 | #include "util.h"
#include "../perf.h"
#include "parse-options.h"
#include "evsel.h"
#include "cgroup.h"
#include "evlist.h"
int nr_cgroups;
static int
cgroupfs_find_mountpoint(char *buf, size_t maxlen)
{
FILE *fp;
char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
char *token, *saved_ptr = NULL;
int found = 0;
fp = fopen("/proc/mounts", "r");
if (!fp)
return -1;
/*
* in order to handle split hierarchy, we need to scan /proc/mounts
* and inspect every cgroupfs mount point to find one that has
* perf_event subsystem
*/
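/* A matching /proc/mounts line looks roughly like (illustrative):
* "cgroup /sys/fs/cgroup/perf_event cgroup rw,relatime,perf_event 0 0"
* i.e. the fscanf below reads the mountpoint, the fs type and the
* option string, then the options are searched for "perf_event".
*/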
while (fscanf(fp, "%*s %"STR(PATH_MAX)"s %"STR(PATH_MAX)"s %"
STR(PATH_MAX)"s %*d %*d\n",
mountpoint, type, tokens) == 3) {
if (!strcmp(type, "cgroup")) {
token = strtok_r(tokens, ",", &saved_ptr);
while (token != NULL) {
if (!strcmp(token, "perf_event")) {
found = 1;
break;
}
token = strtok_r(NULL, ",", &saved_ptr);
}
}
if (found)
break;
}
fclose(fp);
if (!found)
return -1;
if (strlen(mountpoint) < maxlen) {
strcpy(buf, mountpoint);
return 0;
}
return -1;
}
static int open_cgroup(char *name)
{
char path[PATH_MAX + 1];
char mnt[PATH_MAX + 1];
int fd;
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1))
return -1;
snprintf(path, PATH_MAX, "%s/%s", mnt, name);
fd = open(path, O_RDONLY);
if (fd == -1)
fprintf(stderr, "no access to cgroup %s\n", path);
return fd;
}
static int add_cgroup(struct perf_evlist *evlist, char *str)
{
struct perf_evsel *counter;
struct cgroup_sel *cgrp = NULL;
int n;
/*
* check if cgrp is already defined, if so we reuse it
*/
list_for_each_entry(counter, &evlist->entries, node) {
cgrp = counter->cgrp;
if (!cgrp)
continue;
if (!strcmp(cgrp->name, str))
break;
cgrp = NULL;
}
if (!cgrp) {
cgrp = zalloc(sizeof(*cgrp));
if (!cgrp)
return -1;
cgrp->name = str;
cgrp->fd = open_cgroup(str);
if (cgrp->fd == -1) {
free(cgrp);
return -1;
}
}
/*
* find the corresponding event:
* when adding cgroup N, we need to find event N
*/
n = 0;
list_for_each_entry(counter, &evlist->entries, node) {
if (n == nr_cgroups)
goto found;
n++;
}
if (cgrp->refcnt == 0)
free(cgrp);
return -1;
found:
cgrp->refcnt++;
counter->cgrp = cgrp;
return 0;
}
void close_cgroup(struct cgroup_sel *cgrp)
{
if (!cgrp)
return;
/* XXX: not reentrant */
if (--cgrp->refcnt == 0) {
close(cgrp->fd);
free(cgrp->name);
free(cgrp);
}
}
int parse_cgroups(const struct option *opt __used, const char *str,
int unset __used)
{
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
const char *p, *e, *eos = str + strlen(str);
char *s;
int ret;
if (list_empty(&evlist->entries)) {
fprintf(stderr, "must define events before cgroups\n");
return -1;
}
for (;;) {
p = strchr(str, ',');
e = p ? p : eos;
/* allow empty cgroups, i.e., skip */
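/* e.g. (added example) a list like "foo,,bar" maps event 0 to cgroup
* foo, leaves event 1 unattached and maps event 2 to cgroup bar,
* because nr_cgroups advances even over the empty entry */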
if (e - str) {
/* termination added */
s = strndup(str, e - str);
if (!s)
return -1;
ret = add_cgroup(evlist, s);
if (ret) {
free(s);
return -1;
}
}
/* nr_cgroups is increased even for empty cgroups */
nr_cgroups++;
if (!p)
break;
str = p+1;
}
return 0;
}
| gpl-2.0 |
TeamHorizon/android_kernel_htc_endeavoru | fs/gfs2/dentry.c | 8116 | 2926 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/namei.h>
#include <linux/crc32.h>
#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "super.h"
#include "util.h"
#include "inode.h"
/**
* gfs2_drevalidate - Check directory lookup consistency
* @dentry: the dentry to check
* @nd: the nameidata describing the lookup
*
* Check to make sure the lookup necessary to arrive at this inode from its
* parent is still good.
*
* Returns: 1 if the dentry is ok, 0 if it isn't
*/
static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
{
struct dentry *parent;
struct gfs2_sbd *sdp;
struct gfs2_inode *dip;
struct inode *inode;
struct gfs2_holder d_gh;
struct gfs2_inode *ip = NULL;
int error;
int had_lock = 0;
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
parent = dget_parent(dentry);
sdp = GFS2_SB(parent->d_inode);
dip = GFS2_I(parent->d_inode);
inode = dentry->d_inode;
if (inode) {
if (is_bad_inode(inode))
goto invalid;
ip = GFS2_I(inode);
}
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
goto valid;
had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL);
if (!had_lock) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
goto fail;
}
error = gfs2_dir_check(parent->d_inode, &dentry->d_name, ip);
switch (error) {
case 0:
if (!inode)
goto invalid_gunlock;
break;
case -ENOENT:
if (!inode)
goto valid_gunlock;
goto invalid_gunlock;
default:
goto fail_gunlock;
}
valid_gunlock:
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
valid:
dput(parent);
return 1;
invalid_gunlock:
if (!had_lock)
gfs2_glock_dq_uninit(&d_gh);
invalid:
if (inode && S_ISDIR(inode->i_mode)) {
if (have_submounts(dentry))
goto valid;
shrink_dcache_parent(dentry);
}
d_drop(dentry);
dput(parent);
return 0;
fail_gunlock:
gfs2_glock_dq_uninit(&d_gh);
fail:
dput(parent);
return 0;
}
static int gfs2_dhash(const struct dentry *dentry, const struct inode *inode,
struct qstr *str)
{
str->hash = gfs2_disk_hash(str->name, str->len);
return 0;
}
static int gfs2_dentry_delete(const struct dentry *dentry)
{
struct gfs2_inode *ginode;
if (!dentry->d_inode)
return 0;
ginode = GFS2_I(dentry->d_inode);
if (!ginode->i_iopen_gh.gh_gl)
return 0;
if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
return 1;
return 0;
}
const struct dentry_operations gfs2_dops = {
.d_revalidate = gfs2_drevalidate,
.d_hash = gfs2_dhash,
.d_delete = gfs2_dentry_delete,
};
| gpl-2.0 |
ZhizhouTian/kernel-stable | arch/powerpc/sysdev/ge/ge_pic.c | 8372 | 6876 | /*
* Interrupt handling for GE FPGA based PIC
*
* Author: Martyn Welch <martyn.welch@ge.com>
*
* 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include "ge_pic.h"
#define DEBUG
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_DEBUG "gef_pic: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
#define GEF_PIC_NUM_IRQS 32
/* Interrupt Controller Interface Registers */
#define GEF_PIC_INTR_STATUS 0x0000
#define GEF_PIC_INTR_MASK(cpu) (0x0010 + (0x4 * cpu))
#define GEF_PIC_CPU0_INTR_MASK GEF_PIC_INTR_MASK(0)
#define GEF_PIC_CPU1_INTR_MASK GEF_PIC_INTR_MASK(1)
#define GEF_PIC_MCP_MASK(cpu) (0x0018 + (0x4 * cpu))
#define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0)
#define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1)
static DEFINE_RAW_SPINLOCK(gef_pic_lock);
static void __iomem *gef_pic_irq_reg_base;
static struct irq_domain *gef_pic_irq_host;
static int gef_pic_cascade_irq;
/*
* Interrupt Controller Handling
*
* The interrupt controller handles most on-board interrupts, apart
* from PCI interrupts. For example, on the SBC610:
*
* 17:31 RO Reserved
* 16 RO PCI Express Doorbell 3 Status
* 15 RO PCI Express Doorbell 2 Status
* 14 RO PCI Express Doorbell 1 Status
* 13 RO PCI Express Doorbell 0 Status
* 12 RO Real Time Clock Interrupt Status
* 11 RO Temperature Interrupt Status
* 10 RO Temperature Critical Interrupt Status
* 9 RO Ethernet PHY1 Interrupt Status
* 8 RO Ethernet PHY3 Interrupt Status
* 7 RO PEX8548 Interrupt Status
* 6 RO Reserved
* 5 RO Watchdog 0 Interrupt Status
* 4 RO Watchdog 1 Interrupt Status
* 3 RO AXIS Message FIFO A Interrupt Status
* 2 RO AXIS Message FIFO B Interrupt Status
* 1 RO AXIS Message FIFO C Interrupt Status
* 0 RO AXIS Message FIFO D Interrupt Status
*
* Interrupts can be forwarded to one of two output lines. Nothing
* clever is done, so if the masks are incorrectly set, a single input
* interrupt could generate interrupts on both output lines!
*
* The dual lines are there to allow the chained interrupts to be easily
* passed into two different cores. We currently do not use this functionality
* in this driver.
*
* Controller can also be configured to generate Machine checks (MCP), again on
* two lines, to be attached to two different cores. It is suggested that these
* should be masked out.
*/
void gef_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
* See if we actually have an interrupt, call generic handling code if
* we do.
*/
cascade_irq = gef_pic_get_irq();
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static void gef_pic_mask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask &= ~(1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
}
static void gef_pic_mask_ack(struct irq_data *d)
{
/* We don't think we actually have to do anything to ack an interrupt;
* we just need to clear down the device's interrupt and it will go away
*/
gef_pic_mask(d);
}
static void gef_pic_unmask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask |= (1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
}
static struct irq_chip gef_pic_chip = {
.name = "gefp",
.irq_mask = gef_pic_mask,
.irq_mask_ack = gef_pic_mask_ack,
.irq_unmask = gef_pic_unmask,
};
/* When an interrupt is being configured, this call allows some flexibility
* in deciding which irq_chip structure is used
*/
static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
return 0;
}
static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
if (intsize > 1)
*out_flags = intspec[1];
else
*out_flags = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
static const struct irq_domain_ops gef_pic_host_ops = {
.map = gef_pic_host_map,
.xlate = gef_pic_host_xlate,
};
/*
* Initialisation of PIC, this should be called in BSP
*/
void __init gef_pic_init(struct device_node *np)
{
unsigned long flags;
/* Map the devices registers into memory */
gef_pic_irq_reg_base = of_iomap(np, 0);
raw_spin_lock_irqsave(&gef_pic_lock, flags);
/* Initialise everything as masked. */
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_INTR_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
/* Map controller */
gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
if (gef_pic_cascade_irq == NO_IRQ) {
printk(KERN_ERR "SBC610: failed to map cascade interrupt\n");
return;
}
/* Setup an irq_domain structure */
gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
&gef_pic_host_ops, NULL);
if (gef_pic_irq_host == NULL)
return;
/* Chain with parent controller */
irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
* This is called when we receive an interrupt with apparently comes from this
* chip - check, returning the highest interrupt generated or return NO_IRQ
*/
unsigned int gef_pic_get_irq(void)
{
u32 cause, mask, active;
unsigned int virq = NO_IRQ;
int hwirq;
cause = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_STATUS);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
active = cause & mask;
if (active) {
for (hwirq = GEF_PIC_NUM_IRQS - 1; hwirq > -1; hwirq--) {
if (active & (0x1 << hwirq))
break;
}
virq = irq_linear_revmap(gef_pic_irq_host,
(irq_hw_number_t)hwirq);
}
return virq;
}
| gpl-2.0 |
McBane87/Sony_Z1c_LP.242_Kernel | tools/power/cpupower/utils/cpupower-info.c | 8372 | 3014 | /*
* (C) 2011 Thomas Renninger <trenn@suse.de>, Novell Inc.
*
* Licensed under the terms of the GNU GPL License version 2.
*/
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <getopt.h>
#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
static struct option set_opts[] = {
{ .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
{ .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
{ .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
{ },
};
static void print_wrong_arg_exit(void)
{
printf(_("invalid or unknown argument\n"));
exit(EXIT_FAILURE);
}
int cmd_info(int argc, char **argv)
{
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
union {
struct {
int sched_mc:1;
int sched_smt:1;
int perf_bias:1;
};
int params;
} params = {};
int ret = 0;
setlocale(LC_ALL, "");
textdomain(PACKAGE);
/* parameter parsing */
while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) {
switch (ret) {
case 'b':
if (params.perf_bias)
print_wrong_arg_exit();
params.perf_bias = 1;
break;
case 'm':
if (params.sched_mc)
print_wrong_arg_exit();
params.sched_mc = 1;
break;
case 's':
if (params.sched_smt)
print_wrong_arg_exit();
params.sched_smt = 1;
break;
default:
print_wrong_arg_exit();
}
};
if (!params.params)
params.params = 0x7;
/* Default is: show output of CPU 0 only */
if (bitmask_isallclear(cpus_chosen))
bitmask_setbit(cpus_chosen, 0);
if (params.sched_mc) {
ret = sysfs_get_sched("mc");
printf(_("System's multi core scheduler setting: "));
if (ret < 0)
/* if sysfs file is missing it's: errno == ENOENT */
printf(_("not supported\n"));
else
printf("%d\n", ret);
}
if (params.sched_smt) {
ret = sysfs_get_sched("smt");
printf(_("System's thread sibling scheduler setting: "));
if (ret < 0)
/* if sysfs file is missing it's: errno == ENOENT */
printf(_("not supported\n"));
else
printf("%d\n", ret);
}
/* Add more per cpu options here */
if (!params.perf_bias)
return ret;
if (params.perf_bias) {
if (!run_as_root) {
params.perf_bias = 0;
printf(_("Intel's performance bias setting needs root privileges\n"));
} else if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_PERF_BIAS)) {
printf(_("System does not support Intel's performance"
" bias setting\n"));
params.perf_bias = 0;
}
}
/* loop over CPUs */
for (cpu = bitmask_first(cpus_chosen);
cpu <= bitmask_last(cpus_chosen); cpu++) {
if (!bitmask_isbitset(cpus_chosen, cpu) ||
cpufreq_cpu_exists(cpu))
continue;
printf(_("analyzing CPU %d:\n"), cpu);
if (params.perf_bias) {
ret = msr_intel_get_perf_bias(cpu);
if (ret < 0) {
printf(_("Could not read perf-bias value\n"));
break;
} else
printf(_("perf-bias: %d\n"), ret);
}
}
return ret;
}
| gpl-2.0 |
Albinoman887/android_kernel_htc_msm8660 | arch/cris/mm/ioremap.c | 12468 | 2367 | /*
* arch/cris/mm/ioremap.c
*
* Re-map IO memory to kernel address space so that we can access it.
* Needed for memory-mapped I/O devices mapped outside our normal DRAM
* window (that is, all memory-mapped I/O devices).
*
* (C) Copyright 1995 1996 Linus Torvalds
* CRIS-port by Axis Communications AB
*/
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <arch/memmap.h>
/*
* Generic mapping function (not visible outside):
*/
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
void __iomem * addr;
struct vm_struct * area;
unsigned long offset, last_addr;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
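/*
* Added example: mapping phys_addr 0xb8000123 with size 0x10 yields
* offset 0x123, a page-aligned phys_addr of 0xb8000000 and a size
* rounded up to a whole page; the offset is added back onto the
* returned cookie below.
*/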
/*
* Ok, go for it..
*/
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
addr = (void __iomem *)area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, prot)) {
vfree((void __force *)addr);
return NULL;
}
return (void __iomem *) (offset + (char __iomem *)addr);
}
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
return __ioremap_prot(phys_addr, size,
__pgprot(_PAGE_PRESENT | __READABLE |
__WRITEABLE | _PAGE_GLOBAL |
_PAGE_KERNEL | flags));
}
/**
* ioremap_nocache - map bus memory into CPU space
* @phys_addr: bus address of the memory
* @size: size of the resource to map
*
* Must be freed with iounmap.
*/
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}
void iounmap(volatile void __iomem *addr)
{
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
| gpl-2.0 |
ac0ra/huawei-s7-dev | sound/soc/atmel/atmel_ssc_dai.c | 181 | 22574 | /*
* atmel_ssc_dai.c -- ALSA SoC ATMEL SSC Audio Layer Platform driver
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2008 Atmel
*
* Author: Sedji Gaouaou <sedji.gaouaou@atmel.com>
* ATMEL CORP.
*
* Based on at91-ssc.c by
* Frank Mandarino <fmandarino@endrelia.com>
* Based on pxa2xx Platform drivers by
* Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <mach/hardware.h>
#include "atmel-pcm.h"
#include "atmel_ssc_dai.h"
#if defined(CONFIG_ARCH_AT91SAM9260) || defined(CONFIG_ARCH_AT91SAM9G20)
#define NUM_SSC_DEVICES 1
#else
#define NUM_SSC_DEVICES 3
#endif
/*
* SSC PDC registers required by the PCM DMA engine.
*/
static struct atmel_pdc_regs pdc_tx_reg = {
.xpr = ATMEL_PDC_TPR,
.xcr = ATMEL_PDC_TCR,
.xnpr = ATMEL_PDC_TNPR,
.xncr = ATMEL_PDC_TNCR,
};
static struct atmel_pdc_regs pdc_rx_reg = {
.xpr = ATMEL_PDC_RPR,
.xcr = ATMEL_PDC_RCR,
.xnpr = ATMEL_PDC_RNPR,
.xncr = ATMEL_PDC_RNCR,
};
/*
* SSC & PDC status bits for transmit and receive.
*/
static struct atmel_ssc_mask ssc_tx_mask = {
.ssc_enable = SSC_BIT(CR_TXEN),
.ssc_disable = SSC_BIT(CR_TXDIS),
.ssc_endx = SSC_BIT(SR_ENDTX),
.ssc_endbuf = SSC_BIT(SR_TXBUFE),
.pdc_enable = ATMEL_PDC_TXTEN,
.pdc_disable = ATMEL_PDC_TXTDIS,
};
static struct atmel_ssc_mask ssc_rx_mask = {
.ssc_enable = SSC_BIT(CR_RXEN),
.ssc_disable = SSC_BIT(CR_RXDIS),
.ssc_endx = SSC_BIT(SR_ENDRX),
.ssc_endbuf = SSC_BIT(SR_RXBUFF),
.pdc_enable = ATMEL_PDC_RXTEN,
.pdc_disable = ATMEL_PDC_RXTDIS,
};
/*
* DMA parameters.
*/
static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
{{
.name = "SSC0 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC0 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
#if NUM_SSC_DEVICES == 3
{{
.name = "SSC1 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC1 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
{{
.name = "SSC2 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC2 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
#endif
};
static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
{
.name = "ssc0",
.lock = __SPIN_LOCK_UNLOCKED(ssc_info[0].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
#if NUM_SSC_DEVICES == 3
{
.name = "ssc1",
.lock = __SPIN_LOCK_UNLOCKED(ssc_info[1].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
{
.name = "ssc2",
.lock = __SPIN_LOCK_UNLOCKED(ssc_info[2].lock),
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
#endif
};
/*
* SSC interrupt handler. Passes PDC interrupts to the DMA
* interrupt handler in the PCM driver.
*/
static irqreturn_t atmel_ssc_interrupt(int irq, void *dev_id)
{
struct atmel_ssc_info *ssc_p = dev_id;
struct atmel_pcm_dma_params *dma_params;
u32 ssc_sr;
u32 ssc_substream_mask;
int i;
ssc_sr = (unsigned long)ssc_readl(ssc_p->ssc->regs, SR)
& (unsigned long)ssc_readl(ssc_p->ssc->regs, IMR);
/*
* Loop through the substreams attached to this SSC. If
* a DMA-related interrupt occurred on that substream, call
* the DMA interrupt handler function, if one has been
* registered in the dma_params structure by the PCM driver.
*/
for (i = 0; i < ARRAY_SIZE(ssc_p->dma_params); i++) {
dma_params = ssc_p->dma_params[i];
if ((dma_params != NULL) &&
(dma_params->dma_intr_handler != NULL)) {
ssc_substream_mask = (dma_params->mask->ssc_endx |
dma_params->mask->ssc_endbuf);
if (ssc_sr & ssc_substream_mask) {
dma_params->dma_intr_handler(ssc_sr,
dma_params->
substream);
}
}
}
return IRQ_HANDLED;
}
/*-------------------------------------------------------------------------*\
* DAI functions
\*-------------------------------------------------------------------------*/
/*
* Startup. Only one substream is allowed in each direction.
*/
static int atmel_ssc_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
int dir_mask;
pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
ssc_readl(ssc_p->ssc->regs, SR));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir_mask = SSC_DIR_MASK_PLAYBACK;
else
dir_mask = SSC_DIR_MASK_CAPTURE;
spin_lock_irq(&ssc_p->lock);
if (ssc_p->dir_mask & dir_mask) {
spin_unlock_irq(&ssc_p->lock);
return -EBUSY;
}
ssc_p->dir_mask |= dir_mask;
spin_unlock_irq(&ssc_p->lock);
return 0;
}
/*
* Shutdown. Clear the DMA parameters and shut down the SSC if no
* other substreams are open.
*/
static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
struct atmel_pcm_dma_params *dma_params;
int dir, dir_mask;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = ssc_p->dma_params[dir];
if (dma_params != NULL) {
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
pr_debug("atmel_ssc_shutdown: %s disabled SSC_SR=0x%08x\n",
(dir ? "receive" : "transmit"),
ssc_readl(ssc_p->ssc->regs, SR));
dma_params->ssc = NULL;
dma_params->substream = NULL;
ssc_p->dma_params[dir] = NULL;
}
dir_mask = 1 << dir;
spin_lock_irq(&ssc_p->lock);
ssc_p->dir_mask &= ~dir_mask;
if (!ssc_p->dir_mask) {
if (ssc_p->initialized) {
/* Shutdown the SSC clock. */
pr_debug("atmel_ssc_dau: Stopping clock\n");
clk_disable(ssc_p->ssc->clk);
free_irq(ssc_p->ssc->irq, ssc_p);
ssc_p->initialized = 0;
}
/* Reset the SSC */
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
/* Clear the SSC dividers */
ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
}
spin_unlock_irq(&ssc_p->lock);
}
/*
* Record the DAI format for use in hw_params().
*/
static int atmel_ssc_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct atmel_ssc_info *ssc_p = &ssc_info[cpu_dai->id];
ssc_p->daifmt = fmt;
return 0;
}
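/*
 * Example (hedged sketch using only the generic ASoC API, not any
 * particular board file): a machine driver selects this DAI's format
 * through the core, e.g. for I2S with the codec supplying the clocks:
 *
 *	ret = snd_soc_dai_set_fmt(cpu_dai,
 *			SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM);
 *	if (ret < 0)
 *		return ret;
 */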
/*
* Record SSC clock dividers for use in hw_params().
*/
static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct atmel_ssc_info *ssc_p = &ssc_info[cpu_dai->id];
switch (div_id) {
case ATMEL_SSC_CMR_DIV:
/*
* The same master clock divider is used for both
* transmit and receive, so if a value has already
* been set, it must match this value.
*/
if (ssc_p->cmr_div == 0)
ssc_p->cmr_div = div;
else
if (div != ssc_p->cmr_div)
return -EBUSY;
break;
case ATMEL_SSC_TCMR_PERIOD:
ssc_p->tcmr_period = div;
break;
case ATMEL_SSC_RCMR_PERIOD:
ssc_p->rcmr_period = div;
break;
default:
return -EINVAL;
}
return 0;
}
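/*
 * Example (hedged sketch): when the SSC is clock master, a machine
 * driver derives these dividers from the SSC input clock and the
 * frame geometry before hw_params runs. Assuming "mck" is the SSC
 * clock rate and a 2-channel frame of "bits"-bit samples:
 *
 *	bclk = rate * 2 * bits;
 *	snd_soc_dai_set_clkdiv(cpu_dai, ATMEL_SSC_CMR_DIV, mck / (2 * bclk));
 *	snd_soc_dai_set_clkdiv(cpu_dai, ATMEL_SSC_TCMR_PERIOD, bits - 1);
 *	snd_soc_dai_set_clkdiv(cpu_dai, ATMEL_SSC_RCMR_PERIOD, bits - 1);
 *
 * The exact divider arithmetic depends on the SSC clocking mode;
 * treat the numbers above as illustrative, not normative.
 */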
/*
* Configure the SSC.
*/
static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
int id = dai->id;
struct atmel_ssc_info *ssc_p = &ssc_info[id];
struct atmel_pcm_dma_params *dma_params;
int dir, channels, bits;
u32 tfmr, rfmr, tcmr, rcmr;
int start_event;
int ret;
/*
* Currently, there is only one set of dma params for
* each direction. If more are added, this code will
* have to be changed to select the proper set.
*/
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = &ssc_dma_params[id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
ssc_p->dma_params[dir] = dma_params;
/*
* The snd_soc_pcm_stream->dma_data field is only used to communicate
* the appropriate DMA parameters to the pcm driver hw_params()
* function. It should not be used for other purposes
* as it is common to all substreams.
*/
snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_params);
channels = params_channels(params);
/*
* Determine sample size in bits and the PDC increment.
*/
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
bits = 8;
dma_params->pdc_xfer_size = 1;
break;
case SNDRV_PCM_FORMAT_S16_LE:
bits = 16;
dma_params->pdc_xfer_size = 2;
break;
case SNDRV_PCM_FORMAT_S24_LE:
bits = 24;
dma_params->pdc_xfer_size = 4;
break;
case SNDRV_PCM_FORMAT_S32_LE:
bits = 32;
dma_params->pdc_xfer_size = 4;
break;
default:
printk(KERN_WARNING "atmel_ssc_dai: unsupported PCM format");
return -EINVAL;
}
/*
* The SSC only supports up to 16-bit samples in I2S format, due
* to the size of the Frame Mode Register FSLEN field.
*/
if ((ssc_p->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S
&& bits > 16) {
printk(KERN_WARNING
"atmel_ssc_dai: sample size %d "
"is too large for I2S\n", bits);
return -EINVAL;
}
/*
* Compute SSC register settings.
*/
switch (ssc_p->daifmt
& (SND_SOC_DAIFMT_FORMAT_MASK | SND_SOC_DAIFMT_MASTER_MASK)) {
case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS:
/*
* I2S format, SSC provides BCLK and LRC clocks.
*
* The SSC transmit and receive clocks are generated
* from the MCK divider, and the BCLK signal
* is output on the SSC TK line.
*/
rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, SSC_FSOS_NEGATIVE)
| SSC_BF(RFMR_FSLEN, (bits - 1))
| SSC_BF(RFMR_DATNB, (channels - 1))
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
| SSC_BF(TCMR_STTDLY, START_DELAY)
| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
| SSC_BF(TCMR_CKS, SSC_CKS_DIV);
tfmr = SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, SSC_FSOS_NEGATIVE)
| SSC_BF(TFMR_FSLEN, (bits - 1))
| SSC_BF(TFMR_DATNB, (channels - 1))
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
break;
case SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM:
/*
* I2S format, CODEC supplies BCLK and LRC clocks.
*
* The SSC transmit clock is obtained from the BCLK signal on
* the TK line, and the SSC receive clock is
* generated from the transmit clock.
*
* For single channel data, one sample is transferred
* on the falling edge of the LRC clock.
* For two channel data, one sample is
* transferred on both edges of the LRC clock.
*/
start_event = ((channels == 1)
? SSC_START_FALLING_RF
: SSC_START_EDGE_RF);
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, start_event)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_CLOCK);
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(RFMR_FSLEN, 0)
| SSC_BF(RFMR_DATNB, 0)
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tcmr = SSC_BF(TCMR_PERIOD, 0)
| SSC_BF(TCMR_STTDLY, START_DELAY)
| SSC_BF(TCMR_START, start_event)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
| SSC_BF(TCMR_CKS, SSC_CKS_PIN);
tfmr = SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(TFMR_FSLEN, 0)
| SSC_BF(TFMR_DATNB, 0)
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
break;
case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBS_CFS:
/*
* DSP/PCM Mode A format, SSC provides BCLK and LRC clocks.
*
* The SSC transmit and receive clocks are generated from the
* MCK divider, and the BCLK signal is output
* on the SSC TK line.
*/
rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
| SSC_BF(RCMR_STTDLY, 1)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, SSC_FSOS_POSITIVE)
| SSC_BF(RFMR_FSLEN, 0)
| SSC_BF(RFMR_DATNB, (channels - 1))
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
| SSC_BF(TCMR_STTDLY, 1)
| SSC_BF(TCMR_START, SSC_START_RISING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_RISING)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
| SSC_BF(TCMR_CKS, SSC_CKS_DIV);
tfmr = SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, SSC_FSOS_POSITIVE)
| SSC_BF(TFMR_FSLEN, 0)
| SSC_BF(TFMR_DATNB, (channels - 1))
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
break;
case SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_CBM_CFM:
default:
printk(KERN_WARNING "atmel_ssc_dai: unsupported DAI format 0x%x\n",
ssc_p->daifmt);
return -EINVAL;
}
pr_debug("atmel_ssc_hw_params: "
"RCMR=%08x RFMR=%08x TCMR=%08x TFMR=%08x\n",
rcmr, rfmr, tcmr, tfmr);
if (!ssc_p->initialized) {
/* Enable PMC peripheral clock for this SSC */
pr_debug("atmel_ssc_dai: Starting clock\n");
clk_enable(ssc_p->ssc->clk);
/* Reset the SSC and its PDC registers */
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
ssc_writel(ssc_p->ssc->regs, PDC_RPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RNPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RNCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TNPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TNCR, 0);
ret = request_irq(ssc_p->ssc->irq, atmel_ssc_interrupt, 0,
ssc_p->name, ssc_p);
if (ret < 0) {
printk(KERN_WARNING
"atmel_ssc_dai: request_irq failure\n");
pr_debug("Atmel_ssc_dai: Stoping clock\n");
clk_disable(ssc_p->ssc->clk);
return ret;
}
ssc_p->initialized = 1;
}
/* set SSC clock mode register */
ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->cmr_div);
/* set receive clock mode and format */
ssc_writel(ssc_p->ssc->regs, RCMR, rcmr);
ssc_writel(ssc_p->ssc->regs, RFMR, rfmr);
/* set transmit clock mode and format */
ssc_writel(ssc_p->ssc->regs, TCMR, tcmr);
ssc_writel(ssc_p->ssc->regs, TFMR, tfmr);
pr_debug("atmel_ssc_dai,hw_params: SSC initialized\n");
return 0;
}
static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
struct atmel_pcm_dma_params *dma_params;
int dir;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = ssc_p->dma_params[dir];
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
pr_debug("%s enabled SSC_SR=0x%08x\n",
dir ? "receive" : "transmit",
ssc_readl(ssc_p->ssc->regs, SR));
return 0;
}
#ifdef CONFIG_PM
static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
{
struct atmel_ssc_info *ssc_p;
if (!cpu_dai->active)
return 0;
ssc_p = &ssc_info[cpu_dai->id];
/* Save the status register before disabling transmit and receive */
ssc_p->ssc_state.ssc_sr = ssc_readl(ssc_p->ssc->regs, SR);
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_TXDIS) | SSC_BIT(CR_RXDIS));
/* Save the current interrupt mask, then disable unmasked interrupts */
ssc_p->ssc_state.ssc_imr = ssc_readl(ssc_p->ssc->regs, IMR);
ssc_writel(ssc_p->ssc->regs, IDR, ssc_p->ssc_state.ssc_imr);
ssc_p->ssc_state.ssc_cmr = ssc_readl(ssc_p->ssc->regs, CMR);
ssc_p->ssc_state.ssc_rcmr = ssc_readl(ssc_p->ssc->regs, RCMR);
ssc_p->ssc_state.ssc_rfmr = ssc_readl(ssc_p->ssc->regs, RFMR);
ssc_p->ssc_state.ssc_tcmr = ssc_readl(ssc_p->ssc->regs, TCMR);
ssc_p->ssc_state.ssc_tfmr = ssc_readl(ssc_p->ssc->regs, TFMR);
return 0;
}
static int atmel_ssc_resume(struct snd_soc_dai *cpu_dai)
{
struct atmel_ssc_info *ssc_p;
u32 cr;
if (!cpu_dai->active)
return 0;
ssc_p = &ssc_info[cpu_dai->id];
/* restore SSC register settings */
ssc_writel(ssc_p->ssc->regs, TFMR, ssc_p->ssc_state.ssc_tfmr);
ssc_writel(ssc_p->ssc->regs, TCMR, ssc_p->ssc_state.ssc_tcmr);
ssc_writel(ssc_p->ssc->regs, RFMR, ssc_p->ssc_state.ssc_rfmr);
ssc_writel(ssc_p->ssc->regs, RCMR, ssc_p->ssc_state.ssc_rcmr);
ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->ssc_state.ssc_cmr);
/* re-enable interrupts */
ssc_writel(ssc_p->ssc->regs, IER, ssc_p->ssc_state.ssc_imr);
/* Re-enable receive and transmit as appropriate */
cr = 0;
cr |=
(ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_RXEN)) ? SSC_BIT(CR_RXEN) : 0;
cr |=
(ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_TXEN)) ? SSC_BIT(CR_TXEN) : 0;
ssc_writel(ssc_p->ssc->regs, CR, cr);
return 0;
}
#else /* CONFIG_PM */
# define atmel_ssc_suspend NULL
# define atmel_ssc_resume NULL
#endif /* CONFIG_PM */
static int atmel_ssc_probe(struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
int ret = 0;
snd_soc_dai_set_drvdata(dai, ssc_p);
/*
* Request SSC device
*/
ssc_p->ssc = ssc_request(dai->id);
if (IS_ERR(ssc_p->ssc)) {
printk(KERN_ERR "ASoC: Failed to request SSC %d\n", dai->id);
ret = PTR_ERR(ssc_p->ssc);
}
return ret;
}
static int atmel_ssc_remove(struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = snd_soc_dai_get_drvdata(dai);
ssc_free(ssc_p->ssc);
return 0;
}
#define ATMEL_SSC_RATES (SNDRV_PCM_RATE_8000_96000)
#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_ops atmel_ssc_dai_ops = {
.startup = atmel_ssc_startup,
.shutdown = atmel_ssc_shutdown,
.prepare = atmel_ssc_prepare,
.hw_params = atmel_ssc_hw_params,
.set_fmt = atmel_ssc_set_dai_fmt,
.set_clkdiv = atmel_ssc_set_dai_clkdiv,
};
static struct snd_soc_dai_driver atmel_ssc_dai[NUM_SSC_DEVICES] = {
{
.name = "atmel-ssc-dai.0",
.probe = atmel_ssc_probe,
.remove = atmel_ssc_remove,
.suspend = atmel_ssc_suspend,
.resume = atmel_ssc_resume,
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
},
#if NUM_SSC_DEVICES == 3
{
.name = "atmel-ssc-dai.1",
.probe = atmel_ssc_probe,
.remove = atmel_ssc_remove,
.suspend = atmel_ssc_suspend,
.resume = atmel_ssc_resume,
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
},
{
.name = "atmel-ssc-dai.2",
.probe = atmel_ssc_probe,
.remove = atmel_ssc_remove,
.suspend = atmel_ssc_suspend,
.resume = atmel_ssc_resume,
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_SSC_RATES,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
},
#endif
};
static __devinit int asoc_ssc_probe(struct platform_device *pdev)
{
BUG_ON(pdev->id < 0);
BUG_ON(pdev->id >= ARRAY_SIZE(atmel_ssc_dai));
return snd_soc_register_dai(&pdev->dev, &atmel_ssc_dai[pdev->id]);
}
static int __devexit asoc_ssc_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&pdev->dev);
return 0;
}
static struct platform_driver asoc_ssc_driver = {
.driver = {
.name = "atmel-ssc-dai",
.owner = THIS_MODULE,
},
.probe = asoc_ssc_probe,
.remove = __devexit_p(asoc_ssc_remove),
};
/**
* atmel_ssc_set_audio - Allocate the specified SSC for audio use.
* @ssc_id: index of the SSC to allocate
*/
int atmel_ssc_set_audio(int ssc_id)
{
struct ssc_device *ssc;
static struct platform_device *dma_pdev;
struct platform_device *ssc_pdev;
int ret;
if (ssc_id < 0 || ssc_id >= ARRAY_SIZE(atmel_ssc_dai))
return -EINVAL;
/* Allocate a dummy device for DMA if we don't have one already */
if (!dma_pdev) {
dma_pdev = platform_device_alloc("atmel-pcm-audio", -1);
if (!dma_pdev)
return -ENOMEM;
ret = platform_device_add(dma_pdev);
if (ret < 0) {
platform_device_put(dma_pdev);
dma_pdev = NULL;
return ret;
}
}
ssc_pdev = platform_device_alloc("atmel-ssc-dai", ssc_id);
if (!ssc_pdev)
return -ENOMEM;
/* If we can, grab the SSC briefly to parent the DAI device off it */
ssc = ssc_request(ssc_id);
if (IS_ERR(ssc)) {
pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
PTR_ERR(ssc));
} else {
ssc_pdev->dev.parent = &(ssc->pdev->dev);
/* Only free the SSC if we actually obtained it */
ssc_free(ssc);
}
ret = platform_device_add(ssc_pdev);
if (ret < 0)
platform_device_put(ssc_pdev);
return ret;
}
EXPORT_SYMBOL_GPL(atmel_ssc_set_audio);
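/*
 * Example (hypothetical board-setup snippet): a machine driver claims
 * an SSC for audio from its init path, e.g.:
 *
 *	ret = atmel_ssc_set_audio(0);
 *	if (ret != 0)
 *		pr_err("Failed to set SSC 0 for audio: %d\n", ret);
 */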
static int __init snd_atmel_ssc_init(void)
{
return platform_driver_register(&asoc_ssc_driver);
}
module_init(snd_atmel_ssc_init);
static void __exit snd_atmel_ssc_exit(void)
{
platform_driver_unregister(&asoc_ssc_driver);
}
module_exit(snd_atmel_ssc_exit);
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou, sedji.gaouaou@atmel.com, www.atmel.com");
MODULE_DESCRIPTION("ATMEL SSC ASoC Interface");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Abhishek-karmakar/caf-kernel-msm8916 | mm/filemap.c | 437 | 69428 | /*
* linux/mm/filemap.c
*
* Copyright (C) 1994-1999 Linus Torvalds
*/
/*
* This file handles the generic file mmap semantics used by
* most "normal" filesystems (but you don't /have/ to use this:
* the NFS filesystem used to do this differently, for example)
*/
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>
/*
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
#include <linux/buffer_head.h> /* for try_to_free_buffers */
#include <asm/mman.h>
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
*
* Shared mappings now work. 15.8.1995 Bruno.
*
* finished 'unifying' the page and buffer cache and SMP-threaded the
* page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
*
* SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
*/
/*
* Lock ordering:
*
* ->i_mmap_mutex (truncate_pagecache)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
* ->i_mutex
* ->i_mmap_mutex (truncate->unmap_mapping_range)
*
* ->mmap_sem
* ->i_mmap_mutex
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
*
* ->mmap_sem
* ->lock_page (access_process_vm)
*
* ->i_mutex (generic_file_buffered_write)
* ->mmap_sem (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
* sb_lock (fs/fs-writeback.c)
* ->mapping->tree_lock (__sync_single_inode)
*
* ->i_mmap_mutex
* ->anon_vma.lock (vma_adjust)
*
* ->anon_vma.lock
* ->page_table_lock or pte_lock (anon_vma_prepare and various)
*
* ->page_table_lock or pte_lock
* ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->tree_lock (try_to_unmap_one)
* ->zone.lru_lock (follow_page->mark_page_accessed)
* ->zone.lru_lock (check_pte_range->isolate_lru_page)
* ->private_lock (page_remove_rmap->set_page_dirty)
* ->tree_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
* ->inode->i_lock (page_remove_rmap->set_page_dirty)
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
* ->i_mmap_mutex
* ->tasklist_lock (memory_failure, collect_procs_ao)
*/
/*
* Delete a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
* is safe. The caller must hold the mapping's tree_lock.
*/
void __delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
trace_mm_filemap_delete_from_page_cache(page);
/*
* if we're uptodate, flush out into the cleancache, otherwise
* invalidate any existing cleancache entries. We can't leave
* stale data around in the cleancache once our page is gone
*/
if (PageUptodate(page) && PageMappedToDisk(page))
cleancache_put_page(page);
else
cleancache_invalidate_page(mapping, page);
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
/* Leave page->index set: truncation lookup relies upon it */
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
if (PageSwapBacked(page))
__dec_zone_page_state(page, NR_SHMEM);
BUG_ON(page_mapped(page));
/*
* Some filesystems seem to re-dirty the page even after
* the VM has canceled the dirty bit (eg ext3 journaling).
*
* Fix it up by doing a final dirty accounting check after
* having removed the page entirely.
*/
if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
}
}
/**
* delete_from_page_cache - delete page from page cache
* @page: the page which the kernel is trying to remove from page cache
*
* This must be called only on pages that have been verified to be in the page
* cache and locked. It will never put the page into the free list, the caller
* has a reference on the page.
*/
void delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
void (*freepage)(struct page *);
BUG_ON(!PageLocked(page));
freepage = mapping->a_ops->freepage;
spin_lock_irq(&mapping->tree_lock);
__delete_from_page_cache(page);
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
if (freepage)
freepage(page);
page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
static int sleep_on_page(void *word)
{
io_schedule();
return 0;
}
static int sleep_on_page_killable(void *word)
{
sleep_on_page(word);
return fatal_signal_pending(current) ? -EINTR : 0;
}
static int filemap_check_errors(struct address_space *mapping)
{
int ret = 0;
/* Check for outstanding write errors */
if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
ret = -ENOSPC;
if (test_and_clear_bit(AS_EIO, &mapping->flags))
ret = -EIO;
return ret;
}
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
* @start: offset in bytes where the range starts
* @end: offset in bytes where the range ends (inclusive)
* @sync_mode: enable synchronous operation
*
* Start writeback against all of a mapping's dirty pages that lie
* within the byte offsets <start, end> inclusive.
*
* If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
* opposed to a regular memory cleansing writeback. The difference between
* these two operations is that if a dirty page/buffer is encountered, it must
* be waited upon, and not just skipped over.
*/
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
loff_t end, int sync_mode)
{
int ret;
struct writeback_control wbc = {
.sync_mode = sync_mode,
.nr_to_write = LONG_MAX,
.range_start = start,
.range_end = end,
};
if (!mapping_cap_writeback_dirty(mapping))
return 0;
ret = do_writepages(mapping, &wbc);
return ret;
}
static inline int __filemap_fdatawrite(struct address_space *mapping,
int sync_mode)
{
return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
int filemap_fdatawrite(struct address_space *mapping)
{
return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);
int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
loff_t end)
{
return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
/**
* filemap_flush - mostly a non-blocking flush
* @mapping: target address_space
*
* This is a mostly non-blocking flush. Not suitable for data-integrity
* purposes - I/O may not be started against all dirty pages.
*/
int filemap_flush(struct address_space *mapping)
{
return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
/**
* filemap_fdatawait_range - wait for writeback to complete
* @mapping: address space structure to wait for
* @start_byte: offset in bytes where the range starts
* @end_byte: offset in bytes where the range ends (inclusive)
*
* Walk the list of under-writeback pages of the given address space
* in the given range and wait for all of them.
*/
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
loff_t end_byte)
{
pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
struct pagevec pvec;
int nr_pages;
int ret2, ret = 0;
if (end_byte < start_byte)
goto out;
pagevec_init(&pvec, 0);
while ((index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_WRITEBACK,
min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
unsigned i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
/* until radix tree lookup accepts end_index */
if (page->index > end)
continue;
wait_on_page_writeback(page);
if (TestClearPageError(page))
ret = -EIO;
}
pagevec_release(&pvec);
cond_resched();
}
out:
ret2 = filemap_check_errors(mapping);
if (!ret)
ret = ret2;
return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
* filemap_fdatawait - wait for all under-writeback pages to complete
* @mapping: address space structure to wait for
*
* Walk the list of under-writeback pages of the given address space
* and wait for all of them.
*/
int filemap_fdatawait(struct address_space *mapping)
{
loff_t i_size = i_size_read(mapping->host);
if (i_size == 0)
return 0;
return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
int filemap_write_and_wait(struct address_space *mapping)
{
int err = 0;
if (mapping->nrpages) {
err = filemap_fdatawrite(mapping);
/*
* Even if the above returned error, the pages may be
* written partially (e.g. -ENOSPC), so we wait for it.
* But the -EIO is special case, it may indicate the worst
* thing (e.g. bug) happened, so we avoid waiting for it.
*/
if (err != -EIO) {
int err2 = filemap_fdatawait(mapping);
if (!err)
err = err2;
}
} else {
err = filemap_check_errors(mapping);
}
return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
/**
* filemap_write_and_wait_range - write out & wait on a file range
* @mapping: the address_space for the pages
* @lstart: offset in bytes where the range starts
* @lend: offset in bytes where the range ends (inclusive)
*
* Write out and wait upon file offsets lstart->lend, inclusive.
*
* Note that `lend' is inclusive (describes the last byte to be written) so
* that this function can be used to write to the very end-of-file (end = -1).
*/
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
int err = 0;
if (mapping->nrpages) {
err = __filemap_fdatawrite_range(mapping, lstart, lend,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
if (err != -EIO) {
int err2 = filemap_fdatawait_range(mapping,
lstart, lend);
if (!err)
err = err2;
}
} else {
err = filemap_check_errors(mapping);
}
return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
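/*
 * Example (hedged sketch): a filesystem ->fsync() implementation
 * typically begins with this helper so that data pages are stable
 * before any metadata commit, e.g.:
 *
 *	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *	if (err)
 *		return err;
 *	... commit metadata under i_mutex ...
 */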
/**
* replace_page_cache_page - replace a pagecache page with a new one
* @old: page to be replaced
* @new: page to replace with
* @gfp_mask: allocation mode
*
* This function replaces a page in the pagecache with a new one. On
* success it acquires the pagecache reference for the new page and
* drops it for the old page. Both the old and new pages must be
* locked. This function does not add the new page to the LRU, the
* caller must do that.
*
* The remove + add is atomic. The only way this function can fail is
* memory allocation failure.
*/
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
int error;
VM_BUG_ON(!PageLocked(old));
VM_BUG_ON(!PageLocked(new));
VM_BUG_ON(new->mapping);
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (!error) {
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *);
pgoff_t offset = old->index;
freepage = mapping->a_ops->freepage;
page_cache_get(new);
new->mapping = mapping;
new->index = offset;
spin_lock_irq(&mapping->tree_lock);
__delete_from_page_cache(old);
error = radix_tree_insert(&mapping->page_tree, offset, new);
BUG_ON(error);
mapping->nrpages++;
__inc_zone_page_state(new, NR_FILE_PAGES);
if (PageSwapBacked(new))
__inc_zone_page_state(new, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
/* mem_cgroup codes must not be called under tree_lock */
mem_cgroup_replace_page_cache(old, new);
radix_tree_preload_end();
if (freepage)
freepage(old);
page_cache_release(old);
}
return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
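/*
 * Example (hedged sketch): a migration-style caller, holding both
 * pages locked, would do roughly:
 *
 *	err = replace_page_cache_page(old, new, GFP_KERNEL);
 *	if (!err)
 *		lru_cache_add_file(new);
 *
 * where adding the new page to the LRU is the caller's job, as the
 * comment above notes.
 */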
/**
* add_to_page_cache_locked - add a locked page to the pagecache
* @page: page to add
* @mapping: the page's address_space
* @offset: page index
* @gfp_mask: page allocation mode
*
* This function is used to add a page to the pagecache. It must be locked.
* This function does not add the page to the LRU. The caller must do that.
*/
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageSwapBacked(page));
error = mem_cgroup_cache_charge(page, current->mm,
gfp_mask & GFP_RECLAIM_MASK);
if (error)
goto out;
error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (error == 0) {
page_cache_get(page);
page->mapping = mapping;
page->index = offset;
spin_lock_irq(&mapping->tree_lock);
error = radix_tree_insert(&mapping->page_tree, offset, page);
if (likely(!error)) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
spin_unlock_irq(&mapping->tree_lock);
trace_mm_filemap_add_to_page_cache(page);
} else {
page->mapping = NULL;
/* Leave page->index set: truncation relies upon it */
spin_unlock_irq(&mapping->tree_lock);
mem_cgroup_uncharge_cache_page(page);
page_cache_release(page);
}
radix_tree_preload_end();
} else
mem_cgroup_uncharge_cache_page(page);
out:
return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
int ret;
ret = add_to_page_cache(page, mapping, offset, gfp_mask);
if (ret == 0)
lru_cache_add_file(page);
return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
int n;
struct page *page;
if (cpuset_do_page_mem_spread()) {
unsigned int cpuset_mems_cookie;
do {
cpuset_mems_cookie = get_mems_allowed();
n = cpuset_mem_spread_node();
page = alloc_pages_exact_node(n, gfp, 0);
} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
return page;
}
return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
/*
* In order to wait for pages to become available there must be
* waitqueues associated with pages. By using a hash table of
* waitqueues where the bucket discipline is to maintain all
* waiters on the same queue and wake all when any of the pages
* become available, and for the woken contexts to check to be
* sure the appropriate page became available, this saves space
* at a cost of "thundering herd" phenomena during rare hash
* collisions.
*/
static wait_queue_head_t *page_waitqueue(struct page *page)
{
const struct zone *zone = page_zone(page);
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
static inline void wake_up_page(struct page *page, int bit)
{
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (test_bit(bit_nr, &page->flags))
__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
if (!test_bit(bit_nr, &page->flags))
return 0;
return __wait_on_bit(page_waitqueue(page), &wait,
sleep_on_page_killable, TASK_KILLABLE);
}
/**
* add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
* @page: Page defining the wait queue of interest
* @waiter: Waiter to add to the queue
*
* Add an arbitrary @waiter to the wait queue for the nominated @page.
*/
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
wait_queue_head_t *q = page_waitqueue(page);
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, waiter);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
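/*
 * Example (hedged sketch, modelled loosely on how an asynchronous
 * monitor can use this): install a waiter with a custom wake function
 * instead of sleeping in wait_on_page_locked():
 *
 *	init_waitqueue_func_entry(&monitor->waiter, my_wake_fn);
 *	add_page_wait_queue(page, &monitor->waiter);
 *
 * my_wake_fn() then runs when the page is unlocked or finishes
 * writeback; "monitor" and "my_wake_fn" are illustrative names.
 */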
/**
* unlock_page - unlock a locked page
* @page: the page
*
* Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
* Also wakes sleepers in wait_on_page_writeback() because the wakeup
* mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
*
* The mb is necessary to enforce ordering between the clear_bit and the read
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
*/
void unlock_page(struct page *page)
{
VM_BUG_ON(!PageLocked(page));
clear_bit_unlock(PG_locked, &page->flags);
smp_mb__after_atomic();
wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/**
* end_page_writeback - end writeback against a page
* @page: the page
*/
void end_page_writeback(struct page *page)
{
if (TestClearPageReclaim(page))
rotate_reclaimable_page(page);
if (!test_clear_page_writeback(page))
BUG();
smp_mb__after_atomic();
wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
/**
* __lock_page - get a lock on the page, assuming we need to sleep to get it
* @page: the page to lock
*/
void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);
int __lock_page_killable(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
return __wait_on_bit_lock(page_waitqueue(page), &wait,
sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
if (flags & FAULT_FLAG_ALLOW_RETRY) {
/*
* CAUTION! In this case, mmap_sem is not released
* even though return 0.
*/
if (flags & FAULT_FLAG_RETRY_NOWAIT)
return 0;
up_read(&mm->mmap_sem);
if (flags & FAULT_FLAG_KILLABLE)
wait_on_page_locked_killable(page);
else
wait_on_page_locked(page);
return 0;
} else {
if (flags & FAULT_FLAG_KILLABLE) {
int ret;
ret = __lock_page_killable(page);
if (ret) {
up_read(&mm->mmap_sem);
return 0;
}
} else
__lock_page(page);
return 1;
}
}
/**
* find_get_page - find and get a page reference
* @mapping: the address_space to search
* @offset: the page index
*
* Is there a pagecache struct page at the given (mapping, offset) tuple?
* If yes, increment its refcount and return it; if no, return NULL.
*/
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
void **pagep;
struct page *page;
rcu_read_lock();
repeat:
page = NULL;
pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
if (pagep) {
page = radix_tree_deref_slot(pagep);
if (unlikely(!page))
goto out;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page))
goto repeat;
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so return it without
* attempting to raise page count.
*/
goto out;
}
if (!page_cache_get_speculative(page))
goto repeat;
/*
* Has the page moved?
* This is part of the lockless pagecache protocol. See
* include/linux/pagemap.h for details.
*/
if (unlikely(page != *pagep)) {
page_cache_release(page);
goto repeat;
}
}
out:
rcu_read_unlock();
return page;
}
EXPORT_SYMBOL(find_get_page);
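/*
 * Example usage (sketch): the caller owns the reference returned by
 * find_get_page() and must drop it when done:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... inspect or copy from the page ...
 *		page_cache_release(page);
 *	}
 */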
/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
*
* Locates the desired pagecache page, locks it, increments its reference
* count and returns its address.
*
* Returns NULL if the page was not present. find_lock_page() may sleep.
*/
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
struct page *page;
repeat:
page = find_get_page(mapping, offset);
if (page && !radix_tree_exception(page)) {
lock_page(page);
/* Has the page been truncated? */
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
page_cache_release(page);
goto repeat;
}
VM_BUG_ON(page->index != offset);
}
return page;
}
EXPORT_SYMBOL(find_lock_page);
/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
*
* Locates a page in the pagecache. If the page is not present, a new page
* is allocated using @gfp_mask and is added to the pagecache and to the VM's
* LRU list. The returned page is locked and has its reference count
* incremented.
*
* find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
* allocation!
*
* find_or_create_page() returns the desired page's address, or NULL on
* memory exhaustion.
*/
struct page *find_or_create_page(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask)
{
struct page *page;
int err;
repeat:
page = find_lock_page(mapping, index);
if (!page) {
page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
/*
* We want a regular kernel memory (not highmem or DMA etc)
* allocation for the radix tree nodes, but we need to honour
* the context-specific requirements the caller has asked for.
* GFP_RECLAIM_MASK collects those requirements.
*/
err = add_to_page_cache_lru(page, mapping, index,
(gfp_mask & GFP_RECLAIM_MASK));
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
if (err == -EEXIST)
goto repeat;
}
}
return page;
}
EXPORT_SYMBOL(find_or_create_page);
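/*
 * Example (hedged sketch): a typical fill-or-reuse pattern; the page
 * comes back locked with an elevated refcount, so both must be
 * released:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... initialize the page contents ...
 *	SetPageUptodate(page);
 *	unlock_page(page);
 *	page_cache_release(page);
 */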
/**
* find_get_pages - gang pagecache lookup
* @mapping: The address_space to search
* @start: The starting page index
* @nr_pages: The maximum number of pages
* @pages: Where the resulting pages are placed
*
* find_get_pages() will search for and return a group of up to
* @nr_pages pages in the mapping. The pages are placed at @pages.
* find_get_pages() takes a reference against the returned pages.
*
* The search returns a group of mapping-contiguous pages with ascending
* indexes. There may be holes in the indices due to not-present pages.
*
* find_get_pages() returns the number of pages which were found.
*/
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
WARN_ON(iter.index);
goto restart;
}
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so skip over it -
* we only reach this from invalidate_mapping_pages().
*/
continue;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
/**
* find_get_pages_contig - gang contiguous pagecache lookup
* @mapping: The address_space to search
* @index: The starting page index
* @nr_pages: The maximum number of pages
* @pages: Where the resulting pages are placed
*
* find_get_pages_contig() works exactly like find_get_pages(), except
* that the returned number of pages are guaranteed to be contiguous.
*
* find_get_pages_contig() returns the number of pages which were found.
*/
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned int ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
/* A hole - there is no reason to continue */
if (unlikely(!page))
break;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
goto restart;
}
/*
* Otherwise, shmem/tmpfs must be storing a swap entry
* here as an exceptional entry: so stop looking for
* contiguous pages.
*/
break;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
/*
* We must check mapping and index after taking the ref;
* otherwise we can get both false positives and false
* negatives, which is just confusing to the caller.
*/
if (page->mapping == NULL || page->index != iter.index) {
page_cache_release(page);
break;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
/**
* find_get_pages_tag - find and return pages that match @tag
* @mapping: the address_space to search
* @index: the starting page index
* @tag: the tag index
* @nr_pages: the maximum number of pages
* @pages: where the resulting pages are placed
*
* Like find_get_pages, except we only return pages which are tagged with
* @tag. We update @index to index the next page for the traversal.
*/
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
unsigned ret = 0;
if (unlikely(!nr_pages))
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_tagged(slot, &mapping->page_tree,
&iter, *index, tag) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
/*
* Transient condition which can only trigger
* when entry at index 0 moves out of or back
* to root: none yet gotten, safe to restart.
*/
goto restart;
}
/*
* This function is never used on a shmem/tmpfs
* mapping, so a swap entry won't be found here.
*/
BUG();
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
if (ret)
*index = pages[ret - 1]->index + 1;
return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
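/*
 * Example (sketch): a simple scan over dirty-tagged pages, releasing
 * each page reference as it goes:
 *
 *	pgoff_t index = 0;
 *	struct page *pages[PAGEVEC_SIZE];
 *	unsigned i, n;
 *
 *	while ((n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
 *				       PAGEVEC_SIZE, pages)) != 0) {
 *		for (i = 0; i < n; i++) {
 *			... process pages[i] ...
 *			page_cache_release(pages[i]);
 *		}
 *	}
 */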
/**
* grab_cache_page_nowait - returns locked page at given index in given cache
* @mapping: target address_space
* @index: the page index
*
* Same as grab_cache_page(), but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
*
* Clear __GFP_FS when allocating the page to avoid recursion into the fs
* and deadlock against the caller's locked page.
*/
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
struct page *page = find_get_page(mapping, index);
if (page) {
if (trylock_page(page))
return page;
page_cache_release(page);
return NULL;
}
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
page_cache_release(page);
page = NULL;
}
return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
/*
* CD/DVDs are error prone. When a medium error occurs, the driver may fail
* a _large_ part of the i/o request. Imagine the worst scenario:
*
* ---R__________________________________________B__________
* ^ reading here ^ bad block(assume 4k)
*
* read(R) => miss => readahead(R...B) => media error => frustrating retries
* => failing the whole request => read(R) => read(R+1) =>
* readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
* readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
* readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
*
* It is going insane. Fix it by quickly scaling down the readahead size.
*/
static void shrink_readahead_size_eio(struct file *filp,
struct file_ra_state *ra)
{
ra->ra_pages /= 4;
}
/**
* do_generic_file_read - generic file read routine
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
* @actor: read method
*
* This is a generic file read routine, and uses the
* mapping->a_ops->readpage() function for the actual low-level stuff.
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
*/
static void do_generic_file_read(struct file *filp, loff_t *ppos,
read_descriptor_t *desc, read_actor_t actor)
{
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
struct file_ra_state *ra = &filp->f_ra;
pgoff_t index;
pgoff_t last_index;
pgoff_t prev_index;
unsigned long offset; /* offset into pagecache page */
unsigned int prev_offset;
int error;
index = *ppos >> PAGE_CACHE_SHIFT;
prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
for (;;) {
struct page *page;
pgoff_t end_index;
loff_t isize;
unsigned long nr, ret;
cond_resched();
find_page:
page = find_get_page(mapping, index);
if (!page) {
page_cache_sync_readahead(mapping,
ra, filp,
index, last_index - index);
page = find_get_page(mapping, index);
if (unlikely(page == NULL))
goto no_cached_page;
}
if (PageReadahead(page)) {
page_cache_async_readahead(mapping,
ra, filp, page,
index, last_index - index);
}
if (!PageUptodate(page)) {
if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
if (!trylock_page(page))
goto page_not_up_to_date;
/* Did it get truncated before we got the lock? */
if (!page->mapping)
goto page_not_up_to_date_locked;
if (!mapping->a_ops->is_partially_uptodate(page,
desc, offset))
goto page_not_up_to_date_locked;
unlock_page(page);
}
page_ok:
/*
* i_size must be checked after we know the page is Uptodate.
*
* Checking i_size after the check allows us to calculate
* the correct value for "nr", which means the zero-filled
* part of the page is not copied back to userspace (unless
* another truncate extends the file - this is desired though).
*/
isize = i_size_read(inode);
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(!isize || index > end_index)) {
page_cache_release(page);
goto out;
}
/* nr is the maximum number of bytes to copy from this page */
nr = PAGE_CACHE_SIZE;
if (index == end_index) {
nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
if (nr <= offset) {
page_cache_release(page);
goto out;
}
}
nr = nr - offset;
/* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* When a sequential read accesses a page several times,
* only mark it as accessed the first time.
*/
if (prev_index != index || offset != prev_offset)
mark_page_accessed(page);
prev_index = index;
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
* we filled up (we may be padding etc), so we can only update
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
ret = actor(desc, page, offset, nr);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
prev_offset = offset;
page_cache_release(page);
if (ret == nr && desc->count)
continue;
goto out;
page_not_up_to_date:
/* Get exclusive access to the page ... */
error = lock_page_killable(page);
if (unlikely(error))
goto readpage_error;
page_not_up_to_date_locked:
/* Did it get truncated before we got the lock? */
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
continue;
}
/* Did somebody else fill it already? */
if (PageUptodate(page)) {
unlock_page(page);
goto page_ok;
}
readpage:
/*
* A previous I/O error may have been due to temporary
* failures, eg. multipath errors.
* PG_error will be set again if readpage fails.
*/
ClearPageError(page);
/* Start the actual read. The read will unlock the page. */
error = mapping->a_ops->readpage(filp, page);
if (unlikely(error)) {
if (error == AOP_TRUNCATED_PAGE) {
page_cache_release(page);
goto find_page;
}
goto readpage_error;
}
if (!PageUptodate(page)) {
error = lock_page_killable(page);
if (unlikely(error))
goto readpage_error;
if (!PageUptodate(page)) {
if (page->mapping == NULL) {
/*
* invalidate_mapping_pages got it
*/
unlock_page(page);
page_cache_release(page);
goto find_page;
}
unlock_page(page);
shrink_readahead_size_eio(filp, ra);
error = -EIO;
goto readpage_error;
}
unlock_page(page);
}
goto page_ok;
readpage_error:
/* UHHUH! A synchronous read error occurred. Report it */
desc->error = error;
page_cache_release(page);
goto out;
no_cached_page:
/*
* Ok, it wasn't cached, so we need to create a new
* page..
*/
page = page_cache_alloc_cold(mapping);
if (!page) {
desc->error = -ENOMEM;
goto out;
}
error = add_to_page_cache_lru(page, mapping,
index, GFP_KERNEL);
if (error) {
page_cache_release(page);
if (error == -EEXIST)
goto find_page;
desc->error = error;
goto out;
}
goto readpage;
}
out:
ra->prev_pos = prev_index;
ra->prev_pos <<= PAGE_CACHE_SHIFT;
ra->prev_pos |= prev_offset;
*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
}
int file_read_actor(read_descriptor_t *desc, struct page *page,
unsigned long offset, unsigned long size)
{
char *kaddr;
unsigned long left, count = desc->count;
if (size > count)
size = count;
/*
* Faults on the destination of a read are common, so do it before
* taking the kmap.
*/
if (!fault_in_pages_writeable(desc->arg.buf, size)) {
kaddr = kmap_atomic(page);
left = __copy_to_user_inatomic(desc->arg.buf,
kaddr + offset, size);
kunmap_atomic(kaddr);
if (left == 0)
goto success;
}
/* Do it the slow way */
kaddr = kmap(page);
left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
kunmap(page);
if (left) {
size -= left;
desc->error = -EFAULT;
}
success:
desc->count = count - size;
desc->written += size;
desc->arg.buf += size;
return size;
}
/**
* generic_segment_checks - performs necessary checks before doing a write
* @iov: io vector request
* @nr_segs: number of segments in the iovec
* @count: number of bytes to write
* @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
*
* Adjust number of segments and amount of bytes to write (nr_segs should be
* properly initialized first). Returns appropriate error code that caller
* should return or zero in case that write should be allowed.
*/
int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags)
{
unsigned long seg;
size_t cnt = 0;
for (seg = 0; seg < *nr_segs; seg++) {
const struct iovec *iv = &iov[seg];
/*
* If any segment has a negative length, or the cumulative
* length ever wraps negative then return -EINVAL.
*/
cnt += iv->iov_len;
if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
return -EINVAL;
if (access_ok(access_flags, iv->iov_base, iv->iov_len))
continue;
if (seg == 0)
return -EFAULT;
*nr_segs = seg;
cnt -= iv->iov_len; /* This segment is no good */
break;
}
*count = cnt;
return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
/**
* generic_file_aio_read - generic filesystem read routine
* @iocb: kernel I/O control block
* @iov: io vector request
* @nr_segs: number of segments in the iovec
* @pos: current file position
*
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
*/
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
ssize_t retval;
unsigned long seg = 0;
size_t count;
loff_t *ppos = &iocb->ki_pos;
count = 0;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
loff_t size;
struct address_space *mapping;
struct inode *inode;
mapping = filp->f_mapping;
inode = mapping->host;
if (!count)
goto out; /* skip atime */
size = i_size_read(inode);
if (pos < size) {
retval = filemap_write_and_wait_range(mapping, pos,
pos + iov_length(iov, nr_segs) - 1);
if (!retval) {
retval = mapping->a_ops->direct_IO(READ, iocb,
iov, pos, nr_segs);
}
if (retval > 0) {
*ppos = pos + retval;
count -= retval;
}
/*
* Btrfs can have a short DIO read if we encounter
* compressed extents, so if there was an error, or if
* we've already read everything we wanted to, or if
* there was a short read because we hit EOF, go ahead
* and return. Otherwise fallthrough to buffered io for
* the rest of the read.
*/
if (retval < 0 || !count || *ppos >= size) {
file_accessed(filp);
goto out;
}
}
}
count = retval;
for (seg = 0; seg < nr_segs; seg++) {
read_descriptor_t desc;
loff_t offset = 0;
/*
* If we did a short DIO read we need to skip the section of the
* iov that we've already read data into.
*/
if (count) {
if (count > iov[seg].iov_len) {
count -= iov[seg].iov_len;
continue;
}
offset = count;
count = 0;
}
desc.written = 0;
desc.arg.buf = iov[seg].iov_base + offset;
desc.count = iov[seg].iov_len - offset;
if (desc.count == 0)
continue;
desc.error = 0;
do_generic_file_read(filp, ppos, &desc, file_read_actor);
retval += desc.written;
if (desc.error) {
retval = retval ?: desc.error;
break;
}
if (desc.count > 0)
break;
}
out:
return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);
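/*
 * Example (sketch, matching common practice in this kernel era): most
 * filesystems wire this helper straight into their file_operations:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		...
 *	};
 *
 * "myfs_file_operations" is an illustrative name.
 */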
#ifdef CONFIG_MMU
/**
* page_cache_read - adds requested page to the page cache if not already there
* @file: file to read
* @offset: page index
*
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
static int page_cache_read(struct file *file, pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
int ret;
do {
page = page_cache_alloc_cold(mapping);
if (!page)
return -ENOMEM;
ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
if (ret == 0)
ret = mapping->a_ops->readpage(file, page);
else if (ret == -EEXIST)
ret = 0; /* losing race to add is OK */
page_cache_release(page);
} while (ret == AOP_TRUNCATED_PAGE);
return ret;
}
#define MMAP_LOTSAMISS (100)
/*
* Synchronous readahead happens when we don't even find
* a page in the page cache at all.
*/
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
pgoff_t offset)
{
unsigned long ra_pages;
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
return;
if (!ra->ra_pages)
return;
if (VM_SequentialReadHint(vma)) {
page_cache_sync_readahead(mapping, ra, file, offset,
ra->ra_pages);
return;
}
/* Avoid banging the cache line if not needed */
if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
ra->mmap_miss++;
/*
* Do we miss much more than hit in this file? If so,
* stop bothering with read-ahead. It will only hurt.
*/
if (ra->mmap_miss > MMAP_LOTSAMISS)
return;
/*
* mmap read-around
*/
ra_pages = max_sane_readahead(ra->ra_pages);
ra->start = max_t(long, 0, offset - ra_pages / 2);
ra->size = ra_pages;
ra->async_size = ra_pages / 4;
ra_submit(ra, mapping, file);
}
/*
* Asynchronous readahead happens when we find the page and PG_readahead,
* so we want to possibly extend the readahead further..
*/
static void do_async_mmap_readahead(struct vm_area_struct *vma,
struct file_ra_state *ra,
struct file *file,
struct page *page,
pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
return;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
if (PageReadahead(page))
page_cache_async_readahead(mapping, ra, file,
page, offset, ra->ra_pages);
}
/**
* filemap_fault - read in file data for page fault handling
* @vma: vma in which the fault was taken
* @vmf: struct vm_fault containing details of the fault
*
* filemap_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
* The goto's are kind of ugly, but this streamlines the normal case of having
* it in the page cache, and handles the special cases reasonably without
* having a lot of duplicated code.
*/
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int error;
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct file_ra_state *ra = &file->f_ra;
struct inode *inode = mapping->host;
pgoff_t offset = vmf->pgoff;
struct page *page;
pgoff_t size;
int ret = 0;
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (offset >= size)
return VM_FAULT_SIGBUS;
/*
* Do we have something in the page cache already?
*/
page = find_get_page(mapping, offset);
if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
/*
* We found the page, so try async readahead before
* waiting for the lock.
*/
do_async_mmap_readahead(vma, ra, file, page, offset);
} else if (!page) {
/* No page in the page cache at all */
do_sync_mmap_readahead(vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
if (!page)
goto no_cached_page;
}
if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
page_cache_release(page);
return ret | VM_FAULT_RETRY;
}
/* Did it get truncated? */
if (unlikely(page->mapping != mapping)) {
unlock_page(page);
put_page(page);
goto retry_find;
}
VM_BUG_ON(page->index != offset);
/*
* We have a locked page in the page cache, now we need to check
* that it's up-to-date. If not, it is going to be due to an error.
*/
if (unlikely(!PageUptodate(page)))
goto page_not_uptodate;
/*
* Found the page and have a reference on it.
* We must recheck i_size under page lock.
*/
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(offset >= size)) {
unlock_page(page);
page_cache_release(page);
return VM_FAULT_SIGBUS;
}
vmf->page = page;
return ret | VM_FAULT_LOCKED;
no_cached_page:
/*
* We're only likely to ever get here if MADV_RANDOM is in
* effect.
*/
error = page_cache_read(file, offset);
/*
* The page we want has now been added to the page cache.
* In the unlikely event that someone removed it in the
* meantime, we'll just come back here and read it again.
*/
if (error >= 0)
goto retry_find;
/*
* An error return from page_cache_read can result if the
* system is low on memory, or a problem occurs while trying
* to schedule I/O.
*/
if (error == -ENOMEM)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
page_not_uptodate:
/*
* Umm, take care of errors if the page isn't up-to-date.
* Try to re-read it _once_. We do this synchronously,
* because there really aren't any performance issues here
* and we need to check for errors.
*/
ClearPageError(page);
error = mapping->a_ops->readpage(file, page);
if (!error) {
wait_on_page_locked(page);
if (!PageUptodate(page))
error = -EIO;
}
page_cache_release(page);
if (!error || error == AOP_TRUNCATED_PAGE)
goto retry_find;
/* Things didn't work out; tell the mm layer so with SIGBUS. */
shrink_readahead_size_eio(file, ra);
return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);
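/*
 * Example of the truncate race handled above: between find_get_page() and
 * lock_page() another task can truncate the file and detach the page from
 * the mapping.  The page->mapping != mapping check catches this after the
 * lock is taken; the fault then drops its reference and retries the
 * lookup, either finding a fresh page or taking the no_cached_page path.
 */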
int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
int ret = VM_FAULT_LOCKED;
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
ret = VM_FAULT_NOPAGE;
goto out;
}
/*
* We mark the page dirty already here so that when freeze is in
* progress, we are guaranteed that writeback during freezing will
* see the dirty page and writeprotect it again.
*/
set_page_dirty(page);
wait_for_stable_page(page);
out:
sb_end_pagefault(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);
const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = filemap_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
struct address_space *mapping = file->f_mapping;
if (!mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(file);
vma->vm_ops = &generic_file_vm_ops;
return 0;
}
/*
* This is for filesystems which do not implement ->writepage.
*/
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
return -ENOSYS;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
static struct page *__read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data,
gfp_t gfp)
{
struct page *page;
int err;
repeat:
page = find_get_page(mapping, index);
if (!page) {
page = __page_cache_alloc(gfp | __GFP_COLD);
if (!page)
return ERR_PTR(-ENOMEM);
err = add_to_page_cache_lru(page, mapping, index, gfp);
if (unlikely(err)) {
page_cache_release(page);
if (err == -EEXIST)
goto repeat;
/* Presumably ENOMEM for radix tree node */
return ERR_PTR(err);
}
err = filler(data, page);
if (err < 0) {
page_cache_release(page);
page = ERR_PTR(err);
}
}
return page;
}
static struct page *do_read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data,
gfp_t gfp)
{
struct page *page;
int err;
retry:
page = __read_cache_page(mapping, index, filler, data, gfp);
if (IS_ERR(page))
return page;
if (PageUptodate(page))
goto out;
lock_page(page);
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
goto retry;
}
if (PageUptodate(page)) {
unlock_page(page);
goto out;
}
err = filler(data, page);
if (err < 0) {
page_cache_release(page);
return ERR_PTR(err);
}
out:
mark_page_accessed(page);
return page;
}
/**
* read_cache_page_async - read into page cache, fill it if needed
* @mapping: the page's address_space
* @index: the page index
* @filler: function to perform the read
* @data: first arg to filler(data, page) function, often left as NULL
*
* Same as read_cache_page, but don't wait for page to become unlocked
* after submitting it to the filler.
*
* Read into the page cache. If a page already exists, and PageUptodate() is
* not set, try to fill the page but don't wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_async(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data)
{
return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);
static struct page *wait_on_page_read(struct page *page)
{
if (!IS_ERR(page)) {
wait_on_page_locked(page);
if (!PageUptodate(page)) {
page_cache_release(page);
page = ERR_PTR(-EIO);
}
}
return page;
}
/**
* read_cache_page_gfp - read into page cache, using specified page allocation flags.
* @mapping: the page's address_space
* @index: the page index
* @gfp: the page allocator flags to use if allocating
*
* This is the same as "read_mapping_page(mapping, index, NULL)", but with
* any new page allocations done using the specified allocation flags.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page_gfp(struct address_space *mapping,
pgoff_t index,
gfp_t gfp)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);
/**
* read_cache_page - read into page cache, fill it if needed
* @mapping: the page's address_space
* @index: the page index
* @filler: function to perform the read
* @data: first arg to filler(data, page) function, often left as NULL
*
* Read into the page cache. If a page already exists, and PageUptodate() is
* not set, try to fill the page then wait for it to become unlocked.
*
* If the page does not get brought uptodate, return -EIO.
*/
struct page *read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data)
{
return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);
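/*
 * Sketch of a hypothetical caller (example_filler()/example_read() are
 * illustrative names, not an existing kernel API): a filesystem typically
 * passes a filler that wraps its ->readpage and treats an IS_ERR() result
 * as the I/O error.
 */
#if 0
static int example_filler(void *data, struct page *page)
{
	struct file *file = data;	/* whatever context the filler needs */

	return file->f_mapping->a_ops->readpage(file, page);
}

static struct page *example_read(struct file *file, pgoff_t index)
{
	struct page *page = read_cache_page(file->f_mapping, index,
					    example_filler, file);

	/* On success the page is uptodate with an elevated refcount;
	 * drop it with page_cache_release() when done. */
	return page;
}
#endif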
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
size_t copied = 0, left = 0;
while (bytes) {
char __user *buf = iov->iov_base + base;
int copy = min(bytes, iov->iov_len - base);
base = 0;
left = __copy_from_user_inatomic(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
iov++;
if (unlikely(left))
break;
}
return copied - left;
}
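/*
 * Example of the 'copied - left' accounting above (illustrative numbers):
 * copying 3000 bytes from two 2000-byte segments, if the second
 * __copy_from_user_inatomic() faults after 600 of its 1000 requested
 * bytes it returns left = 400; 'copied' has already advanced to 3000, so
 * the function reports 3000 - 400 = 2600 bytes actually copied.
 */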
/*
* Copy as much as we can into the page and return the number of bytes
* which were successfully copied. If a fault is encountered, return the
* number of bytes that were copied before the fault.
*/
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr;
size_t copied;
BUG_ON(!in_atomic());
kaddr = kmap_atomic(page);
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
i->iov, i->iov_offset, bytes);
}
kunmap_atomic(kaddr);
return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
/*
* This has the same side effects and return value as
* iov_iter_copy_from_user_atomic().
* The difference is that it attempts to resolve faults.
* Page must not be locked.
*/
size_t iov_iter_copy_from_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr;
size_t copied;
kaddr = kmap(page);
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
left = __copy_from_user(kaddr + offset, buf, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
i->iov, i->iov_offset, bytes);
}
kunmap(page);
return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
BUG_ON(i->count < bytes);
if (likely(i->nr_segs == 1)) {
i->iov_offset += bytes;
i->count -= bytes;
} else {
const struct iovec *iov = i->iov;
size_t base = i->iov_offset;
unsigned long nr_segs = i->nr_segs;
/*
* The !iov->iov_len check ensures we skip over unlikely
* zero-length segments (without overrunning the iovec).
*/
while (bytes || unlikely(i->count && !iov->iov_len)) {
int copy;
copy = min(bytes, iov->iov_len - base);
BUG_ON(!i->count || i->count < copy);
i->count -= copy;
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
i->iov = iov;
i->iov_offset = base;
i->nr_segs = nr_segs;
}
}
EXPORT_SYMBOL(iov_iter_advance);
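/*
 * Example (illustrative numbers): an iter over segments of 10, 0 and 50
 * bytes positioned at (seg 0, offset 4) and advanced by 6 bytes first
 * exhausts segment 0; with count still nonzero, the !iov->iov_len test
 * then steps over the empty middle segment, leaving the iter at
 * (seg 2, offset 0) without ever dereferencing the zero-length iovec.
 */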
/*
* Fault in the first iovec of the given iov_iter, to a maximum length
* of bytes. Returns 0 on success, or non-zero if the memory could not be
* accessed (i.e. because the address is invalid).
*
* writev-intensive code may want this to prefault several iovecs -- that
* would be possible (callers must not rely on the fact that _only_ the
* first iovec will be faulted with the current implementation).
*/
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
char __user *buf = i->iov->iov_base + i->iov_offset;
bytes = min(bytes, i->iov->iov_len - i->iov_offset);
return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
/*
* Return the count of just the current iov_iter segment.
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
const struct iovec *iov = i->iov;
if (i->nr_segs == 1)
return i->count;
else
return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
/*
* Performs necessary checks before doing a write
*
* Can adjust the write position or the number of bytes to write.
* Returns an appropriate error code that the caller should return,
* or zero if the write should be allowed.
*/
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
struct inode *inode = file->f_mapping->host;
unsigned long limit = rlimit(RLIMIT_FSIZE);
if (unlikely(*pos < 0))
return -EINVAL;
if (!isblk) {
/* FIXME: this is for backwards compatibility with 2.4 */
if (file->f_flags & O_APPEND)
*pos = i_size_read(inode);
if (limit != RLIM_INFINITY) {
if (*pos >= limit) {
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
}
if (*count > limit - (typeof(limit))*pos) {
*count = limit - (typeof(limit))*pos;
}
}
}
/*
* LFS rule
*/
if (unlikely(*pos + *count > MAX_NON_LFS &&
!(file->f_flags & O_LARGEFILE))) {
if (*pos >= MAX_NON_LFS) {
return -EFBIG;
}
if (*count > MAX_NON_LFS - (unsigned long)*pos) {
*count = MAX_NON_LFS - (unsigned long)*pos;
}
}
/*
* Are we about to exceed the fs block limit ?
*
* If we have written data it becomes a short write. If we have
* exceeded without writing data we send a signal and return EFBIG.
* Linus' frestrict idea will clean these up nicely.
*/
if (likely(!isblk)) {
if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
if (*count || *pos > inode->i_sb->s_maxbytes) {
return -EFBIG;
}
/* zero-length writes at ->s_maxbytes are OK */
}
if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
*count = inode->i_sb->s_maxbytes - *pos;
} else {
#ifdef CONFIG_BLOCK
loff_t isize;
if (bdev_read_only(I_BDEV(inode)))
return -EPERM;
isize = i_size_read(inode);
if (*pos >= isize) {
if (*count || *pos > isize)
return -ENOSPC;
}
if (*pos + *count > isize)
*count = isize - *pos;
#else
return -EPERM;
#endif
}
return 0;
}
EXPORT_SYMBOL(generic_write_checks);
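/*
 * Worked example of the clamping above (regular file, no O_LARGEFILE,
 * illustrative numbers): with RLIMIT_FSIZE = 1 MiB, a 64 KiB write at
 * *pos = 1 MiB - 4 KiB is shortened to *count = 4 KiB, while the same
 * write starting exactly at 1 MiB raises SIGXFSZ and fails with -EFBIG.
 * Independently, a write crossing MAX_NON_LFS (2^31 - 1) without
 * O_LARGEFILE is shortened at that boundary, or fails with -EFBIG if it
 * starts beyond it.
 */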
int pagecache_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
return aops->write_begin(file, mapping, pos, len, flags,
pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);
int pagecache_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
const struct address_space_operations *aops = mapping->a_ops;
mark_page_accessed(page);
return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long *nr_segs, loff_t pos, loff_t *ppos,
size_t count, size_t ocount)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t written;
size_t write_len;
pgoff_t end;
if (count != ocount)
*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
write_len = iov_length(iov, *nr_segs);
end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
if (written)
goto out;
/*
* After a write we want buffered reads to be sure to go to disk to get
* the new data. We invalidate clean cached page from the region we're
* about to write. We do this *before* the write so that we can return
* without clobbering -EIOCBQUEUED from ->direct_IO().
*/
if (mapping->nrpages) {
written = invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
/*
* If a page can not be invalidated, return 0 to fall back
* to buffered write.
*/
if (written) {
if (written == -EBUSY)
return 0;
goto out;
}
}
written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
/*
* Finally, try again to invalidate clean pages which might have been
* cached by non-direct readahead, or faulted in by get_user_pages()
* if the source of the write was an mmap'ed region of the file
* we're writing. Either one is a pretty crazy thing to do,
* so we don't support it 100%. If this invalidation
* fails, tough, the write still worked...
*/
if (mapping->nrpages) {
invalidate_inode_pages2_range(mapping,
pos >> PAGE_CACHE_SHIFT, end);
}
if (written > 0) {
pos += written;
if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
*ppos = pos;
}
out:
return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
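/*
 * Example: a direct write covering pages 0-2 with one of them still dirty
 * in the cache first writes it back via filemap_write_and_wait_range(),
 * invalidates pages 0-2 (returning 0 for a buffered fallback if one of
 * them is pinned and reports -EBUSY), issues the direct I/O, and then
 * invalidates again so a later buffered read sees the new on-disk data
 * rather than a stale cached page.
 */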
/*
* Find or create a page at the given pagecache position. Return the locked
* page. This function is specifically for buffered writes.
*/
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags)
{
int status;
gfp_t gfp_mask;
struct page *page;
gfp_t gfp_notmask = 0;
gfp_mask = mapping_gfp_mask(mapping);
if (mapping_cap_account_dirty(mapping))
gfp_mask |= __GFP_WRITE;
if (flags & AOP_FLAG_NOFS)
gfp_notmask = __GFP_FS;
repeat:
page = find_lock_page(mapping, index);
if (page)
goto found;
retry:
page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
if (!page)
return NULL;
if (is_cma_pageblock(page)) {
__free_page(page);
gfp_notmask |= __GFP_MOVABLE;
goto retry;
}
status = add_to_page_cache_lru(page, mapping, index,
GFP_KERNEL & ~gfp_notmask);
if (unlikely(status)) {
page_cache_release(page);
if (status == -EEXIST)
goto repeat;
return NULL;
}
found:
wait_for_stable_page(page);
return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
static ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, loff_t pos)
{
struct address_space *mapping = file->f_mapping;
const struct address_space_operations *a_ops = mapping->a_ops;
long status = 0;
ssize_t written = 0;
unsigned int flags = 0;
/*
* Copies from kernel address space cannot fail (NFSD is a big user).
*/
if (segment_eq(get_fs(), KERNEL_DS))
flags |= AOP_FLAG_UNINTERRUPTIBLE;
do {
struct page *page;
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
void *fsdata;
offset = (pos & (PAGE_CACHE_SIZE - 1));
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
iov_iter_count(i));
again:
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
* same page as we're writing to, without it being marked
* up-to-date.
*
* Not only is this an optimisation, but it is also required
* to check that the address is actually valid, when atomic
* usercopies are used, below.
*/
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
status = -EFAULT;
break;
}
status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata);
if (unlikely(status))
break;
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
pagefault_disable();
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
pagefault_enable();
flush_dcache_page(page);
mark_page_accessed(page);
status = a_ops->write_end(file, mapping, pos, bytes, copied,
page, fsdata);
if (unlikely(status < 0))
break;
copied = status;
cond_resched();
iov_iter_advance(i, copied);
if (unlikely(copied == 0)) {
/*
* If we were unable to copy any data at all, we must
* fall back to a single segment length write.
*
* If we didn't fallback here, we could livelock
* because not all segments in the iov can be copied at
* once without a pagefault.
*/
bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
iov_iter_single_seg_count(i));
goto again;
}
pos += copied;
written += copied;
balance_dirty_pages_ratelimited(mapping);
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
} while (iov_iter_count(i));
return written ? written : status;
}
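/*
 * Userspace analogue (illustrative sketch -- fast_copy() and its 'limit'
 * failure mode are invented stand-ins for the atomic usercopy) of the
 * retry logic above: when a copy makes no progress, shrink the request to
 * a smaller unit and try again, which guarantees forward progress instead
 * of livelocking.
 */
#if 0
#include <stdio.h>
#include <string.h>

/* pretend "atomic" copy: copies nothing whenever len exceeds 'limit' */
static size_t fast_copy(char *dst, const char *src, size_t len, size_t limit)
{
	if (len > limit)
		return 0;
	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	char src[64] = "example payload", dst[64];
	size_t pos = 0, want = 40, limit = 16;

	while (pos < want) {
		size_t bytes = want - pos;
		size_t copied = fast_copy(dst + pos, src + pos, bytes, limit);

		if (copied == 0) {	/* no progress: retry with a smaller unit */
			bytes = limit;
			copied = fast_copy(dst + pos, src + pos, bytes, limit);
		}
		pos += copied;
	}
	printf("copied %zu bytes\n", pos);	/* copied 40 bytes */
	return 0;
}
#endif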
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos, loff_t *ppos,
size_t count, ssize_t written)
{
struct file *file = iocb->ki_filp;
ssize_t status;
struct iov_iter i;
iov_iter_init(&i, iov, nr_segs, count, written);
status = generic_perform_write(file, &i, pos);
if (likely(status >= 0)) {
written += status;
*ppos = pos + status;
}
return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
/**
* __generic_file_aio_write - write data to a file
* @iocb: IO state structure (file, offset, etc.)
* @iov: vector with data to write
* @nr_segs: number of segments in the vector
* @ppos: position where to write
*
* This function does all the work needed for actually writing data to a
* file. It does all basic checks, removes SUID from the file, updates
* modification times and calls proper subroutines depending on whether we
* do direct IO or a standard buffered write.
*
* It expects i_mutex to be grabbed unless we work on a block device or similar
* object which does not need locking at all.
*
* This function does *not* take care of syncing data in case of O_SYNC write.
* A caller has to handle it. This is mainly due to the fact that we want to
* avoid syncing under i_mutex.
*/
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos)
{
struct file *file = iocb->ki_filp;
struct address_space * mapping = file->f_mapping;
size_t ocount; /* original count */
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
loff_t pos;
ssize_t written;
ssize_t err;
ocount = 0;
err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
if (err)
return err;
count = ocount;
pos = *ppos;
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
written = 0;
err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
if (err)
goto out;
if (count == 0)
goto out;
err = file_remove_suid(file);
if (err)
goto out;
err = file_update_time(file);
if (err)
goto out;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
loff_t endbyte;
ssize_t written_buffered;
written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
ppos, count, ocount);
if (written < 0 || written == count)
goto out;
/*
* direct-io write to a hole: fall through to buffered I/O
* for completing the rest of the request.
*/
pos += written;
count -= written;
written_buffered = generic_file_buffered_write(iocb, iov,
nr_segs, pos, ppos, count,
written);
/*
* If generic_file_buffered_write() returned a synchronous error
* then we want to return the number of bytes which were
* direct-written, or the error code if that was zero. Note
* that this differs from normal direct-io semantics, which
* will return -EFOO even if some bytes were written.
*/
if (written_buffered < 0) {
err = written_buffered;
goto out;
}
/*
* We need to ensure that the page cache pages are written to
* disk and invalidated to preserve the expected O_DIRECT
* semantics.
*/
endbyte = pos + written_buffered - written - 1;
err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
if (err == 0) {
written = written_buffered;
invalidate_mapping_pages(mapping,
pos >> PAGE_CACHE_SHIFT,
endbyte >> PAGE_CACHE_SHIFT);
} else {
/*
* We don't know how much we wrote, so just return
* the number of bytes which were direct-written
*/
}
} else {
written = generic_file_buffered_write(iocb, iov, nr_segs,
pos, ppos, count, written);
}
out:
current->backing_dev_info = NULL;
return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_aio_write);
/**
* generic_file_aio_write - write data to a file
* @iocb: IO state structure
* @iov: vector with data to write
* @nr_segs: number of segments in the vector
* @pos: position in file where to write
*
* This is a wrapper around __generic_file_aio_write() to be used by most
* filesystems. It takes care of syncing the file in case of O_SYNC file
* and acquires i_mutex as needed.
*/
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
if (err < 0 && ret > 0)
ret = err;
}
return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
/**
* try_to_release_page() - release old fs-specific metadata on a page
*
* @page: the page which the kernel is trying to free
* @gfp_mask: memory allocation flags (and I/O mode)
*
* The address_space is asked to try to release any data held against the
* page (presumably at page->private). If the release was successful,
* return `1'. Otherwise return zero.
*
* This may also be called if PG_fscache is set on a page, indicating that the
* page is known to the local caching routines.
*
* The @gfp_mask argument specifies whether I/O may be performed to release
* this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
*
*/
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
struct address_space * const mapping = page->mapping;
BUG_ON(!PageLocked(page));
if (PageWriteback(page))
return 0;
if (mapping && mapping->a_ops->releasepage)
return mapping->a_ops->releasepage(page, gfp_mask);
return try_to_free_buffers(page);
}
EXPORT_SYMBOL(try_to_release_page);
| gpl-2.0 |
lzh6710/ubuntu-trusty | arch/tile/mm/hugetlbpage.c | 437 | 9866 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* TILE Huge TLB Page Support for Kernel.
* Taken from i386 hugetlb implementation:
* Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#ifdef CONFIG_HUGETLB_SUPER_PAGES
/*
* Provide an additional huge page size (in addition to the regular default
* huge page size) if no "hugepagesz" arguments are specified.
* Note that it must be smaller than the default huge page size so
* that it's possible to allocate them on demand from the buddy allocator.
* You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
* or not define it at all.
*/
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
[HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
#endif
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
addr &= -sz; /* Mask off any low bits in the address. */
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
#ifdef CONFIG_HUGETLB_SUPER_PAGES
if (sz >= PGDIR_SIZE) {
BUG_ON(sz != PGDIR_SIZE &&
sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
return (pte_t *)pud;
} else {
pmd_t *pmd = pmd_alloc(mm, pud, addr);
if (sz >= PMD_SIZE) {
BUG_ON(sz != PMD_SIZE &&
sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
return (pte_t *)pmd;
}
else {
if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
panic("Unexpected page size %#lx\n", sz);
return pte_alloc_map(mm, NULL, pmd, addr);
}
}
#else
BUG_ON(sz != PMD_SIZE);
return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}
static pte_t *get_pte(pte_t *base, int index, int level)
{
pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
if (!pte_present(*ptep) && huge_shift[level] != 0) {
unsigned long mask = -1UL << huge_shift[level];
pte_t *super_ptep = base + (index & mask);
pte_t pte = *super_ptep;
if (pte_present(pte) && pte_super(pte))
ptep = super_ptep;
}
#endif
return ptep;
}
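/*
 * Example (hypothetical configuration): with huge_shift[HUGE_SHIFT_PAGE]
 * = 4 -- sixteen base pages per super page, as the 1 MB
 * ADDITIONAL_HUGE_SIZE gives on a 64 KB-page build -- a lookup at index
 * 37 whose own PTE is not present retries at index 37 & ~15 = 32; if that
 * entry is present and marked super, the single PTE at index 32 maps the
 * whole 16-page range.
 */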
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
pte_t *pte;
#endif
/* Get the top-level page table entry. */
pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
/* We don't have four levels. */
pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
if (!pud_present(*pud))
return NULL;
/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_huge(*pud))
return (pte_t *)pud;
pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
pmd_index(addr), 1);
if (!pmd_present(*pmd))
return NULL;
#else
pmd = pmd_offset(pud, addr);
#endif
/* Check for an L1 huge PTE. */
if (pmd_huge(*pmd))
return (pte_t *)pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
/* Check for an L2 huge PTE. */
pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
if (!pte_present(*pte))
return NULL;
if (pte_super(*pte))
return pte;
#endif
return NULL;
}
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}
int pud_huge(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
struct page *page;
page = pte_page(*(pte_t *)pmd);
if (page)
page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
return page;
}
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int write)
{
struct page *page;
page = pte_page(*(pte_t *)pud);
if (page)
page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
return page;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long addr0, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
unsigned long addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE;
addr = vm_unmapped_area(&info);
}
return addr;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_SUPER_PAGES
static __init int __setup_hugepagesz(unsigned long ps)
{
int log_ps = __builtin_ctzl(ps);
int level, base_shift;
if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
pr_warn("Not enabling %ld byte huge pages;"
" must be a power of four.\n", ps);
return -EINVAL;
}
if (ps > 64*1024*1024*1024UL) {
pr_warn("Not enabling %ld MB huge pages;"
" largest legal value is 64 GB .\n", ps >> 20);
return -EINVAL;
} else if (ps >= PUD_SIZE) {
static long hv_jpage_size;
if (hv_jpage_size == 0)
hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
if (hv_jpage_size != PUD_SIZE) {
pr_warn("Not enabling >= %ld MB huge pages:"
" hypervisor reports size %ld\n",
PUD_SIZE >> 20, hv_jpage_size);
return -EINVAL;
}
level = 0;
base_shift = PUD_SHIFT;
} else if (ps >= PMD_SIZE) {
level = 1;
base_shift = PMD_SHIFT;
} else if (ps > PAGE_SIZE) {
level = 2;
base_shift = PAGE_SHIFT;
} else {
pr_err("hugepagesz: huge page size %ld too small\n", ps);
return -EINVAL;
}
if (log_ps != base_shift) {
int shift_val = log_ps - base_shift;
if (huge_shift[level] != 0) {
int old_shift = base_shift + huge_shift[level];
pr_warn("Not enabling %ld MB huge pages;"
" already have size %ld MB.\n",
ps >> 20, (1UL << old_shift) >> 20);
return -EINVAL;
}
if (hv_set_pte_super_shift(level, shift_val) != 0) {
pr_warn("Not enabling %ld MB huge pages;"
" no hypervisor support.\n", ps >> 20);
return -EINVAL;
}
printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
huge_shift[level] = shift_val;
}
hugetlb_add_hstate(log_ps - PAGE_SHIFT);
return 0;
}
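/*
 * Example: "hugepagesz=16M" gives log_ps = 24, which is both a power of
 * two and even, so it passes the power-of-four test; "hugepagesz=8M"
 * gives log_ps = 23, the (log_ps & 1) check fires, and the size is
 * rejected -- super-page sizes here must keep an even shift, i.e. be
 * powers of four.
 */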
static bool saw_hugepagesz;
static __init int setup_hugepagesz(char *opt)
{
if (!saw_hugepagesz) {
saw_hugepagesz = true;
memset(huge_shift, 0, sizeof(huge_shift));
}
return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);
#ifdef ADDITIONAL_HUGE_SIZE
/*
* Provide an additional huge page size if no "hugepagesz" args are given.
* In that case, all the cores have properly set up their hv super_shift
* already, but we need to notify the hugetlb code to enable the
* new huge page size from the Linux point of view.
*/
static __init int add_default_hugepagesz(void)
{
if (!saw_hugepagesz) {
BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
ADDITIONAL_HUGE_SIZE);
BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
}
return 0;
}
arch_initcall(add_default_hugepagesz);
#endif
#endif /* CONFIG_HUGETLB_SUPER_PAGES */
| gpl-2.0 |
deano0714/EK02-CM-Kernel | arch/x86/kernel/process_32.c | 693 | 10585 | /*
* Copyright (C) 1995 Linus Torvalds
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
#include <linux/err.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((unsigned long *)tsk->thread.sp)[3];
}
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
#endif
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
int cpu = smp_processor_id();
/*
* If we're the non-boot CPU, nothing set the stack canary up
* for us. CPU0 already has it initialized but no harm in
* doing it again. This is a good place for updating it, as
* we won't ever return from this function (so the invalid
* canaries already on the stack won't ever trigger).
*/
boot_init_stack_canary();
current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_stop_sched_tick(1);
while (!need_resched()) {
check_pgt_cache();
rmb();
if (cpu_is_offline(cpu))
play_dead();
local_irq_disable();
/* Don't trace irqs off for idle */
stop_critical_timings();
pm_idle();
start_critical_timings();
}
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
void __show_regs(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
unsigned long sp;
unsigned short ss, gs;
if (user_mode_vm(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
gs = get_user_gs(regs);
} else {
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
savesegment(gs, gs);
}
show_regs_common();
printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
(u16)regs->cs, regs->ip, regs->flags,
smp_processor_id());
print_symbol("EIP is at %s\n", regs->ip);
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);
printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
regs->si, regs->di, regs->bp, sp);
printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
if (!all)
return;
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
cr4 = read_cr4_safe();
printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
cr0, cr2, cr3, cr4);
get_debugreg(d0, 0);
get_debugreg(d1, 1);
get_debugreg(d2, 2);
get_debugreg(d3, 3);
printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
d0, d1, d2, d3);
get_debugreg(d6, 6);
get_debugreg(d7, 7);
printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
d6, d7);
}
void release_thread(struct task_struct *dead_task)
{
BUG_ON(dead_task->mm);
release_vm86_irqs(dead_task);
}
/*
* This gets called before we allocate a new thread and copy
* the current task into it.
*/
void prepare_to_copy(struct task_struct *tsk)
{
unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct pt_regs *childregs;
struct task_struct *tsk;
int err;
childregs = task_pt_regs(p);
*childregs = *regs;
childregs->ax = 0;
childregs->sp = sp;
p->thread.sp = (unsigned long) childregs;
p->thread.sp0 = (unsigned long) (childregs+1);
p->thread.ip = (unsigned long) ret_from_fork;
task_user_gs(p) = get_user_gs(regs);
p->thread.io_bitmap_ptr = NULL;
tsk = current;
err = -ENOMEM;
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
IO_BITMAP_BYTES, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr) {
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
set_tsk_thread_flag(p, TIF_IO_BITMAP);
}
err = 0;
/*
* Set a new TLS for the child thread?
*/
if (clone_flags & CLONE_SETTLS)
err = do_set_thread_area(p, -1,
(struct user_desc __user *)childregs->si, 0);
if (err && p->thread.io_bitmap_ptr) {
kfree(p->thread.io_bitmap_ptr);
p->thread.io_bitmap_max = 0;
}
return err;
}
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
set_user_gs(regs, 0);
regs->fs = 0;
set_fs(USER_DS);
regs->ds = __USER_DS;
regs->es = __USER_DS;
regs->ss = __USER_DS;
regs->cs = __USER_CS;
regs->ip = new_ip;
regs->sp = new_sp;
/*
* Free the old FP and other extended state
*/
free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
/*
* switch_to(x,y) should switch tasks from x to y.
*
* We fsave/fwait so that an exception goes off at the right time
* (as a call from the fsave or fwait in effect) rather than to
* the wrong process. Lazy FP saving no longer makes any sense
* with modern CPUs, and this simplifies a lot of things (SMP
* and UP become the same).
*
* NOTE! We used to use the x86 hardware context switching. The
* reason for not using it any more becomes apparent when you
* try to recover gracefully from saved state that is no longer
* valid (stale segment register values in particular). With the
* hardware task-switch, there is no way to fix up bad state in
* a reasonable manner.
*
* The fact that Intel documents the hardware task-switching to
* be slow is largely a red herring - this code is not noticeably
* faster. However, there _is_ some room for improvement here,
* so the performance issues may eventually be a valid point.
* More important, however, is the fact that this allows us much
* more flexibility.
*
* The return value (in %ax) will be the "prev" task after
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
bool preload_fpu;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
/*
* If the task has used the FPU in the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
*/
preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
__unlazy_fpu(prev_p);
/* we're going to use this soon, after a few expensive things */
if (preload_fpu)
prefetch(next->fpu.state);
/*
* Reload esp0.
*/
load_sp0(tss, next);
/*
* Save away %gs. No need to save %fs, as it was saved on the
* stack on entry. No need to save %es and %ds, as those are
* always kernel segments while inside the kernel. Doing this
* before setting the new TLS descriptors avoids the situation
* where we temporarily have non-reloadable segments in %fs
* and %gs. This could be an issue if the NMI handler ever
* used %fs or %gs (it does not today), or if the kernel is
* running inside of a hypervisor layer.
*/
lazy_save_gs(prev->gs);
/*
* Load the per-thread Thread-Local Storage descriptor.
*/
load_TLS(next, cpu);
/*
* Restore IOPL if needed. In normal use, the flags restore
* in the switch assembly will handle this. But if the kernel
* is running virtualized at a non-zero CPL, the popf will
* not restore flags, so it must be done in a separate step.
*/
if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
set_iopl_mask(next->iopl);
/*
* Now maybe handle debug registers and/or IO bitmaps
*/
if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
/* If we're going to preload the fpu context, make sure clts
is run while we're batching the cpu state updates. */
if (preload_fpu)
clts();
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
* the GDT and LDT are properly updated, and must be
* done before math_state_restore, so the TS bit is up
* to date.
*/
arch_end_context_switch(next_p);
if (preload_fpu)
__math_state_restore();
/*
* Restore %gs if needed (which is common)
*/
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
percpu_write(current_task, next_p);
return prev_p;
}
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
unsigned long get_wchan(struct task_struct *p)
{
unsigned long bp, sp, ip;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
sp = p->thread.sp;
if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
return 0;
/* include/asm-i386/system.h:switch_to() pushes bp last. */
bp = *(unsigned long *) sp;
do {
if (bp < stack_page || bp > top_ebp+stack_page)
return 0;
ip = *(unsigned long *) (bp+4);
if (!in_sched_functions(ip))
return ip;
bp = *(unsigned long *) bp;
} while (count++ < 16);
return 0;
}
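/*
 * Example of the frame walk above: switch_to() leaves each frame laid out
 * as [saved ebp][return ip], so *(sp) is the first bp, *(bp + 4) the
 * caller's ip and *(bp) the next bp.  The bounds checks against
 * stack_page guard against a stale or corrupt chain, and the
 * 16-iteration cap bounds the scan even if the frame list loops.
 */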
| gpl-2.0 |
Project-Elite/elite_kernelAOSP | net/sched/sch_qfq.c | 949 | 27834 | /*
* net/sched/sch_qfq.c Quick Fair Queueing Scheduler.
*
* Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* Quick Fair Queueing
===================
Sources:
Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
Packet Scheduling with Tight Bandwidth Distribution Guarantees."
See also:
http://retis.sssup.it/~fabio/linux/qfq/
*/
/*
Virtual time computations.
S, F and V are all computed in fixed point arithmetic with
FRAC_BITS decimal bits.
QFQ_MAX_INDEX is the maximum index allowed for a group. We need
one bit per index.
QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
The layout of the bits is as below:
[ MTU_SHIFT ][ FRAC_BITS ]
[ MAX_INDEX ][ MIN_SLOT_SHIFT ]
^.__grp->index = 0
*.__grp->slot_shift
where MIN_SLOT_SHIFT is derived by difference from the others.
The max group index corresponds to Lmax/w_min, where
Lmax=1<<MTU_SHIFT, w_min = 1 .
From this, and knowing how many groups (MAX_INDEX) we want,
we can derive the shift corresponding to each group.
Because we often need to compute
F = S + len/w_i and V = V + len/wsum
instead of storing w_i store the value
inv_w = (1<<FRAC_BITS)/w_i
so we can compute F = S + len * inv_w with a multiplication
instead of a division (see the worked example below).
We use W_TOT in the formulas so we can easily move between
static and adaptive weight sum.
The per-scheduler-instance data contain all the data structures
for the scheduler: bitmaps and bucket lists.
*/
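/*
 * Worked example of the fixed-point timestamps (illustrative numbers):
 * with FRAC_BITS = 30, a class of weight w = 4 stores
 * inv_w = ONE_FP / 4 = 1 << 28.  A 1500-byte packet then advances its
 * finish time by len * inv_w = 1500 << 28, which is 1500/4 = 375 units
 * once the FRAC_BITS scaling is taken out -- exactly the len/w_i that
 * F = S + len/w_i requires, computed without a division.
 */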
/*
* Maximum number of consecutive slots occupied by backlogged classes
* inside a group.
*/
#define QFQ_MAX_SLOTS 32
/*
* Shifts used for class<->group mapping. We allow class weights that are
* in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
* group with the smallest index that can support the L_i / r_i configured
* for the class.
*
* grp->index is the index of the group; and grp->slot_shift
* is the shift for the corresponding (scaled) sigma_i.
*/
#define QFQ_MAX_INDEX 19
#define QFQ_MAX_WSHIFT 16
#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
#define FRAC_BITS 30 /* fixed point arithmetic */
#define ONE_FP (1UL << FRAC_BITS)
#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
#define QFQ_MTU_SHIFT 11
#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
/*
* Possible group states. These values are used as indexes for the bitmaps
* array of struct qfq_queue.
*/
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
struct qfq_group;
struct qfq_class {
struct Qdisc_class_common common;
unsigned int refcnt;
unsigned int filter_cnt;
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct Qdisc *qdisc;
struct hlist_node next; /* Link for the slot list. */
u64 S, F; /* flow timestamps (exact) */
/* group we belong to. In principle we would need the index,
* which is log_2(lmax/weight), but we never reference it
* directly, only the group.
*/
struct qfq_group *grp;
/* these are copied from the flowset. */
u32 inv_w; /* ONE_FP/weight */
u32 lmax; /* Max packet size for this flow. */
};
struct qfq_group {
u64 S, F; /* group timestamps (approx). */
unsigned int slot_shift; /* Slot shift. */
unsigned int index; /* Group index. */
unsigned int front; /* Index of the front slot. */
unsigned long full_slots; /* non-empty slots */
/* Array of RR lists of active classes. */
struct hlist_head slots[QFQ_MAX_SLOTS];
};
struct qfq_sched {
struct tcf_proto *filter_list;
struct Qdisc_class_hash clhash;
u64 V; /* Precise virtual time. */
u32 wsum; /* weight sum */
unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
};
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
struct qfq_sched *q = qdisc_priv(sch);
struct Qdisc_class_common *clc;
clc = qdisc_class_find(&q->clhash, classid);
if (clc == NULL)
return NULL;
return container_of(clc, struct qfq_class, common);
}
static void qfq_purge_queue(struct qfq_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
qdisc_reset(cl->qdisc);
qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};
/*
* Calculate a flow index, given its weight and maximum packet length.
* index = log_2(maxlen/weight) but we need to apply the scaling.
* This is used only once at flow creation.
*/
static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
{
u64 slot_size = (u64)maxlen * inv_w;
unsigned long size_map;
int index = 0;
size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
if (!size_map)
goto out;
index = __fls(size_map) + 1; /* basically a log_2 */
index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
if (index < 0)
index = 0;
out:
pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
(unsigned long) ONE_FP/inv_w, maxlen, index);
return index;
}
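/*
 * Worked example (hypothetical class): maxlen = 2048 and weight = 1 give
 * slot_size = 2048 * ONE_FP = 2^41; with QFQ_MIN_SLOT_SHIFT = 22,
 * size_map = 2^19, __fls() + 1 yields 20, and since 2^41 is an exact
 * power of two the correction subtracts one, landing the class in group
 * 19 (the largest slot size).  Halving maxlen or doubling the weight
 * moves it down one group.
 */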
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr **tca, unsigned long *arg)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)*arg;
struct nlattr *tb[TCA_QFQ_MAX + 1];
u32 weight, lmax, inv_w;
int i, err;
if (tca[TCA_OPTIONS] == NULL) {
pr_notice("qfq: no options\n");
return -EINVAL;
}
err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
if (err < 0)
return err;
if (tb[TCA_QFQ_WEIGHT]) {
weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
pr_notice("qfq: invalid weight %u\n", weight);
return -EINVAL;
}
} else
weight = 1;
inv_w = ONE_FP / weight;
weight = ONE_FP / inv_w;
if (q->wsum + weight > QFQ_MAX_WSUM) {
pr_notice("qfq: total weight out of range (%u + %u)\n",
weight, q->wsum);
return -EINVAL;
}
if (tb[TCA_QFQ_LMAX]) {
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
pr_notice("qfq: invalid max length %u\n", lmax);
return -EINVAL;
}
} else
lmax = 1UL << QFQ_MTU_SHIFT;
if (cl != NULL) {
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err)
return err;
}
sch_tree_lock(sch);
if (tb[TCA_QFQ_WEIGHT]) {
/* replace this class's old weight in the running sum */
q->wsum += weight - ONE_FP / cl->inv_w;
cl->inv_w = inv_w;
}
sch_tree_unlock(sch);
return 0;
}
cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
if (cl == NULL)
return -ENOBUFS;
cl->refcnt = 1;
cl->common.classid = classid;
cl->lmax = lmax;
cl->inv_w = inv_w;
i = qfq_calc_index(cl->inv_w, cl->lmax);
cl->grp = &q->groups[i];
q->wsum += weight;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
qdisc_destroy(cl->qdisc);
kfree(cl);
return err;
}
}
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
sch_tree_unlock(sch);
qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
}
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
struct qfq_sched *q = qdisc_priv(sch);
if (cl->inv_w) {
q->wsum -= ONE_FP / cl->inv_w;
cl->inv_w = 0;
}
gen_kill_estimator(&cl->bstats, &cl->rate_est);
qdisc_destroy(cl->qdisc);
kfree(cl);
}
static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)arg;
if (cl->filter_cnt > 0)
return -EBUSY;
sch_tree_lock(sch);
qfq_purge_queue(cl);
qdisc_class_hash_remove(&q->clhash, &cl->common);
BUG_ON(--cl->refcnt == 0);
/*
* This shouldn't happen: we "hold" one cops->get() when called
* from tc_ctl_tclass; the destroy method is done from cops->put().
*/
sch_tree_unlock(sch);
return 0;
}
static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
struct qfq_class *cl = qfq_find_class(sch, classid);
if (cl != NULL)
cl->refcnt++;
return (unsigned long)cl;
}
static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
struct qfq_class *cl = (struct qfq_class *)arg;
if (--cl->refcnt == 0)
qfq_destroy_class(sch, cl);
}
static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
struct qfq_sched *q = qdisc_priv(sch);
if (cl)
return NULL;
return &q->filter_list;
}
static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct qfq_class *cl = qfq_find_class(sch, classid);
if (cl != NULL)
cl->filter_cnt++;
return (unsigned long)cl;
}
static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
struct qfq_class *cl = (struct qfq_class *)arg;
cl->filter_cnt--;
}
static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
struct Qdisc *new, struct Qdisc **old)
{
struct qfq_class *cl = (struct qfq_class *)arg;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, cl->common.classid);
if (new == NULL)
new = &noop_qdisc;
}
sch_tree_lock(sch);
qfq_purge_queue(cl);
*old = cl->qdisc;
cl->qdisc = new;
sch_tree_unlock(sch);
return 0;
}
static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
struct qfq_class *cl = (struct qfq_class *)arg;
return cl->qdisc;
}
static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct qfq_class *cl = (struct qfq_class *)arg;
struct nlattr *nest;
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
tcm->tcm_info = cl->qdisc->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct qfq_class *cl = (struct qfq_class *)arg;
struct tc_qfq_stats xstats;
memset(&xstats, 0, sizeof(xstats));
cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
xstats.weight = ONE_FP/cl->inv_w;
xstats.lmax = cl->lmax;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct hlist_node *n;
unsigned int i;
if (arg->stop)
return;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
}
static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct tcf_result res;
int result;
if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
pr_debug("qfq_classify: found %d\n", skb->priority);
cl = qfq_find_class(sch, skb->priority);
if (cl != NULL)
return cl;
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tc_classify(skb, q->filter_list, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
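/* fall through */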
case TC_ACT_SHOT:
return NULL;
}
#endif
cl = (struct qfq_class *)res.class;
if (cl == NULL)
cl = qfq_find_class(sch, res.classid);
return cl;
}
return NULL;
}
/* Generic comparison function, handling wraparound. */
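/*
 * Example (illustrative, not from the original source): timestamps live
 * modulo 2^64, so qfq_gt(2, ULLONG_MAX) is true because
 * (s64)(2 - ULLONG_MAX) == 3 > 0, i.e. 2 is "after" ULLONG_MAX across
 * the wraparound.
 */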
static inline int qfq_gt(u64 a, u64 b)
{
return (s64)(a - b) > 0;
}
/* Round a precise timestamp to its slotted value. */
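/* Example (illustrative): qfq_round_down(0x2f, 3) == 0x28. */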
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
return ts & ~((1ULL << shift) - 1);
}
/* Return the pointer to the group with the lowest index in the bitmap. */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
unsigned long bitmap)
{
int index = __ffs(bitmap);
return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
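/*
 * Example (illustrative): mask_from(0x2d, 2) == 0x2c -- bits below
 * position 2 are cleared, the rest are preserved.
 */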
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
return bitmap & ~((1UL << from) - 1);
}
/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3.
 * First compute eligibility by comparing grp->S and q->V;
 * then check whether some group is blocking us and, if so, add EB.
 */
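/*
 * Illustrative reading (derived from the code below): qfq_gt(grp->S,
 * q->V) yields 0 (eligible -> ER) or 1 (ineligible -> IR); OR-ing in
 * EB (2) when a lower-index group blocks us turns those into EB (2)
 * or IB (3) respectively.
 */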
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
/* if S > V we are not eligible */
unsigned int state = qfq_gt(grp->S, q->V);
unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
struct qfq_group *next;
if (mask) {
next = qfq_ffs(q, mask);
if (qfq_gt(grp->F, next->F))
state |= EB;
}
return state;
}
/*
* In principle
* q->bitmaps[dst] |= q->bitmaps[src] & mask;
* q->bitmaps[src] &= ~mask;
* but we should make sure that src != dst
*/
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
int src, int dst)
{
q->bitmaps[dst] |= q->bitmaps[src] & mask;
q->bitmaps[src] &= ~mask;
}
static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
struct qfq_group *next;
if (mask) {
next = qfq_ffs(q, mask);
if (!qfq_gt(next->F, old_F))
return;
}
mask = (1UL << index) - 1;
qfq_move_groups(q, mask, EB, ER);
qfq_move_groups(q, mask, IB, IR);
}
/*
 * perhaps
 *
 *	old_V ^= q->V;
 *	old_V >>= QFQ_MIN_SLOT_SHIFT;
 *	if (old_V) {
 *		...
 *	}
 */
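/*
 * Worked example (illustrative): if old_vslot == 5 (0b101) and
 * vslot == 6 (0b110), then vslot ^ old_vslot == 0b011, fls() == 2,
 * mask == 0b11, and groups 0 and 1 move from the ineligible to the
 * eligible bitmaps.
 */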
static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
{
unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
if (vslot != old_vslot) {
unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
qfq_move_groups(q, mask, IR, ER);
qfq_move_groups(q, mask, IB, EB);
}
}
/*
* XXX we should make sure that slot becomes less than 32.
* This is guaranteed by the input values.
* roundedS is always cl->S rounded on grp->slot_shift bits.
*/
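/*
 * Worked example (illustrative): with grp->S == 0x40, roundedS == 0x60
 * and slot_shift == 4, slot == (0x20 >> 4) == 2, so cl goes into
 * bucket (grp->front + 2) % QFQ_MAX_SLOTS and bit 2 of full_slots
 * is set.
 */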
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
u64 roundedS)
{
u64 slot = (roundedS - grp->S) >> grp->slot_shift;
unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
hlist_add_head(&cl->next, &grp->slots[i]);
__set_bit(slot, &grp->full_slots);
}
/* Maybe introduce hlist_first_entry?? */
static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
{
return hlist_entry(grp->slots[grp->front].first,
struct qfq_class, next);
}
/*
* remove the entry from the slot
*/
static void qfq_front_slot_remove(struct qfq_group *grp)
{
struct qfq_class *cl = qfq_slot_head(grp);
BUG_ON(!cl);
hlist_del(&cl->next);
if (hlist_empty(&grp->slots[grp->front]))
__clear_bit(0, &grp->full_slots);
}
/*
 * Return the first full queue in a group. As a side effect,
 * adjust the bucket list so that the first non-empty bucket is at
 * position 0 in full_slots.
 */
static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
{
unsigned int i;
pr_debug("qfq slot_scan: grp %u full %#lx\n",
grp->index, grp->full_slots);
if (grp->full_slots == 0)
return NULL;
i = __ffs(grp->full_slots); /* zero based */
if (i > 0) {
grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
grp->full_slots >>= i;
}
return qfq_slot_head(grp);
}
/*
 * Adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32.
 * Note: the unsigned "grp->front - i" arithmetic below stays correct
 * modulo QFQ_MAX_SLOTS only because QFQ_MAX_SLOTS is a power of two.
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
grp->full_slots <<= i;
grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
{
struct qfq_group *grp;
unsigned long ineligible;
ineligible = q->bitmaps[IR] | q->bitmaps[IB];
if (ineligible) {
if (!q->bitmaps[ER]) {
grp = qfq_ffs(q, ineligible);
if (qfq_gt(grp->S, q->V))
q->V = grp->S;
}
qfq_make_eligible(q, old_V);
}
}
/* Return the length of the next packet in the queue (0 if the queue is empty). */
static unsigned int qdisc_peek_len(struct Qdisc *sch)
{
struct sk_buff *skb;
skb = sch->ops->peek(sch);
return skb ? qdisc_pkt_len(skb) : 0;
}
/*
 * Update the class; return true if the group also needs to be updated.
 */
static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
{
unsigned int len = qdisc_peek_len(cl->qdisc);
cl->S = cl->F;
if (!len)
qfq_front_slot_remove(grp); /* queue is empty */
else {
u64 roundedS;
cl->F = cl->S + (u64)len * cl->inv_w;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
if (roundedS == grp->S)
return false;
qfq_front_slot_remove(grp);
qfq_slot_insert(grp, cl, roundedS);
}
return true;
}
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
struct qfq_class *cl;
struct sk_buff *skb;
unsigned int len;
u64 old_V;
if (!q->bitmaps[ER])
return NULL;
grp = qfq_ffs(q, q->bitmaps[ER]);
cl = qfq_slot_head(grp);
skb = qdisc_dequeue_peeked(cl->qdisc);
if (!skb) {
WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
return NULL;
}
sch->q.qlen--;
qdisc_bstats_update(sch, skb);
old_V = q->V;
len = qdisc_pkt_len(skb);
q->V += (u64)len * IWSUM;
pr_debug("qfq dequeue: len %u F %lld now %lld\n",
len, (unsigned long long) cl->F, (unsigned long long) q->V);
if (qfq_update_class(grp, cl)) {
u64 old_F = grp->F;
cl = qfq_slot_scan(grp);
if (!cl)
__clear_bit(grp->index, &q->bitmaps[ER]);
else {
u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
unsigned int s;
if (grp->S == roundedS)
goto skip_unblock;
grp->S = roundedS;
grp->F = roundedS + (2ULL << grp->slot_shift);
__clear_bit(grp->index, &q->bitmaps[ER]);
s = qfq_calc_state(q, grp);
__set_bit(grp->index, &q->bitmaps[s]);
}
qfq_unblock_groups(q, grp->index, old_F);
}
skip_unblock:
qfq_update_eligible(q, old_V);
return skb;
}
/*
 * Assign a reasonable start time for a new flow k in group i.
 * Admissible values for \hat{F} are multiples of \sigma_i
 * no greater than V + \sigma_i; larger values mean that
 * we had a wraparound, so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in ER. So, if we have groups in ER, set S to
 * the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
{
unsigned long mask;
u64 limit, roundedF;	/* 64-bit: cl->F and q->V are u64, 32 bits would truncate */
int slot_shift = cl->grp->slot_shift;
roundedF = qfq_round_down(cl->F, slot_shift);
limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
/* timestamp was stale */
mask = mask_from(q->bitmaps[ER], cl->grp->index);
if (mask) {
struct qfq_group *next = qfq_ffs(q, mask);
if (qfq_gt(roundedF, next->F)) {
if (qfq_gt(limit, next->F))
cl->S = next->F;
else /* preserve timestamp correctness */
cl->S = limit;
return;
}
}
cl->S = q->V;
} else /* timestamp is not stale */
cl->S = cl->F;
}
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
struct qfq_class *cl;
int err;
u64 roundedS;
int s;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
if (err & __NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return err;
}
pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
sch->qstats.drops++;
}
return err;
}
bstats_update(&cl->bstats, skb);
++sch->q.qlen;
/* If the new skb is not the head of the queue, we are done here. */
if (cl->qdisc->q.qlen != 1)
return err;
/* If we reach this point, the class queue was idle. */
grp = cl->grp;
qfq_update_start(q, cl);
/* compute new finish time and rounded start. */
cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
/*
* insert cl in the correct bucket.
* If cl->S >= grp->S we don't need to adjust the
* bucket list and simply go to the insertion phase.
* Otherwise grp->S is decreasing, we must make room
* in the bucket list, and also recompute the group state.
* Finally, if there were no flows in this group and nobody
* was in ER make sure to adjust V.
*/
if (grp->full_slots) {
if (!qfq_gt(grp->S, cl->S))
goto skip_update;
/* create a slot for this cl->S */
qfq_slot_rotate(grp, roundedS);
/* group was surely ineligible, remove */
__clear_bit(grp->index, &q->bitmaps[IR]);
__clear_bit(grp->index, &q->bitmaps[IB]);
} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
q->V = roundedS;
grp->S = roundedS;
grp->F = roundedS + (2ULL << grp->slot_shift);
s = qfq_calc_state(q, grp);
__set_bit(grp->index, &q->bitmaps[s]);
pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
s, q->bitmaps[s],
(unsigned long long) cl->S,
(unsigned long long) cl->F,
(unsigned long long) q->V);
skip_update:
qfq_slot_insert(grp, cl, roundedS);
return err;
}
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
struct qfq_class *cl)
{
unsigned int i, offset;
u64 roundedS;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
offset = (roundedS - grp->S) >> grp->slot_shift;
i = (grp->front + offset) % QFQ_MAX_SLOTS;
hlist_del(&cl->next);
if (hlist_empty(&grp->slots[i]))
__clear_bit(offset, &grp->full_slots);
}
/*
* called to forcibly destroy a queue.
* If the queue is not in the front bucket, or if it has
* other queues in the front bucket, we can simply remove
* the queue with no other side effects.
* Otherwise we must propagate the event up.
*/
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
struct qfq_group *grp = cl->grp;
unsigned long mask;
u64 roundedS;
int s;
cl->F = cl->S;
qfq_slot_remove(q, grp, cl);
if (!grp->full_slots) {
__clear_bit(grp->index, &q->bitmaps[IR]);
__clear_bit(grp->index, &q->bitmaps[EB]);
__clear_bit(grp->index, &q->bitmaps[IB]);
if (test_bit(grp->index, &q->bitmaps[ER]) &&
!(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
if (mask)
mask = ~((1UL << __fls(mask)) - 1);
else
mask = ~0UL;
qfq_move_groups(q, mask, EB, ER);
qfq_move_groups(q, mask, IB, IR);
}
__clear_bit(grp->index, &q->bitmaps[ER]);
} else if (hlist_empty(&grp->slots[grp->front])) {
cl = qfq_slot_scan(grp);
roundedS = qfq_round_down(cl->S, grp->slot_shift);
if (grp->S != roundedS) {
__clear_bit(grp->index, &q->bitmaps[ER]);
__clear_bit(grp->index, &q->bitmaps[IR]);
__clear_bit(grp->index, &q->bitmaps[EB]);
__clear_bit(grp->index, &q->bitmaps[IB]);
grp->S = roundedS;
grp->F = roundedS + (2ULL << grp->slot_shift);
s = qfq_calc_state(q, grp);
__set_bit(grp->index, &q->bitmaps[s]);
}
}
qfq_update_eligible(q, q->V);
}
static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)arg;
if (cl->qdisc->q.qlen == 0)
qfq_deactivate_class(q, cl);
}
static unsigned int qfq_drop(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
unsigned int i, j, len;
for (i = 0; i <= QFQ_MAX_INDEX; i++) {
grp = &q->groups[i];
for (j = 0; j < QFQ_MAX_SLOTS; j++) {
struct qfq_class *cl;
struct hlist_node *n;
hlist_for_each_entry(cl, n, &grp->slots[j], next) {
if (!cl->qdisc->ops->drop)
continue;
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
sch->q.qlen--;
if (!cl->qdisc->q.qlen)
qfq_deactivate_class(q, cl);
return len;
}
}
}
}
return 0;
}
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
int i, j, err;
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
for (i = 0; i <= QFQ_MAX_INDEX; i++) {
grp = &q->groups[i];
grp->index = i;
grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
- (QFQ_MAX_INDEX - i);
for (j = 0; j < QFQ_MAX_SLOTS; j++)
INIT_HLIST_HEAD(&grp->slots[j]);
}
return 0;
}
static void qfq_reset_qdisc(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_group *grp;
struct qfq_class *cl;
struct hlist_node *n, *tmp;
unsigned int i, j;
for (i = 0; i <= QFQ_MAX_INDEX; i++) {
grp = &q->groups[i];
for (j = 0; j < QFQ_MAX_SLOTS; j++) {
hlist_for_each_entry_safe(cl, n, tmp,
&grp->slots[j], next) {
qfq_deactivate_class(q, cl);
}
}
}
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
qdisc_reset(cl->qdisc);
}
sch->q.qlen = 0;
}
static void qfq_destroy_qdisc(struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct hlist_node *n, *next;
unsigned int i;
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
common.hnode) {
qfq_destroy_class(sch, cl);
}
}
qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops qfq_class_ops = {
.change = qfq_change_class,
.delete = qfq_delete_class,
.get = qfq_get_class,
.put = qfq_put_class,
.tcf_chain = qfq_tcf_chain,
.bind_tcf = qfq_bind_tcf,
.unbind_tcf = qfq_unbind_tcf,
.graft = qfq_graft_class,
.leaf = qfq_class_leaf,
.qlen_notify = qfq_qlen_notify,
.dump = qfq_dump_class,
.dump_stats = qfq_dump_class_stats,
.walk = qfq_walk,
};
static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
.cl_ops = &qfq_class_ops,
.id = "qfq",
.priv_size = sizeof(struct qfq_sched),
.enqueue = qfq_enqueue,
.dequeue = qfq_dequeue,
.peek = qdisc_peek_dequeued,
.drop = qfq_drop,
.init = qfq_init_qdisc,
.reset = qfq_reset_qdisc,
.destroy = qfq_destroy_qdisc,
.owner = THIS_MODULE,
};
static int __init qfq_init(void)
{
return register_qdisc(&qfq_qdisc_ops);
}
static void __exit qfq_exit(void)
{
unregister_qdisc(&qfq_qdisc_ops);
}
module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
ChiefzReloaded/lge-kernel-startablet-ICS | drivers/mtd/nand/gpio.c | 1205 | 9398 | /*
* drivers/mtd/nand/gpio.c
*
* Updated, and converted to generic GPIO based driver by Russell King.
*
* Written by Ben Dooks <ben@simtec.co.uk>
* Based on 2.4 version by Mark Whittaker
*
* © 2004 Simtec Electronics
*
* Device driver for NAND connected via GPIO
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct mtd_info mtd_info;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
};
#define gpio_nand_getpriv(x) container_of(x, struct gpiomtd, mtd_info)
#ifdef CONFIG_ARM
/* gpio_nand_dosync()
*
* Make sure the GPIO state changes occur in-order with writes to NAND
* memory region.
* Needed on PXA due to bus-reordering within the SoC itself (see the
* section on I/O ordering in the PXA manual, section 2.3, p. 35).
*/
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
unsigned long tmp;
if (gpiomtd->io_sync) {
/*
* Linux memory barriers don't cater for what's required here.
* What's required is what's here - a read from a separate
* region with a dependency on that read.
*/
tmp = readl(gpiomtd->io_sync);
asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif
static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
return;
writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
gpio_nand_dosync(gpiomtd);
}
static void gpio_nand_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
writesb(this->IO_ADDR_W, buf, len);
}
static void gpio_nand_readbuf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
readsb(this->IO_ADDR_R, buf, len);
}
static int gpio_nand_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
unsigned char read, *p = (unsigned char *) buf;
int i, err = 0;
for (i = 0; i < len; i++) {
read = readb(this->IO_ADDR_R);
if (read != p[i]) {
pr_debug("%s: err at %d (read %04x vs %04x)\n",
__func__, i, read, p[i]);
err = -EFAULT;
}
}
return err;
}
static void gpio_nand_writebuf16(struct mtd_info *mtd, const u_char *buf,
int len)
{
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
writesw(this->IO_ADDR_W, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
for (i = 0; i < len; i += 2, ptr++)
writew(*ptr, this->IO_ADDR_W);
}
}
static void gpio_nand_readbuf16(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *this = mtd->priv;
if (IS_ALIGNED((unsigned long)buf, 2)) {
readsw(this->IO_ADDR_R, buf, len>>1);
} else {
int i;
unsigned short *ptr = (unsigned short *)buf;
for (i = 0; i < len; i += 2, ptr++)
*ptr = readw(this->IO_ADDR_R);
}
}
static int gpio_nand_verifybuf16(struct mtd_info *mtd, const u_char *buf,
int len)
{
struct nand_chip *this = mtd->priv;
unsigned short read, *p = (unsigned short *) buf;
int i, err = 0;
len >>= 1;
for (i = 0; i < len; i++) {
read = readw(this->IO_ADDR_R);
if (read != p[i]) {
pr_debug("%s: err at %d (read %04x vs %04x)\n",
__func__, i, read, p[i]);
err = -EFAULT;
}
}
return err;
}
static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
static int __devexit gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
struct resource *res;
nand_release(&gpiomtd->mtd_info);
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
gpio_set_value(gpiomtd->plat.gpio_nce, 1);
gpio_free(gpiomtd->plat.gpio_cle);
gpio_free(gpiomtd->plat.gpio_ale);
gpio_free(gpiomtd->plat.gpio_nce);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_free(gpiomtd->plat.gpio_nwp);
gpio_free(gpiomtd->plat.gpio_rdy);
kfree(gpiomtd);
return 0;
}
static void __iomem *request_and_remap(struct resource *res, size_t size,
const char *name, int *err)
{
void __iomem *ptr;
if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
}
static int __devinit gpio_nand_probe(struct platform_device *dev)
{
struct gpiomtd *gpiomtd;
struct nand_chip *this;
struct resource *res0, *res1;
int ret;
if (!dev->dev.platform_data)
return -EINVAL;
res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res0)
return -EINVAL;
gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL);
if (gpiomtd == NULL) {
dev_err(&dev->dev, "failed to create NAND MTD\n");
return -ENOMEM;
}
this = &gpiomtd->nand_chip;
this->IO_ADDR_R = request_and_remap(res0, 2, "NAND", &ret);
if (!this->IO_ADDR_R) {
dev_err(&dev->dev, "unable to map NAND\n");
goto err_map;
}
res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
if (res1) {
gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
if (!gpiomtd->io_sync) {
dev_err(&dev->dev, "unable to map sync NAND\n");
goto err_sync;
}
}
memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
goto err_nce;
gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
ret = gpio_request(gpiomtd->plat.gpio_nwp, "NAND NWP");
if (ret)
goto err_nwp;
gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
}
ret = gpio_request(gpiomtd->plat.gpio_ale, "NAND ALE");
if (ret)
goto err_ale;
gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
ret = gpio_request(gpiomtd->plat.gpio_cle, "NAND CLE");
if (ret)
goto err_cle;
gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
ret = gpio_request(gpiomtd->plat.gpio_rdy, "NAND RDY");
if (ret)
goto err_rdy;
gpio_direction_input(gpiomtd->plat.gpio_rdy);
this->IO_ADDR_W = this->IO_ADDR_R;
this->ecc.mode = NAND_ECC_SOFT;
this->options = gpiomtd->plat.options;
this->chip_delay = gpiomtd->plat.chip_delay;
/* install our routines */
this->cmd_ctrl = gpio_nand_cmd_ctrl;
this->dev_ready = gpio_nand_devready;
if (this->options & NAND_BUSWIDTH_16) {
this->read_buf = gpio_nand_readbuf16;
this->write_buf = gpio_nand_writebuf16;
this->verify_buf = gpio_nand_verifybuf16;
} else {
this->read_buf = gpio_nand_readbuf;
this->write_buf = gpio_nand_writebuf;
this->verify_buf = gpio_nand_verifybuf;
}
/* set the mtd private data for the nand driver */
gpiomtd->mtd_info.priv = this;
gpiomtd->mtd_info.owner = THIS_MODULE;
if (nand_scan(&gpiomtd->mtd_info, 1)) {
dev_err(&dev->dev, "no nand chips found?\n");
ret = -ENXIO;
goto err_wp;
}
if (gpiomtd->plat.adjust_parts)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
add_mtd_partitions(&gpiomtd->mtd_info, gpiomtd->plat.parts,
gpiomtd->plat.num_parts);
platform_set_drvdata(dev, gpiomtd);
return 0;
err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
gpio_free(gpiomtd->plat.gpio_rdy);
err_rdy:
gpio_free(gpiomtd->plat.gpio_cle);
err_cle:
gpio_free(gpiomtd->plat.gpio_ale);
err_ale:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_free(gpiomtd->plat.gpio_nwp);
err_nwp:
gpio_free(gpiomtd->plat.gpio_nce);
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
}
static struct platform_driver gpio_nand_driver = {
.probe = gpio_nand_probe,
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
},
};
static int __init gpio_nand_init(void)
{
printk(KERN_INFO "GPIO NAND driver, © 2004 Simtec Electronics\n");
return platform_driver_register(&gpio_nand_driver);
}
static void __exit gpio_nand_exit(void)
{
platform_driver_unregister(&gpio_nand_driver);
}
module_init(gpio_nand_init);
module_exit(gpio_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");
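/*
 * Illustrative board-file sketch (an assumption, not part of this
 * driver): how a platform might register a "gpio-nand" device for the
 * probe routine above. All GPIO numbers, addresses and partition sizes
 * are made-up placeholders; the block is guarded by #if 0 so it is
 * never compiled here.
 */
#if 0
static struct mtd_partition example_nand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = SZ_1M },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpio_nand_platdata example_nand_pdata = {
	.gpio_nce   = 10,	/* hypothetical GPIO numbers */
	.gpio_cle   = 11,
	.gpio_ale   = 12,
	.gpio_rdy   = 13,
	.gpio_nwp   = -1,	/* invalid => no write-protect line wired */
	.parts      = example_nand_parts,
	.num_parts  = ARRAY_SIZE(example_nand_parts),
	.chip_delay = 25,	/* tR in microseconds, chip dependent */
};

static struct resource example_nand_res = {
	.start = 0x40000000,	/* hypothetical NAND data window */
	.end   = 0x40000000 + 3,
	.flags = IORESOURCE_MEM,
};

static struct platform_device example_nand_device = {
	.name          = "gpio-nand",
	.num_resources = 1,
	.resource      = &example_nand_res,
	.dev = {
		.platform_data = &example_nand_pdata,
	},
};
#endif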
| gpl-2.0 |
spica234/HP-TestBuild-Repo-upwords-Sr3R | sound/pci/ctxfi/ctdaio.c | 1205 | 17793 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctdaio.c
*
* @Brief
* This file contains the implementation of Digital Audio Input Output
* resource management object.
*
* @Author Liu Chun
* @Date May 23 2008
*
*/
#include "ctdaio.h"
#include "cthardware.h"
#include "ctimap.h"
#include <linux/slab.h>
#include <linux/kernel.h>
#define DAIO_RESOURCE_NUM NUM_DAIOTYP
#define DAIO_OUT_MAX SPDIFOO
union daio_usage {
struct {
unsigned short lineo1:1;
unsigned short lineo2:1;
unsigned short lineo3:1;
unsigned short lineo4:1;
unsigned short spdifoo:1;
unsigned short lineim:1;
unsigned short spdifio:1;
unsigned short spdifi1:1;
} bf;
unsigned short data;
};
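/*
 * Illustrative note (derived from the code below, not original
 * documentation): each bit of .data tracks one DAIO type;
 * daio_mgr_get_rsc()/daio_mgr_put_rsc() set and clear the bit at
 * position "type", assuming the DAIOTYP enum order matches this
 * bit-field layout (lineo1 == bit 0, ..., spdifi1 == bit 7).
 */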
struct daio_rsc_idx {
unsigned short left;
unsigned short right;
};
struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x00, .right = 0x01},
[LINEO2] = {.left = 0x18, .right = 0x19},
[LINEO3] = {.left = 0x08, .right = 0x09},
[LINEO4] = {.left = 0x10, .right = 0x11},
[LINEIM] = {.left = 0x1b5, .right = 0x1bd},
[SPDIFOO] = {.left = 0x20, .right = 0x21},
[SPDIFIO] = {.left = 0x15, .right = 0x1d},
[SPDIFI1] = {.left = 0x95, .right = 0x9d},
};
struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[LINEO1] = {.left = 0x40, .right = 0x41},
[LINEO2] = {.left = 0x60, .right = 0x61},
[LINEO3] = {.left = 0x50, .right = 0x51},
[LINEO4] = {.left = 0x70, .right = 0x71},
[LINEIM] = {.left = 0x45, .right = 0xc5},
[SPDIFOO] = {.left = 0x00, .right = 0x01},
[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
static int daio_master(struct rsc *rsc)
{
/* Actually, this is not the resource index of DAIO.
* For DAO, it is the input mapper index. And, for DAI,
* it is the output time-slot index. */
return rsc->conj = rsc->idx;
}
static int daio_index(const struct rsc *rsc)
{
return rsc->conj;
}
static int daio_out_next_conj(struct rsc *rsc)
{
return rsc->conj += 2;
}
static int daio_in_next_conj_20k1(struct rsc *rsc)
{
return rsc->conj += 0x200;
}
static int daio_in_next_conj_20k2(struct rsc *rsc)
{
return rsc->conj += 0x100;
}
static struct rsc_ops daio_out_rsc_ops = {
.master = daio_master,
.next_conj = daio_out_next_conj,
.index = daio_index,
.output_slot = NULL,
};
static struct rsc_ops daio_in_rsc_ops_20k1 = {
.master = daio_master,
.next_conj = daio_in_next_conj_20k1,
.index = NULL,
.output_slot = daio_index,
};
static struct rsc_ops daio_in_rsc_ops_20k2 = {
.master = daio_master,
.next_conj = daio_in_next_conj_20k2,
.index = NULL,
.output_slot = daio_index,
};
static unsigned int daio_device_index(enum DAIOTYP type, struct hw *hw)
{
switch (hw->chip_type) {
case ATC20K1:
switch (type) {
case SPDIFOO: return 0;
case SPDIFIO: return 0;
case SPDIFI1: return 1;
case LINEO1: return 4;
case LINEO2: return 7;
case LINEO3: return 5;
case LINEO4: return 6;
case LINEIM: return 7;
default: return -EINVAL;
}
case ATC20K2:
switch (type) {
case SPDIFOO: return 0;
case SPDIFIO: return 0;
case LINEO1: return 4;
case LINEO2: return 7;
case LINEO3: return 5;
case LINEO4: return 6;
case LINEIM: return 4;
default: return -EINVAL;
}
default:
return -EINVAL;
}
}
static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc);
static int dao_spdif_get_spos(struct dao *dao, unsigned int *spos)
{
((struct hw *)dao->hw)->dao_get_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_spdif_set_spos(struct dao *dao, unsigned int spos)
{
((struct hw *)dao->hw)->dao_set_spos(dao->ctrl_blk, spos);
return 0;
}
static int dao_commit_write(struct dao *dao)
{
((struct hw *)dao->hw)->dao_commit_write(dao->hw,
daio_device_index(dao->daio.type, dao->hw), dao->ctrl_blk);
return 0;
}
static int dao_set_left_input(struct dao *dao, struct rsc *input)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscl.ops->master(&daio->rscl);
for (i = 0; i < daio->rscl.msr; i++, entry++) {
entry->slot = input->ops->output_slot(input);
entry->user = entry->addr = daio->rscl.ops->index(&daio->rscl);
dao->mgr->imap_add(dao->mgr, entry);
dao->imappers[i] = entry;
input->ops->next_conj(input);
daio->rscl.ops->next_conj(&daio->rscl);
}
input->ops->master(input);
daio->rscl.ops->master(&daio->rscl);
return 0;
}
static int dao_set_right_input(struct dao *dao, struct rsc *input)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL);
if (!entry)
return -ENOMEM;
/* Program master and conjugate resources */
input->ops->master(input);
daio->rscr.ops->master(&daio->rscr);
for (i = 0; i < daio->rscr.msr; i++, entry++) {
entry->slot = input->ops->output_slot(input);
entry->user = entry->addr = daio->rscr.ops->index(&daio->rscr);
dao->mgr->imap_add(dao->mgr, entry);
dao->imappers[daio->rscl.msr + i] = entry;
input->ops->next_conj(input);
daio->rscr.ops->next_conj(&daio->rscr);
}
input->ops->master(input);
daio->rscr.ops->master(&daio->rscr);
return 0;
}
static int dao_clear_left_input(struct dao *dao)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
if (!dao->imappers[0])
return 0;
entry = dao->imappers[0];
dao->mgr->imap_delete(dao->mgr, entry);
/* Program conjugate resources */
for (i = 1; i < daio->rscl.msr; i++) {
entry = dao->imappers[i];
dao->mgr->imap_delete(dao->mgr, entry);
dao->imappers[i] = NULL;
}
kfree(dao->imappers[0]);
dao->imappers[0] = NULL;
return 0;
}
static int dao_clear_right_input(struct dao *dao)
{
struct imapper *entry;
struct daio *daio = &dao->daio;
int i;
if (!dao->imappers[daio->rscl.msr])
return 0;
entry = dao->imappers[daio->rscl.msr];
dao->mgr->imap_delete(dao->mgr, entry);
/* Program conjugate resources */
for (i = 1; i < daio->rscr.msr; i++) {
entry = dao->imappers[daio->rscl.msr + i];
dao->mgr->imap_delete(dao->mgr, entry);
dao->imappers[daio->rscl.msr + i] = NULL;
}
kfree(dao->imappers[daio->rscl.msr]);
dao->imappers[daio->rscl.msr] = NULL;
return 0;
}
static struct dao_rsc_ops dao_ops = {
.set_spos = dao_spdif_set_spos,
.commit_write = dao_commit_write,
.get_spos = dao_spdif_get_spos,
.reinit = dao_rsc_reinit,
.set_left_input = dao_set_left_input,
.set_right_input = dao_set_right_input,
.clear_left_input = dao_clear_left_input,
.clear_right_input = dao_clear_right_input,
};
static int dai_set_srt_srcl(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
((struct hw *)dai->hw)->dai_srt_set_srcm(dai->ctrl_blk,
src->ops->index(src));
return 0;
}
static int dai_set_srt_srcr(struct dai *dai, struct rsc *src)
{
src->ops->master(src);
((struct hw *)dai->hw)->dai_srt_set_srco(dai->ctrl_blk,
src->ops->index(src));
return 0;
}
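/*
 * Illustrative note: the loop below computes rsr = log2(msr) for
 * power-of-two msr values, e.g. msr == 4 gives rsr == 2.
 */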
static int dai_set_srt_msr(struct dai *dai, unsigned int msr)
{
unsigned int rsr;
for (rsr = 0; msr > 1; msr >>= 1)
rsr++;
((struct hw *)dai->hw)->dai_srt_set_rsr(dai->ctrl_blk, rsr);
return 0;
}
static int dai_set_enb_src(struct dai *dai, unsigned int enb)
{
((struct hw *)dai->hw)->dai_srt_set_ec(dai->ctrl_blk, enb);
return 0;
}
static int dai_set_enb_srt(struct dai *dai, unsigned int enb)
{
((struct hw *)dai->hw)->dai_srt_set_et(dai->ctrl_blk, enb);
return 0;
}
static int dai_commit_write(struct dai *dai)
{
((struct hw *)dai->hw)->dai_commit_write(dai->hw,
daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
return 0;
}
static struct dai_rsc_ops dai_ops = {
.set_srt_srcl = dai_set_srt_srcl,
.set_srt_srcr = dai_set_srt_srcr,
.set_srt_msr = dai_set_srt_msr,
.set_enb_src = dai_set_enb_src,
.set_enb_srt = dai_set_enb_srt,
.commit_write = dai_commit_write,
};
static int daio_rsc_init(struct daio *daio,
const struct daio_desc *desc,
void *hw)
{
int err;
unsigned int idx_l, idx_r;
switch (((struct hw *)hw)->chip_type) {
case ATC20K1:
idx_l = idx_20k1[desc->type].left;
idx_r = idx_20k1[desc->type].right;
break;
case ATC20K2:
idx_l = idx_20k2[desc->type].left;
idx_r = idx_20k2[desc->type].right;
break;
default:
return -EINVAL;
}
err = rsc_init(&daio->rscl, idx_l, DAIO, desc->msr, hw);
if (err)
return err;
err = rsc_init(&daio->rscr, idx_r, DAIO, desc->msr, hw);
if (err)
goto error1;
/* Set daio->rscl/r->ops to daio specific ones */
if (desc->type <= DAIO_OUT_MAX) {
daio->rscl.ops = daio->rscr.ops = &daio_out_rsc_ops;
} else {
switch (((struct hw *)hw)->chip_type) {
case ATC20K1:
daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k1;
break;
case ATC20K2:
daio->rscl.ops = daio->rscr.ops = &daio_in_rsc_ops_20k2;
break;
default:
break;
}
}
daio->type = desc->type;
return 0;
error1:
rsc_uninit(&daio->rscl);
return err;
}
static int daio_rsc_uninit(struct daio *daio)
{
rsc_uninit(&daio->rscl);
rsc_uninit(&daio->rscr);
return 0;
}
static int dao_rsc_init(struct dao *dao,
const struct daio_desc *desc,
struct daio_mgr *mgr)
{
struct hw *hw = mgr->mgr.hw;
unsigned int conf;
int err;
err = daio_rsc_init(&dao->daio, desc, mgr->mgr.hw);
if (err)
return err;
dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
if (!dao->imappers) {
err = -ENOMEM;
goto error1;
}
dao->ops = &dao_ops;
dao->mgr = mgr;
dao->hw = hw;
err = hw->dao_get_ctrl_blk(&dao->ctrl_blk);
if (err)
goto error2;
hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw));
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
conf = (desc->msr & 0x7) | (desc->passthru << 3);
hw->daio_mgr_dao_init(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw), conf);
hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
daio_device_index(dao->daio.type, hw));
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
return 0;
error2:
kfree(dao->imappers);
dao->imappers = NULL;
error1:
daio_rsc_uninit(&dao->daio);
return err;
}
static int dao_rsc_uninit(struct dao *dao)
{
if (dao->imappers) {
if (dao->imappers[0])
dao_clear_left_input(dao);
if (dao->imappers[dao->daio.rscl.msr])
dao_clear_right_input(dao);
kfree(dao->imappers);
dao->imappers = NULL;
}
((struct hw *)dao->hw)->dao_put_ctrl_blk(dao->ctrl_blk);
dao->hw = dao->ctrl_blk = NULL;
daio_rsc_uninit(&dao->daio);
return 0;
}
static int dao_rsc_reinit(struct dao *dao, const struct dao_desc *desc)
{
struct daio_mgr *mgr = dao->mgr;
struct daio_desc dsc = {0};
dsc.type = dao->daio.type;
dsc.msr = desc->msr;
dsc.passthru = desc->passthru;
dao_rsc_uninit(dao);
return dao_rsc_init(dao, &dsc, mgr);
}
static int dai_rsc_init(struct dai *dai,
const struct daio_desc *desc,
struct daio_mgr *mgr)
{
int err;
struct hw *hw = mgr->mgr.hw;
unsigned int rsr, msr;
err = daio_rsc_init(&dai->daio, desc, mgr->mgr.hw);
if (err)
return err;
dai->ops = &dai_ops;
dai->hw = mgr->mgr.hw;
err = hw->dai_get_ctrl_blk(&dai->ctrl_blk);
if (err)
goto error1;
for (rsr = 0, msr = desc->msr; msr > 1; msr >>= 1)
rsr++;
hw->dai_srt_set_rsr(dai->ctrl_blk, rsr);
hw->dai_srt_set_drat(dai->ctrl_blk, 0);
/* default to disabling control of an SRC */
hw->dai_srt_set_ec(dai->ctrl_blk, 0);
hw->dai_srt_set_et(dai->ctrl_blk, 0); /* default to disabling SRT */
hw->dai_commit_write(hw,
daio_device_index(dai->daio.type, dai->hw), dai->ctrl_blk);
return 0;
error1:
daio_rsc_uninit(&dai->daio);
return err;
}
static int dai_rsc_uninit(struct dai *dai)
{
((struct hw *)dai->hw)->dai_put_ctrl_blk(dai->ctrl_blk);
dai->hw = dai->ctrl_blk = NULL;
daio_rsc_uninit(&dai->daio);
return 0;
}
static int daio_mgr_get_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
{
if (((union daio_usage *)mgr->rscs)->data & (0x1 << type))
return -ENOENT;
((union daio_usage *)mgr->rscs)->data |= (0x1 << type);
return 0;
}
static int daio_mgr_put_rsc(struct rsc_mgr *mgr, enum DAIOTYP type)
{
((union daio_usage *)mgr->rscs)->data &= ~(0x1 << type);
return 0;
}
static int get_daio_rsc(struct daio_mgr *mgr,
const struct daio_desc *desc,
struct daio **rdaio)
{
int err;
struct dai *dai = NULL;
struct dao *dao = NULL;
unsigned long flags;
*rdaio = NULL;
/* Check whether there are sufficient daio resources to meet request. */
spin_lock_irqsave(&mgr->mgr_lock, flags);
err = daio_mgr_get_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (err) {
printk(KERN_ERR "Can't meet DAIO resource request!\n");
return err;
}
/* Allocate mem for daio resource */
if (desc->type <= DAIO_OUT_MAX) {
dao = kzalloc(sizeof(*dao), GFP_KERNEL);
if (!dao) {
err = -ENOMEM;
goto error;
}
err = dao_rsc_init(dao, desc, mgr);
if (err)
goto error;
*rdaio = &dao->daio;
} else {
dai = kzalloc(sizeof(*dai), GFP_KERNEL);
if (!dai) {
err = -ENOMEM;
goto error;
}
err = dai_rsc_init(dai, desc, mgr);
if (err)
goto error;
*rdaio = &dai->daio;
}
mgr->daio_enable(mgr, *rdaio);
mgr->commit_write(mgr);
return 0;
error:
if (dao)
kfree(dao);
else if (dai)
kfree(dai);
spin_lock_irqsave(&mgr->mgr_lock, flags);
daio_mgr_put_rsc(&mgr->mgr, desc->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
return err;
}
static int put_daio_rsc(struct daio_mgr *mgr, struct daio *daio)
{
unsigned long flags;
mgr->daio_disable(mgr, daio);
mgr->commit_write(mgr);
spin_lock_irqsave(&mgr->mgr_lock, flags);
daio_mgr_put_rsc(&mgr->mgr, daio->type);
spin_unlock_irqrestore(&mgr->mgr_lock, flags);
if (daio->type <= DAIO_OUT_MAX) {
dao_rsc_uninit(container_of(daio, struct dao, daio));
kfree(container_of(daio, struct dao, daio));
} else {
dai_rsc_uninit(container_of(daio, struct dai, daio));
kfree(container_of(daio, struct dai, daio));
}
return 0;
}
static int daio_mgr_enb_daio(struct daio_mgr *mgr, struct daio *daio)
{
struct hw *hw = mgr->mgr.hw;
if (DAIO_OUT_MAX >= daio->type) {
hw->daio_mgr_enb_dao(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
} else {
hw->daio_mgr_enb_dai(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
}
return 0;
}
static int daio_mgr_dsb_daio(struct daio_mgr *mgr, struct daio *daio)
{
struct hw *hw = mgr->mgr.hw;
if (DAIO_OUT_MAX >= daio->type) {
hw->daio_mgr_dsb_dao(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
} else {
hw->daio_mgr_dsb_dai(mgr->mgr.ctrl_blk,
daio_device_index(daio->type, hw));
}
return 0;
}
static int daio_map_op(void *data, struct imapper *entry)
{
struct rsc_mgr *mgr = &((struct daio_mgr *)data)->mgr;
struct hw *hw = mgr->hw;
hw->daio_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
hw->daio_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
hw->daio_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
hw->daio_mgr_commit_write(mgr->hw, mgr->ctrl_blk);
return 0;
}
static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
if (!entry->addr && mgr->init_imap_added) {
input_mapper_delete(&mgr->imappers, mgr->init_imap,
daio_map_op, mgr);
mgr->init_imap_added = 0;
}
err = input_mapper_add(&mgr->imappers, entry, daio_map_op, mgr);
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
static int daio_imap_delete(struct daio_mgr *mgr, struct imapper *entry)
{
unsigned long flags;
int err;
spin_lock_irqsave(&mgr->imap_lock, flags);
err = input_mapper_delete(&mgr->imappers, entry, daio_map_op, mgr);
if (list_empty(&mgr->imappers)) {
input_mapper_add(&mgr->imappers, mgr->init_imap,
daio_map_op, mgr);
mgr->init_imap_added = 1;
}
spin_unlock_irqrestore(&mgr->imap_lock, flags);
return err;
}
static int daio_mgr_commit_write(struct daio_mgr *mgr)
{
struct hw *hw = mgr->mgr.hw;
hw->daio_mgr_commit_write(hw, mgr->mgr.ctrl_blk);
return 0;
}
int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
{
int err, i;
struct daio_mgr *daio_mgr;
struct imapper *entry;
*rdaio_mgr = NULL;
daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL);
if (!daio_mgr)
return -ENOMEM;
err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw);
if (err)
goto error1;
spin_lock_init(&daio_mgr->mgr_lock);
spin_lock_init(&daio_mgr->imap_lock);
INIT_LIST_HEAD(&daio_mgr->imappers);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto error2;
}
entry->slot = entry->addr = entry->next = entry->user = 0;
list_add(&entry->list, &daio_mgr->imappers);
daio_mgr->init_imap = entry;
daio_mgr->init_imap_added = 1;
daio_mgr->get_daio = get_daio_rsc;
daio_mgr->put_daio = put_daio_rsc;
daio_mgr->daio_enable = daio_mgr_enb_daio;
daio_mgr->daio_disable = daio_mgr_dsb_daio;
daio_mgr->imap_add = daio_imap_add;
daio_mgr->imap_delete = daio_imap_delete;
daio_mgr->commit_write = daio_mgr_commit_write;
for (i = 0; i < 8; i++) {
((struct hw *)hw)->daio_mgr_dsb_dao(daio_mgr->mgr.ctrl_blk, i);
((struct hw *)hw)->daio_mgr_dsb_dai(daio_mgr->mgr.ctrl_blk, i);
}
((struct hw *)hw)->daio_mgr_commit_write(hw, daio_mgr->mgr.ctrl_blk);
*rdaio_mgr = daio_mgr;
return 0;
error2:
rsc_mgr_uninit(&daio_mgr->mgr);
error1:
kfree(daio_mgr);
return err;
}
int daio_mgr_destroy(struct daio_mgr *daio_mgr)
{
unsigned long flags;
/* free daio input mapper list */
spin_lock_irqsave(&daio_mgr->imap_lock, flags);
free_input_mapper_list(&daio_mgr->imappers);
spin_unlock_irqrestore(&daio_mgr->imap_lock, flags);
rsc_mgr_uninit(&daio_mgr->mgr);
kfree(daio_mgr);
return 0;
}
| gpl-2.0 |
zparallax/amplitude_aosp_12 | mm/rmap.c | 1205 | 53698 | /*
* mm/rmap.c - physical to virtual reverse mappings
*
* Copyright 2001, Rik van Riel <riel@conectiva.com.br>
* Released under the General Public License (GPL).
*
* Simple, low overhead reverse mapping scheme.
* Please try to keep this thing as modular as possible.
*
* Provides methods for unmapping each kind of mapped page:
* the anon methods track anonymous pages, and
* the file methods track pages belonging to an inode.
*
* Original design by Rik van Riel <riel@conectiva.com.br> 2001
* File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
* Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
* Contributions by Hugh Dickins 2003, 2004
*/
/*
* Lock ordering in mm:
*
* inode->i_mutex (while writing or truncating, not reading or faulting)
* mm->mmap_sem
* page->flags PG_locked (lock_page)
* mapping->i_mmap_mutex
* anon_vma->mutex
* mm->page_table_lock or pte_lock
* zone->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)
* bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within bdi.wb->list_lock in __sync_single_inode)
*
* anon_vma->mutex,mapping->i_mutex (memory_failure, collect_procs_anon)
* ->tasklist_lock
* pte map lock
*/
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
struct anon_vma *anon_vma;
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parent's anon_vma.
*/
anon_vma->root = anon_vma;
}
return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
* Synchronize against page_lock_anon_vma() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
* mutex_trylock() from page_lock_anon_vma(). This orders:
*
* page_lock_anon_vma() VS put_anon_vma()
* mutex_trylock() atomic_dec_and_test()
* LOCK MB
* atomic_read() mutex_is_locked()
*
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
if (mutex_is_locked(&anon_vma->root->mutex)) {
anon_vma_lock(anon_vma);
anon_vma_unlock(anon_vma);
}
kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}
static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
static void anon_vma_chain_link(struct vm_area_struct *vma,
struct anon_vma_chain *avc,
struct anon_vma *anon_vma)
{
avc->vma = vma;
avc->anon_vma = anon_vma;
list_add(&avc->same_vma, &vma->anon_vma_chain);
/*
* It's critical to add new vmas to the tail of the anon_vma,
* see comment in huge_memory.c:__split_huge_page().
*/
list_add_tail(&avc->same_anon_vma, &anon_vma->head);
}
/**
* anon_vma_prepare - attach an anon_vma to a memory region
* @vma: the memory region in question
*
* This makes sure the memory mapping described by 'vma' has
* an 'anon_vma' attached to it, so that we can associate the
* anonymous pages mapped into it with that anon_vma.
*
* The common case will be that we already have one, but if
* not we either need to find an adjacent mapping that we
* can re-use the anon_vma from (very common when the only
* reason for splitting a vma has been mprotect()), or we
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
* optimistically looked up an anon_vma in page_lock_anon_vma()
* and that may actually touch the spinlock even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
*
* As a result, we need to do proper anon_vma locking even
* for the new allocation. At the same time, we do not want
* to do any locking for the common case of already having
* an anon_vma.
*
* This must be called with the mmap_sem held for reading.
*/
int anon_vma_prepare(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
might_sleep();
if (unlikely(!anon_vma)) {
struct mm_struct *mm = vma->vm_mm;
struct anon_vma *allocated;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_enomem;
anon_vma = find_mergeable_anon_vma(vma);
allocated = NULL;
if (!anon_vma) {
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
allocated = anon_vma;
}
anon_vma_lock(anon_vma);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
allocated = NULL;
avc = NULL;
}
spin_unlock(&mm->page_table_lock);
anon_vma_unlock(anon_vma);
if (unlikely(allocated))
put_anon_vma(allocated);
if (unlikely(avc))
anon_vma_chain_free(avc);
}
return 0;
out_enomem_free_avc:
anon_vma_chain_free(avc);
out_enomem:
return -ENOMEM;
}
/*
* This is a useful helper function for locking the anon_vma root as
* we traverse the vma->anon_vma_chain, looping over anon_vma's that
* have the same vma.
*
* Such anon_vma's should have the same root, so you'd expect to see
* just a single mutex_lock for the whole traversal.
*/
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
struct anon_vma *new_root = anon_vma->root;
if (new_root != root) {
if (WARN_ON_ONCE(root))
mutex_unlock(&root->mutex);
root = new_root;
mutex_lock(&root->mutex);
}
return root;
}
static inline void unlock_anon_vma_root(struct anon_vma *root)
{
if (root)
mutex_unlock(&root->mutex);
}
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!avc)) {
unlock_anon_vma_root(root);
root = NULL;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto enomem_failure;
}
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
}
unlock_anon_vma_root(root);
return 0;
enomem_failure:
unlink_anon_vmas(dst);
return -ENOMEM;
}
/*
* Some rmap walks need to find all ptes/hugepmds without false
* negatives (like migrate and split_huge_page) while running
* concurrently with operations that copy or move pagetables (like
* mremap() and fork()). They depend on the anon_vma "same_anon_vma"
* list to be in a certain order: the dst_vma must be placed after the
* src_vma in the list. This is always guaranteed by fork() but
* mremap() needs to call this function to enforce it in case the
* dst_vma isn't newly allocated and chained with the anon_vma_clone()
* function but just an extension of a pre-existing vma through
* vma_merge.
*
* NOTE: the same_anon_vma list can still be changed by other
* processes while mremap runs because mremap doesn't hold the
* anon_vma mutex to prevent modifications to the list while it
* runs. All we need to enforce is that the relative order of this
* process vmas isn't changing (we don't care about other vmas
* order). Each vma corresponds to an anon_vma_chain structure so
* there's no risk that other processes calling anon_vma_moveto_tail()
* and changing the same_anon_vma list under mremap() will screw with
* the relative order of this process's vmas in the list, because
* they can't alter the order of any vma that belongs to this
* process. And there can't be another anon_vma_moveto_tail() running
* concurrently with mremap() coming from this process because we hold
* the mmap_sem for the whole mremap(). fork() ordering dependency
* also shouldn't be affected because fork() only cares that the
* parent vmas are placed in the list before the child vmas and
* anon_vma_moveto_tail() won't reorder vmas from either the fork()
* parent or child.
*/
void anon_vma_moveto_tail(struct vm_area_struct *dst)
{
struct anon_vma_chain *pavc;
struct anon_vma *root = NULL;
list_for_each_entry_reverse(pavc, &dst->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = pavc->anon_vma;
VM_BUG_ON(pavc->vma != dst);
root = lock_anon_vma_root(root, anon_vma);
list_del(&pavc->same_anon_vma);
list_add_tail(&pavc->same_anon_vma, &anon_vma->head);
}
unlock_anon_vma_root(root);
}
/*
* Attach vma to its own anon_vma, as well as to the anon_vmas that
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
/* Don't bother if the parent process has no anon_vma here. */
if (!pvma->anon_vma)
return 0;
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
*/
if (anon_vma_clone(vma, pvma))
return -ENOMEM;
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
/*
* The root anon_vma's spinlock is the lock actually used when we
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
* this anon_vma is freed, because the lock lives in the root.
*/
get_anon_vma(anon_vma->root);
/* Mark this anon_vma as the one where our new (COWed) pages go. */
vma->anon_vma = anon_vma;
anon_vma_lock(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma_unlock(anon_vma);
return 0;
out_error_free_anon_vma:
put_anon_vma(anon_vma);
out_error:
unlink_anon_vmas(vma);
return -ENOMEM;
}
void unlink_anon_vmas(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc, *next;
struct anon_vma *root = NULL;
/*
* Unlink each anon_vma chained to the VMA. This list is ordered
* from newest to oldest, ensuring the root anon_vma gets freed last.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
list_del(&avc->same_anon_vma);
/*
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
if (list_empty(&anon_vma->head))
continue;
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
unlock_anon_vma_root(root);
/*
* Iterate the list once more, it now only contains empty and unlinked
* anon_vmas, destroy them. Could not do before due to __put_anon_vma()
* needing to acquire the anon_vma->root->mutex.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
}
static void anon_vma_ctor(void *data)
{
struct anon_vma *anon_vma = data;
mutex_init(&anon_vma->mutex);
atomic_set(&anon_vma->refcount, 0);
INIT_LIST_HEAD(&anon_vma->head);
}
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}
/*
* Getting a lock on a stable anon_vma from a page off the LRU is tricky!
*
* Since there is no serialization whatsoever against page_remove_rmap(),
* the best this function can do is return a locked anon_vma that might
* have been relevant to this page.
*
* The page might have been remapped to a different anon_vma or the anon_vma
* returned may already be freed (and even reused).
*
* In case it was remapped to a different anon_vma, the new anon_vma will be a
* child of the old anon_vma, and the anon_vma lifetime rules will therefore
* ensure that any anon_vma obtained from the page will still be valid for as
* long as we observe page_mapped() [ hence all those page_mapped() tests ].
*
* All users of this function must be very careful when walking the anon_vma
* chain and verify that the page in question is indeed mapped in it
* [ something equivalent to page_mapped_in_vma() ].
*
* Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
* that the anon_vma pointer from page->mapping is valid if there is a
* mapcount, we can dereference the anon_vma after observing those.
*/
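/*
 * Illustration (an assumption spelled out, not original documentation):
 * an anonymous page stores the anon_vma pointer with the low
 * PAGE_MAPPING_ANON tag bit set in page->mapping; the subtraction
 * below recovers the plain pointer once the tag has been verified.
 */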
struct anon_vma *page_get_anon_vma(struct page *page)
{
struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
rcu_read_lock();
anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
/*
* If this page is still mapped, then its anon_vma cannot have been
* freed. But if it has been unmapped, we have no security against the
* anon_vma structure being freed and reused (for another anon_vma:
* SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
* above cannot corrupt).
*/
if (!page_mapped(page)) {
put_anon_vma(anon_vma);
anon_vma = NULL;
}
out:
rcu_read_unlock();
return anon_vma;
}
/*
* Similar to page_get_anon_vma() except it locks the anon_vma.
*
* It's a little more complex as it tries to keep the fast path to a single
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
* reference like with page_get_anon_vma() and then block on the mutex.
*/
struct anon_vma *page_lock_anon_vma(struct page *page)
{
struct anon_vma *anon_vma = NULL;
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
root_anon_vma = ACCESS_ONCE(anon_vma->root);
if (mutex_trylock(&root_anon_vma->mutex)) {
/*
* If the page is still mapped, then this anon_vma is still
* its anon_vma, and holding the mutex ensures that it will
* not go away, see anon_vma_free().
*/
if (!page_mapped(page)) {
mutex_unlock(&root_anon_vma->mutex);
anon_vma = NULL;
}
goto out;
}
/* trylock failed, we have to sleep */
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
if (!page_mapped(page)) {
put_anon_vma(anon_vma);
anon_vma = NULL;
goto out;
}
/* we pinned the anon_vma, it's safe to sleep */
rcu_read_unlock();
anon_vma_lock(anon_vma);
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
* and bail -- can't simply use put_anon_vma() because
* we'll deadlock on the anon_vma_lock() recursion.
*/
anon_vma_unlock(anon_vma);
__put_anon_vma(anon_vma);
anon_vma = NULL;
}
return anon_vma;
out:
rcu_read_unlock();
return anon_vma;
}
void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
anon_vma_unlock(anon_vma);
}
/*
* At what user virtual address is page expected in @vma?
* Returns virtual address or -EFAULT if page's index/offset is not
* within the range mapped by @vma.
*/
inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
unsigned long address;
if (unlikely(is_vm_hugetlb_page(vma)))
pgoff = page->index << huge_page_order(page_hstate(page));
address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
/* page should be within @vma mapping range */
return -EFAULT;
}
return address;
}
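/*
 * Worked example (illustrative numbers): with 4KiB pages, a vma with
 * vm_start == 0x40000000 and vm_pgoff == 100, and a page whose ->index
 * is 103, the computation above gives
 *	address = 0x40000000 + ((103 - 100) << 12) = 0x40003000,
 * which then passes the vm_start/vm_end range check.
 */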
/*
* At what user virtual address is page expected in vma?
* Caller should check the page is actually part of the vma.
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
* Note: swapoff's unuse_vma() is more efficient with this
* check, and needs it to match anon_vma when KSM is active.
*/
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
vma->vm_file->f_mapping != page->mapping)
return -EFAULT;
} else
return -EFAULT;
return vma_address(page, vma);
}
/*
* Check that @page is mapped at @address into @mm.
*
* If @sync is false, page_check_address may perform a racy check to avoid
* the page table lock when the pte is not present (helpful when reclaiming
* highly shared pages).
*
* On success returns with pte mapped and locked.
*/
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
unsigned long address, spinlock_t **ptlp, int sync)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
pte = huge_pte_offset(mm, address);
ptl = &mm->page_table_lock;
goto check;
}
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
return NULL;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return NULL;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return NULL;
if (pmd_trans_huge(*pmd))
return NULL;
pte = pte_offset_map(pmd, address);
/* Make a quick check before getting the lock */
if (!sync && !pte_present(*pte)) {
pte_unmap(pte);
return NULL;
}
ptl = pte_lockptr(mm, pmd);
check:
spin_lock(ptl);
if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
*ptlp = ptl;
return pte;
}
pte_unmap_unlock(pte, ptl);
return NULL;
}
/**
* page_mapped_in_vma - check whether a page is really mapped in a VMA
* @page: the page to test
* @vma: the VMA to test
*
* Returns 1 if the page is mapped into the page tables of the VMA, 0
* if the page is not mapped into the page tables of this VMA. Only
* valid for normal file or anonymous VMAs.
*/
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
unsigned long address;
pte_t *pte;
spinlock_t *ptl;
address = vma_address(page, vma);
if (address == -EFAULT) /* out of vma range */
return 0;
pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
if (!pte) /* the page is not in this mm */
return 0;
pte_unmap_unlock(pte, ptl);
return 1;
}
/*
* Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, unsigned int *mapcount,
unsigned long *vm_flags)
{
struct mm_struct *mm = vma->vm_mm;
int referenced = 0;
if (unlikely(PageTransHuge(page))) {
pmd_t *pmd;
spin_lock(&mm->page_table_lock);
/*
* rmap might return false positives; we must filter
* these out using page_check_address_pmd().
*/
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_FLAG);
if (!pmd) {
spin_unlock(&mm->page_table_lock);
goto out;
}
if (vma->vm_flags & VM_LOCKED) {
spin_unlock(&mm->page_table_lock);
*mapcount = 0; /* break early from loop */
*vm_flags |= VM_LOCKED;
goto out;
}
/* go ahead even if the pmd is pmd_trans_splitting() */
if (pmdp_clear_flush_young_notify(vma, address, pmd))
referenced++;
spin_unlock(&mm->page_table_lock);
} else {
pte_t *pte;
spinlock_t *ptl;
/*
* rmap might return false positives; we must filter
* these out using page_check_address().
*/
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
if (vma->vm_flags & VM_LOCKED) {
pte_unmap_unlock(pte, ptl);
*mapcount = 0; /* break early from loop */
*vm_flags |= VM_LOCKED;
goto out;
}
if (ptep_clear_flush_young_notify(vma, address, pte)) {
/*
* Don't treat a reference through a sequentially read
* mapping as such. If the page has been used in
* another mapping, we will catch it; if this other
* mapping is already gone, the unmap path will have
* set PG_referenced or activated the page.
*/
if (likely(!VM_SequentialReadHint(vma)))
referenced++;
}
pte_unmap_unlock(pte, ptl);
}
(*mapcount)--;
if (referenced)
*vm_flags |= vma->vm_flags;
out:
return referenced;
}
static int page_referenced_anon(struct page *page,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
unsigned int mapcount;
struct anon_vma *anon_vma;
struct anon_vma_chain *avc;
int referenced = 0;
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
return referenced;
mapcount = page_mapcount(page);
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
* cgroups
*/
if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
continue;
referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
}
page_unlock_anon_vma(anon_vma);
return referenced;
}
/**
* page_referenced_file - referenced check for object-based rmap
* @page: the page we're checking references on.
* @memcg: target memory control group
* @vm_flags: collect the vm_flags of those vmas that actually referenced the page
*
* For an object-based mapped page, find all the places it is mapped and
* check/clear the referenced flag. This is done by following the page->mapping
* pointer, then walking the chain of vmas it holds. It returns the number
* of references it found.
*
* This function is only called from page_referenced for object-based pages.
*/
static int page_referenced_file(struct page *page,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
unsigned int mapcount;
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma;
struct prio_tree_iter iter;
int referenced = 0;
/*
* The caller's checks on page->mapping and !PageAnon have made
* sure that this is a file page: the check for page->mapping
* excludes the case just before it gets set on an anon page.
*/
BUG_ON(PageAnon(page));
/*
* The page lock not only makes sure that page->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the
* structure at mapping cannot be freed and reused yet,
* so we can safely take mapping->i_mmap_mutex.
*/
BUG_ON(!PageLocked(page));
mutex_lock(&mapping->i_mmap_mutex);
/*
* i_mmap_mutex does not stabilize mapcount at all, but mapcount
* is more likely to be accurate if we note it after spinning.
*/
mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
* cgroups
*/
if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
continue;
referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
}
mutex_unlock(&mapping->i_mmap_mutex);
return referenced;
}
/**
* page_referenced - test if the page was referenced
* @page: the page to test
* @is_locked: caller holds lock on the page
* @memcg: target memory cgroup
* @vm_flags: collect the vm_flags of those vmas that actually referenced the page
*
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
*/
int page_referenced(struct page *page,
int is_locked,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
int referenced = 0;
int we_locked = 0;
*vm_flags = 0;
if (page_mapped(page) && page_rmapping(page)) {
if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
we_locked = trylock_page(page);
if (!we_locked) {
referenced++;
goto out;
}
}
if (unlikely(PageKsm(page)))
referenced += page_referenced_ksm(page, memcg,
vm_flags);
else if (PageAnon(page))
referenced += page_referenced_anon(page, memcg,
vm_flags);
else if (page->mapping)
referenced += page_referenced_file(page, memcg,
vm_flags);
if (we_locked)
unlock_page(page);
if (page_test_and_clear_young(page_to_pfn(page)))
referenced++;
}
out:
return referenced;
}
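/*
 * Illustrative caller (hypothetical, loosely modeled on vmscan): the
 * returned pte count plus the collected vm_flags decide whether the
 * page should stay active.  "example_page_was_used" is not a real
 * kernel function.
 */
static inline int example_page_was_used(struct page *page)
{
	unsigned long vm_flags;
	return page_referenced(page, 0, NULL, &vm_flags) > 0 ||
	       (vm_flags & VM_LOCKED);
}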
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
goto out;
if (pte_dirty(*pte) || pte_write(*pte)) {
pte_t entry;
flush_cache_page(vma, address, pte_pfn(*pte));
entry = ptep_clear_flush_notify(vma, address, pte);
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
set_pte_at(mm, address, pte, entry);
ret = 1;
}
pte_unmap_unlock(pte, ptl);
out:
return ret;
}
static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma;
struct prio_tree_iter iter;
int ret = 0;
BUG_ON(PageAnon(page));
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if (vma->vm_flags & VM_SHARED) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret += page_mkclean_one(page, vma, address);
}
}
mutex_unlock(&mapping->i_mmap_mutex);
return ret;
}
int page_mkclean(struct page *page)
{
int ret = 0;
BUG_ON(!PageLocked(page));
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
if (mapping) {
ret = page_mkclean_file(mapping, page);
if (page_test_and_clear_dirty(page_to_pfn(page), 1))
ret = 1;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
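/*
 * Illustrative use (not from this file): a writeback path that wants a
 * faithful pte dirty bit calls page_mkclean() under the page lock and
 * folds the result back into the page flags:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */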
/**
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to
* @address: the user virtual address mapped
*
* When a page belongs exclusively to one process after a COW event,
* that page can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling
* processes.
*/
void page_move_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!anon_vma);
VM_BUG_ON(page->index != linear_page_index(vma, address));
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
}
/**
* __page_set_anon_rmap - set up new anonymous rmap
* @page: Page to add to rmap
* @vma: VM area to add page to.
* @address: User virtual address of the mapping
* @exclusive: the page is exclusively owned by the current process
*/
static void __page_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
if (PageAnon(page))
return;
/*
* If the page isn't exclusively mapped into this vma,
* we must use the _oldest_ possible anon_vma for the
* page mapping!
*/
if (!exclusive)
anon_vma = anon_vma->root;
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
}
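/*
 * Illustrative note on the tagging above (not in the original file):
 * PAGE_MAPPING_ANON is bit 0, so an anon_vma at, say,
 * 0xffff880012345600 is stored in page->mapping as 0xffff880012345601;
 * PageAnon() tests that bit, and page_get_anon_vma() strips it again
 * by subtracting PAGE_MAPPING_ANON.
 */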
/**
* __page_check_anon_rmap - sanity check anonymous rmap addition
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*/
static void __page_check_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
/*
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
*
* We have exclusion against page_add_anon_rmap because the caller
* always holds the page locked, except if called from page_dup_rmap,
* in which case the page is already known to be setup.
*
* We have exclusion against page_add_new_anon_rmap because those pages
* are initially only visible via the pagetables, and the pte is locked
* over the call to page_add_new_anon_rmap.
*/
BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* The caller needs to hold the pte lock, and the page must be locked in
* the anon_vma case: to serialize mapping,index checking after setting,
* and to ensure that PageAnon is not being upgraded racily to PageKsm
* (but PageKsm is never downgraded to PageAnon).
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
do_page_add_anon_rmap(page, vma, address, 0);
}
/*
* Special version of the above for do_swap_page, which often runs
* into pages that are exclusively owned by the current process.
* Everybody else should continue to use page_add_anon_rmap above.
*/
void do_page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
int first = atomic_inc_and_test(&page->_mapcount);
if (first) {
if (!PageTransHuge(page))
__inc_zone_page_state(page, NR_ANON_PAGES);
else
__inc_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
}
if (unlikely(PageKsm(page)))
return;
VM_BUG_ON(!PageLocked(page));
/* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
else
__page_check_anon_rmap(page, vma, address);
}
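/*
 * Worked example (illustrative): _mapcount starts at -1 for an
 * unmapped page, so the first atomic_inc_and_test() above moves it to
 * 0 and returns true ("first"); a second mapper moves it to 1 and
 * returns false.  Symmetrically, page_remove_rmap() below only does
 * its teardown once atomic_add_negative(-1, ...) reaches -1 again.
 */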
/**
* page_add_new_anon_rmap - add pte mapping to a new anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
* Page does not have to be locked.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
if (!PageTransHuge(page))
__inc_zone_page_state(page, NR_ANON_PAGES);
else
__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__page_set_anon_rmap(page, vma, address, 1);
if (page_evictable(page, vma))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
else
add_page_to_unevictable_list(page);
}
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
*
* The caller needs to hold the pte lock.
*/
void page_add_file_rmap(struct page *page)
{
bool locked;
unsigned long flags;
mem_cgroup_begin_update_page_stat(page, &locked, &flags);
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
}
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
*
* The caller needs to hold the pte lock.
*/
void page_remove_rmap(struct page *page)
{
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
/*
* The anon case has no mem_cgroup page_stat to update; but may
* uncharge_page() below, where the lock ordering can deadlock if
* we hold the lock against page_stat move: so avoid it on anon.
*/
if (!anon)
mem_cgroup_begin_update_page_stat(page, &locked, &flags);
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
goto out;
/*
* Now that the last pte has gone, s390 must transfer dirty
* flag from storage key to struct page. We can usually skip
* this if the page is anon, so about to be freed; but perhaps
* not if it's in swapcache - there might be another pte slot
* containing the swap entry, but page not yet written to swap.
*/
if ((!anon || PageSwapCache(page)) &&
page_test_and_clear_dirty(page_to_pfn(page), 1))
set_page_dirty(page);
/*
* Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
* and not charged by memcg for now.
*/
if (unlikely(PageHuge(page)))
goto out;
if (anon) {
mem_cgroup_uncharge_page(page);
if (!PageTransHuge(page))
__dec_zone_page_state(page, NR_ANON_PAGES);
else
__dec_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
}
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
* before us: so leave the reset to free_hot_cold_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
*/
out:
if (!anon)
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
/*
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
*/
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, enum ttu_flags flags)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
/*
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
if (!(flags & TTU_IGNORE_MLOCK)) {
if (vma->vm_flags & VM_LOCKED)
goto out_mlock;
if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out_unmap;
}
if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
ret = SWAP_FAIL;
goto out_unmap;
}
}
/* Nuke the page table entry. */
flush_cache_page(vma, address, page_to_pfn(page));
pteval = ptep_clear_flush_notify(vma, address, pte);
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
if (PageAnon(page))
dec_mm_counter(mm, MM_ANONPAGES);
else
dec_mm_counter(mm, MM_FILEPAGES);
set_pte_at(mm, address, pte,
swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
if (PageSwapCache(page)) {
/*
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
if (swap_duplicate(entry) < 0) {
set_pte_at(mm, address, pte, pteval);
ret = SWAP_FAIL;
goto out_unmap;
}
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
} else if (IS_ENABLED(CONFIG_MIGRATION)) {
/*
* Store the pfn of the page in a special migration
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(TTU_ACTION(flags) == TTU_MIGRATION)) {
/* Establish migration entry for a file page */
swp_entry_t entry;
entry = make_migration_entry(page, pte_write(pteval));
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
} else
dec_mm_counter(mm, MM_FILEPAGES);
page_remove_rmap(page);
page_cache_release(page);
out_unmap:
pte_unmap_unlock(pte, ptl);
out:
return ret;
out_mlock:
pte_unmap_unlock(pte, ptl);
/*
* We need mmap_sem locking; otherwise the VM_LOCKED check is racy
* and gives an unstable result.  We also can't block here, because
* we already hold anon_vma->mutex or mapping->i_mmap_mutex.
* If the trylock fails, the page remains on the evictable lru and
* vmscan can later retry moving it to the unevictable lru if the
* page is actually mlocked.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
if (vma->vm_flags & VM_LOCKED) {
mlock_vma_page(page);
ret = SWAP_MLOCK;
}
up_read(&vma->vm_mm->mmap_sem);
}
return ret;
}
/*
* objrmap doesn't work for nonlinear VMAs because the assumption that
* offset-into-file correlates with offset-into-virtual-addresses does not hold.
* Consequently, given a particular page and its ->index, we cannot locate the
* ptes which are mapping that page without an exhaustive linear search.
*
* So what this code does is a mini "virtual scan" of each nonlinear VMA which
* maps the file to which the target page belongs. The ->vm_private_data field
* holds the current cursor into that scan. Successive searches will circulate
* around the vma's virtual address space.
*
* So as more replacement pressure is applied to the pages in a nonlinear VMA,
* more scanning pressure is placed against them as well. Eventually pages
* will become fully unmapped and are eligible for eviction.
*
* For very sparsely populated VMAs this is a little inefficient - chances are
* there won't be many ptes located within the scan cluster. In this case
* maybe we could scan further - to the end of the pte page, perhaps.
*
* Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
* acquire it without blocking. If vma locked, mlock the pages in the cluster,
* rather than unmapping them. If we encounter the "check_page" that vmscan is
* trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
*/
#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
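/*
 * Worked example (illustrative): with 4KiB pages and 2MiB PMDs,
 * CLUSTER_SIZE is min(128KiB, 2MiB) == 128KiB, i.e. 32 ptes per call,
 * and CLUSTER_MASK rounds the scan cursor down to a 128KiB boundary.
 */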
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
struct vm_area_struct *vma, struct page *check_page)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
struct page *page;
unsigned long address;
unsigned long end;
int ret = SWAP_AGAIN;
int locked_vma = 0;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
if (address < vma->vm_start)
address = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
return ret;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return ret;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return ret;
/*
* If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, address, *pte);
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
mlock_vma_page(page); /* no-op if already mlocked */
if (page == check_page)
ret = SWAP_MLOCK;
continue; /* don't unmap */
}
if (ptep_clear_flush_young_notify(vma, address, pte))
continue;
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush_notify(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address))
set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, MM_FILEPAGES);
(*mapcount)--;
}
pte_unmap_unlock(pte - 1, ptl);
if (locked_vma)
up_read(&vma->vm_mm->mmap_sem);
return ret;
}
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
if (!maybe_stack)
return false;
if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
VM_STACK_INCOMPLETE_SETUP)
return true;
return false;
}
/**
* try_to_unmap_anon - unmap or unlock anonymous page using the object-based
* rmap method
* @page: the page to unmap/unlock
* @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
*
* This function is only called from try_to_unmap/try_to_munlock for
* anonymous pages.
* When called from try_to_munlock(), the mmap_sem of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* VM_LOCKED.
*/
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
struct anon_vma *anon_vma;
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
return ret;
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long address;
/*
* During exec, a temporary VMA is setup and later moved.
* The VMA is moved under the anon_vma lock but not the
* page tables leading to a race where migration cannot
* find the migration ptes. Rather than increasing the
* locking requirements of exec(), migration skips
* temporary VMAs until after exec() completes.
*/
if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
is_vma_temporary_stack(vma))
continue;
address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = try_to_unmap_one(page, vma, address, flags);
if (ret != SWAP_AGAIN || !page_mapped(page))
break;
}
page_unlock_anon_vma(anon_vma);
return ret;
}
/**
* try_to_unmap_file - unmap/unlock file page using the object-based rmap method
* @page: the page to unmap/unlock
* @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*
* This function is only called from try_to_unmap/try_to_munlock for
* object-based pages.
* When called from try_to_munlock(), the mmap_sem of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* VM_LOCKED.
*/
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma;
struct prio_tree_iter iter;
int ret = SWAP_AGAIN;
unsigned long cursor;
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = try_to_unmap_one(page, vma, address, flags);
if (ret != SWAP_AGAIN || !page_mapped(page))
goto out;
}
if (list_empty(&mapping->i_mmap_nonlinear))
goto out;
/*
* We don't bother to try to find the munlocked page in nonlinears.
* It's costly. Instead, later, page reclaim logic may call
* try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
*/
if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out;
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
cursor = vma->vm_end - vma->vm_start;
if (cursor > max_nl_size)
max_nl_size = cursor;
}
if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
ret = SWAP_FAIL;
goto out;
}
/*
* We don't try to search for this page in the nonlinear vmas,
* and page_referenced wouldn't have found it anyway. Instead
* just walk the nonlinear vmas trying to age and unmap some.
* The mapcount of the page we came in with is irrelevant,
* but even so use it as a guide to how hard we should try?
*/
mapcount = page_mapcount(page);
if (!mapcount)
goto out;
cond_resched();
max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
if (max_nl_cursor == 0)
max_nl_cursor = CLUSTER_SIZE;
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
if (try_to_unmap_cluster(cursor, &mapcount,
vma, page) == SWAP_MLOCK)
ret = SWAP_MLOCK;
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
goto out;
}
vma->vm_private_data = (void *) max_nl_cursor;
}
cond_resched();
max_nl_cursor += CLUSTER_SIZE;
} while (max_nl_cursor <= max_nl_size);
/*
* Don't loop forever (perhaps all the remaining pages are
* in locked vmas). Reset cursor on all unreserved nonlinear
* vmas, now forgetting on which ones it had fallen behind.
*/
list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
vma->vm_private_data = NULL;
out:
mutex_unlock(&mapping->i_mmap_mutex);
return ret;
}
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
* @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
* Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
int ret;
BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
if (unlikely(PageKsm(page)))
ret = try_to_unmap_ksm(page, flags);
else if (PageAnon(page))
ret = try_to_unmap_anon(page, flags);
else
ret = try_to_unmap_file(page, flags);
if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS;
return ret;
}
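/*
 * Illustrative caller sketch (hypothetical, loosely modeled on the
 * pageout path): only SWAP_SUCCESS means every pte is gone and the
 * page may move on to writeback or freeing.
 */
static inline bool example_unmap_for_reclaim(struct page *page)
{
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_SUCCESS:
		return true;	/* fully unmapped */
	case SWAP_MLOCK:	/* mlocked: must stay resident */
	case SWAP_FAIL:
	case SWAP_AGAIN:
	default:
		return false;
	}
}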
/**
* try_to_munlock - try to munlock a page
* @page: the page to be munlocked
*
* Called from munlock code. Checks all of the VMAs mapping the page
* to make sure nobody else has this page mlocked. The page will be
* returned with PG_mlocked cleared if no other vmas have it mlocked.
*
* Return values are:
*
* SWAP_AGAIN - no vma is holding page mlocked, or,
* SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
* SWAP_FAIL - page cannot be located at present
* SWAP_MLOCK - page is now mlocked.
*/
int try_to_munlock(struct page *page)
{
VM_BUG_ON(!PageLocked(page) || PageLRU(page));
if (unlikely(PageKsm(page)))
return try_to_unmap_ksm(page, TTU_MUNLOCK);
else if (PageAnon(page))
return try_to_unmap_anon(page, TTU_MUNLOCK);
else
return try_to_unmap_file(page, TTU_MUNLOCK);
}
void __put_anon_vma(struct anon_vma *anon_vma)
{
struct anon_vma *root = anon_vma->root;
if (root != anon_vma && atomic_dec_and_test(&root->refcount))
anon_vma_free(root);
anon_vma_free(anon_vma);
}
#ifdef CONFIG_MIGRATION
/*
* rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
* Called by migrate.c to remove migration ptes, but might be used more later.
*/
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg)
{
struct anon_vma *anon_vma;
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
/*
* Note: remove_migration_ptes() cannot use page_lock_anon_vma()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_sem. Users without mmap_sem are required to
* take a reference count to prevent the anon_vma disappearing
*/
anon_vma = page_anon_vma(page);
if (!anon_vma)
return ret;
anon_vma_lock(anon_vma);
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = rmap_one(page, vma, address, arg);
if (ret != SWAP_AGAIN)
break;
}
anon_vma_unlock(anon_vma);
return ret;
}
static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
struct vm_area_struct *vma;
struct prio_tree_iter iter;
int ret = SWAP_AGAIN;
if (!mapping)
return ret;
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
if (address == -EFAULT)
continue;
ret = rmap_one(page, vma, address, arg);
if (ret != SWAP_AGAIN)
break;
}
/*
* No nonlinear handling: being always shared, nonlinear vmas
* never contain migration ptes. Decide what to do about this
* limitation to linear when we need rmap_walk() on nonlinear.
*/
mutex_unlock(&mapping->i_mmap_mutex);
return ret;
}
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg)
{
VM_BUG_ON(!PageLocked(page));
if (unlikely(PageKsm(page)))
return rmap_walk_ksm(page, rmap_one, arg);
else if (PageAnon(page))
return rmap_walk_anon(page, rmap_one, arg);
else
return rmap_walk_file(page, rmap_one, arg);
}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_HUGETLB_PAGE
/*
* The following three functions are for anonymous (private mapped) hugepages.
* Unlike common anonymous pages, anonymous hugepages have no accounting code
* and no lru code, because we handle hugepages differently from common pages.
*/
static void __hugepage_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
if (PageAnon(page))
return;
if (!exclusive)
anon_vma = anon_vma->root;
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
}
void hugepage_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
int first;
BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
/* address might be in next vma when migration races vma_adjust */
first = atomic_inc_and_test(&page->_mapcount);
if (first)
__hugepage_set_anon_rmap(page, vma, address, 0);
}
void hugepage_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(&page->_mapcount, 0);
__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
| gpl-2.0 |
gproj-m/lge-kernel-gproj | arch/m68k/kernel/process.c | 2997 | 8794 | /*
* linux/arch/m68k/kernel/process.c
*
* Copyright (C) 1995 Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
asmlinkage void ret_from_fork(void);
/*
* Return saved PC from a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
if (in_sched_functions(sw->retpc))
return ((unsigned long *)sw->a6)[1];
else
return sw->retpc;
}
/*
* The idle loop on an m68k..
*/
static void default_idle(void)
{
if (!need_resched())
#if defined(MACH_ATARI_ONLY)
/* block out HSYNC on the atari (falcon) */
__asm__("stop #0x2200" : : : "cc");
#else
__asm__("stop #0x2000" : : : "cc");
#endif
}
void (*idle)(void) = default_idle;
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
idle();
schedule_preempt_disabled();
}
}
void machine_restart(char * __unused)
{
if (mach_reset)
mach_reset();
for (;;);
}
void machine_halt(void)
{
if (mach_halt)
mach_halt();
for (;;);
}
void machine_power_off(void)
{
if (mach_power_off)
mach_power_off();
for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void show_regs(struct pt_regs * regs)
{
printk("\n");
printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
regs->orig_d0, regs->d0, regs->a2, regs->a1);
printk("A0: %08lx D5: %08lx D4: %08lx\n",
regs->a0, regs->d5, regs->d4);
printk("D3: %08lx D2: %08lx D1: %08lx\n",
regs->d3, regs->d2, regs->d1);
if (!(regs->sr & PS_S))
printk("USP: %08lx\n", rdusp());
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
int pid;
mm_segment_t fs;
fs = get_fs();
set_fs (KERNEL_DS);
{
register long retval __asm__ ("d0");
register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
retval = __NR_clone;
__asm__ __volatile__
("clrl %%d2\n\t"
"trap #0\n\t" /* Linux/m68k system call */
"tstl %0\n\t" /* child or parent */
"jne 1f\n\t" /* parent - jump */
#ifdef CONFIG_MMU
"lea %%sp@(%c7),%6\n\t" /* reload current */
"movel %6@,%6\n\t"
#endif
"movel %3,%%sp@-\n\t" /* push argument */
"jsr %4@\n\t" /* call fn */
"movel %0,%%d1\n\t" /* pass exit value */
"movel %2,%%d0\n\t" /* exit */
"trap #0\n"
"1:"
: "+d" (retval)
: "i" (__NR_clone), "i" (__NR_exit),
"r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
"i" (-THREAD_SIZE)
: "d2");
pid = retval;
}
set_fs (fs);
return pid;
}
EXPORT_SYMBOL(kernel_thread);
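/*
 * Illustrative use (not part of the original file): callers pass an
 * entry function, its argument and extra clone flags, e.g.
 *
 *	pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * "my_thread_fn" is a hypothetical name; CLONE_VM | CLONE_UNTRACED are
 * always OR'd in by the implementation above.
 */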
void flush_thread(void)
{
current->thread.fs = __USER_DS;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {
unsigned long zero = 0;
asm volatile("frestore %0": :"m" (zero));
}
#endif
}
/*
* "m68k_fork()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
*/
asmlinkage int m68k_fork(struct pt_regs *regs)
{
#ifdef CONFIG_MMU
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
#else
return -EINVAL;
#endif
}
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
NULL, NULL);
}
asmlinkage int m68k_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags = regs->d1;
newsp = regs->d2;
parent_tidptr = (int __user *)regs->d3;
child_tidptr = (int __user *)regs->d4;
if (!newsp)
newsp = rdusp();
return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
unsigned long *retp;
childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->d0 = 0;
retp = ((unsigned long *) regs);
stack = ((struct switch_stack *) retp) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
*childstack = *stack;
childstack->retpc = (unsigned long)ret_from_fork;
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childstack;
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = regs->d5;
/*
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
*/
p->thread.fs = get_fs().seg;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {
/* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
if (CPU_IS_COLDFIRE) {
asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
"fmovel %/fpiar,%1\n\t"
"fmovel %/fpcr,%2\n\t"
"fmovel %/fpsr,%3"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0]),
"m" (p->thread.fpcntl[1]),
"m" (p->thread.fpcntl[2])
: "memory");
} else {
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
:
: "m" (p->thread.fp[0]),
"m" (p->thread.fpcntl[0])
: "memory");
}
}
/* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
}
#endif /* CONFIG_FPU */
return 0;
}
/* Fill in the fpu structure for a core dump. */
#ifdef CONFIG_FPU
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
char fpustate[216];
if (FPU_IS_EMU) {
int i;
memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
memcpy(fpu->fpregs, current->thread.fp, 96);
/* Convert internal fpu reg representation
* into long double format
*/
for (i = 0; i < 24; i += 3)
fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
((fpu->fpregs[i] & 0x0000ffff) << 16);
return 1;
}
/* First dump the fpu context to avoid protocol violation. */
asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0;
if (CPU_IS_COLDFIRE) {
asm volatile ("fmovel %/fpiar,%0\n\t"
"fmovel %/fpcr,%1\n\t"
"fmovel %/fpsr,%2\n\t"
"fmovemd %/fp0-%/fp7,%3"
:
: "m" (fpu->fpcntl[0]),
"m" (fpu->fpcntl[1]),
"m" (fpu->fpcntl[2]),
"m" (fpu->fpregs[0])
: "memory");
} else {
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
:
: "m" (fpu->fpcntl[0])
: "memory");
asm volatile ("fmovemx %/fp0-%/fp7,%0"
:
: "m" (fpu->fpregs[0])
: "memory");
}
return 1;
}
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_FPU */
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *name,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
int error;
char * filename;
struct pt_regs *regs = (struct pt_regs *) &name;
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct switch_stack *)p->thread.ksp)->a6;
do {
if (fp < stack_page+sizeof(struct thread_info) ||
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_lge_bullhead | drivers/leds/leds-rb532.c | 3253 | 1500 | /*
* LEDs driver for the "User LED" on Routerboard532
*
* Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org>
*
* Based on leds-cobalt-qube.c by Florian Fainelli and
* rb-diag.c (my own standalone driver for both LED and
* button of Routerboard532).
*/
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/mach-rc32434/gpio.h>
#include <asm/mach-rc32434/rb.h>
static void rb532_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness)
set_latch_u5(LO_ULED, 0);
else
set_latch_u5(0, LO_ULED);
}
static enum led_brightness rb532_led_get(struct led_classdev *cdev)
{
return (get_latch_u5() & LO_ULED) ? LED_FULL : LED_OFF;
}
static struct led_classdev rb532_uled = {
.name = "uled",
.brightness_set = rb532_led_set,
.brightness_get = rb532_led_get,
.default_trigger = "nand-disk",
};
static int rb532_led_probe(struct platform_device *pdev)
{
return led_classdev_register(&pdev->dev, &rb532_uled);
}
static int rb532_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&rb532_uled);
return 0;
}
static struct platform_driver rb532_led_driver = {
.probe = rb532_led_probe,
.remove = rb532_led_remove,
.driver = {
.name = "rb532-led",
.owner = THIS_MODULE,
},
};
module_platform_driver(rb532_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("User LED support for Routerboard532");
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
MODULE_ALIAS("platform:rb532-led");
| gpl-2.0 |
crimeofheart/n7000_tw_jb_kernel | drivers/gpio/rdc321x-gpio.c | 3509 | 6613 | /*
* RDC321x GPIO driver
*
* Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
* Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/mfd/rdc321x.h>
#include <linux/slab.h>
struct rdc321x_gpio {
spinlock_t lock;
struct pci_dev *sb_pdev;
u32 data_reg[2];
int reg1_ctrl_base;
int reg1_data_base;
int reg2_ctrl_base;
int reg2_data_base;
struct gpio_chip chip;
};
/* read GPIO pin */
static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct rdc321x_gpio *gpch;
u32 value = 0;
int reg;
gpch = container_of(chip, struct rdc321x_gpio, chip);
reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
spin_lock(&gpch->lock);
pci_write_config_dword(gpch->sb_pdev, reg,
gpch->data_reg[gpio < 32 ? 0 : 1]);
pci_read_config_dword(gpch->sb_pdev, reg, &value);
spin_unlock(&gpch->lock);
return (1 << (gpio & 0x1f)) & value ? 1 : 0;
}
static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct rdc321x_gpio *gpch;
int reg = (gpio < 32) ? 0 : 1;
gpch = container_of(chip, struct rdc321x_gpio, chip);
if (value)
gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
else
gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
pci_write_config_dword(gpch->sb_pdev,
reg ? gpch->reg2_data_base : gpch->reg1_data_base,
gpch->data_reg[reg]);
}
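/*
 * Worked example (illustrative): gpio 35 selects reg == 1 (the second
 * register pair) and bit 35 & 0x1f == 3, so setting the pin ORs
 * (1 << 3) into the cached word before it is written back through PCI
 * config space.
 */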
/* set GPIO pin to value */
static void rdc_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct rdc321x_gpio *gpch;
gpch = container_of(chip, struct rdc321x_gpio, chip);
spin_lock(&gpch->lock);
rdc_gpio_set_value_impl(chip, gpio, value);
spin_unlock(&gpch->lock);
}
static int rdc_gpio_config(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct rdc321x_gpio *gpch;
int err;
u32 reg;
gpch = container_of(chip, struct rdc321x_gpio, chip);
spin_lock(&gpch->lock);
err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
if (err)
goto unlock;
reg |= 1 << (gpio & 0x1f);
err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
if (err)
goto unlock;
rdc_gpio_set_value_impl(chip, gpio, value);
unlock:
spin_unlock(&gpch->lock);
return err;
}
/* configure GPIO pin as input */
static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return rdc_gpio_config(chip, gpio, 1);
}
/*
* Cache the initial value of both GPIO data registers
*/
static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
{
int err;
struct resource *r;
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;
}
rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
if (!rdc321x_gpio_dev) {
dev_err(&pdev->dev, "failed to allocate private data\n");
return -ENOMEM;
}
r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
if (!r) {
dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
err = -ENODEV;
goto out_free;
}
spin_lock_init(&rdc321x_gpio_dev->lock);
rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
rdc321x_gpio_dev->reg1_ctrl_base = r->start;
rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
if (!r) {
dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
err = -ENODEV;
goto out_free;
}
rdc321x_gpio_dev->reg2_ctrl_base = r->start;
rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
rdc321x_gpio_dev->chip.base = 0;
rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
platform_set_drvdata(pdev, rdc321x_gpio_dev);
/* This might not be what others (BIOS, bootloader, etc.)
wrote to these registers before, but it's a good guess. Still
better than just using 0xffffffff. */
err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
rdc321x_gpio_dev->reg1_data_base,
&rdc321x_gpio_dev->data_reg[0]);
if (err)
goto out_drvdata;
err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
rdc321x_gpio_dev->reg2_data_base,
&rdc321x_gpio_dev->data_reg[1]);
if (err)
goto out_drvdata;
dev_info(&pdev->dev, "registering %d GPIOs\n",
rdc321x_gpio_dev->chip.ngpio);
return gpiochip_add(&rdc321x_gpio_dev->chip);
out_drvdata:
platform_set_drvdata(pdev, NULL);
out_free:
kfree(rdc321x_gpio_dev);
return err;
}
static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
{
int ret;
struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
if (ret)
dev_err(&pdev->dev, "failed to unregister chip\n");
kfree(rdc321x_gpio_dev);
platform_set_drvdata(pdev, NULL);
return ret;
}
static struct platform_driver rdc321x_gpio_driver = {
.driver.name = "rdc321x-gpio",
.driver.owner = THIS_MODULE,
.probe = rdc321x_gpio_probe,
.remove = __devexit_p(rdc321x_gpio_remove),
};
static int __init rdc321x_gpio_init(void)
{
return platform_driver_register(&rdc321x_gpio_driver);
}
static void __exit rdc321x_gpio_exit(void)
{
platform_driver_unregister(&rdc321x_gpio_driver);
}
module_init(rdc321x_gpio_init);
module_exit(rdc321x_gpio_exit);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x GPIO driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rdc321x-gpio");
| gpl-2.0 |
Kernel-Saram/LG-SU760-Kernel | drivers/media/dvb/siano/smssdio.c | 4021 | 8324 | /*
* smssdio.c - Siano 1xxx SDIO interface driver
*
* Copyright 2008 Pierre Ossman
*
* Based on code by Siano Mobile Silicon, Inc.,
* Copyright (C) 2006-2008, Uri Shkolnik
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
*
* This hardware is a bit odd in that all transfers should be done
* to/from the SMSSDIO_DATA register, yet the "increase address" bit
* always needs to be set.
*
* Also, buffers from the card are always aligned to 128 byte
* boundaries.
*/
/*
* General cleanup notes:
*
* - only typedefs should be named *_t
*
* - use ERR_PTR and friends for smscore_register_device()
*
* - smscore_getbuffer should zero fields
*
* Fix stop command
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include "smscoreapi.h"
#include "sms-cards.h"
/* Registers */
#define SMSSDIO_DATA 0x00
#define SMSSDIO_INT 0x04
#define SMSSDIO_BLOCK_SIZE 128
static const struct sdio_device_id smssdio_ids[] __devinitconst = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
.driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
.driver_data = SMS1XXX_BOARD_SIANO_NOVA_A},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0),
.driver_data = SMS1XXX_BOARD_SIANO_NOVA_B},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0),
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
.driver_data = SMS1XXX_BOARD_SIANO_VEGA},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, smssdio_ids);
struct smssdio_device {
struct sdio_func *func;
struct smscore_device_t *coredev;
struct smscore_buffer_t *split_cb;
};
/*******************************************************************/
/* Siano core callbacks */
/*******************************************************************/
static int smssdio_sendrequest(void *context, void *buffer, size_t size)
{
int ret = 0;
struct smssdio_device *smsdev;
smsdev = context;
sdio_claim_host(smsdev->func);
while (size >= smsdev->func->cur_blksize) {
ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
buffer, smsdev->func->cur_blksize);
if (ret)
goto out;
buffer += smsdev->func->cur_blksize;
size -= smsdev->func->cur_blksize;
}
if (size) {
ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
buffer, size);
}
out:
sdio_release_host(smsdev->func);
return ret;
}
/*******************************************************************/
/* SDIO callbacks */
/*******************************************************************/
static void smssdio_interrupt(struct sdio_func *func)
{
int ret, isr;
struct smssdio_device *smsdev;
struct smscore_buffer_t *cb;
struct SmsMsgHdr_ST *hdr;
size_t size;
smsdev = sdio_get_drvdata(func);
/*
* The interrupt register has no defined meaning. It is just
* a way of turning of the level triggered interrupt.
*/
isr = sdio_readb(func, SMSSDIO_INT, &ret);
if (ret) {
sms_err("Unable to read interrupt register!\n");
return;
}
if (smsdev->split_cb == NULL) {
cb = smscore_getbuffer(smsdev->coredev);
if (!cb) {
sms_err("Unable to allocate data buffer!\n");
return;
}
ret = sdio_memcpy_fromio(smsdev->func,
cb->p,
SMSSDIO_DATA,
SMSSDIO_BLOCK_SIZE);
if (ret) {
sms_err("Error %d reading initial block!\n", ret);
return;
}
hdr = cb->p;
if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
smsdev->split_cb = cb;
return;
}
if (hdr->msgLength > smsdev->func->cur_blksize)
size = hdr->msgLength - smsdev->func->cur_blksize;
else
size = 0;
} else {
cb = smsdev->split_cb;
hdr = cb->p;
size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST);
smsdev->split_cb = NULL;
}
if (size) {
void *buffer;
buffer = cb->p + (hdr->msgLength - size);
size = ALIGN(size, SMSSDIO_BLOCK_SIZE);
BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);
/*
* First attempt to transfer all of it in one go...
*/
ret = sdio_memcpy_fromio(smsdev->func,
buffer,
SMSSDIO_DATA,
size);
if (ret && ret != -EINVAL) {
smscore_putbuffer(smsdev->coredev, cb);
sms_err("Error %d reading data from card!\n", ret);
return;
}
/*
* ..then fall back to one block at a time if that is
* not possible...
*
* (we have to do this manually because of the
* problem with the "increase address" bit)
*/
if (ret == -EINVAL) {
while (size) {
ret = sdio_memcpy_fromio(smsdev->func,
buffer, SMSSDIO_DATA,
smsdev->func->cur_blksize);
if (ret) {
smscore_putbuffer(smsdev->coredev, cb);
sms_err("Error %d reading "
"data from card!\n", ret);
return;
}
buffer += smsdev->func->cur_blksize;
if (size > smsdev->func->cur_blksize)
size -= smsdev->func->cur_blksize;
else
size = 0;
}
}
}
cb->size = hdr->msgLength;
cb->offset = 0;
smscore_onresponse(smsdev->coredev, cb);
}
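/*
 * Worked example (illustrative): hdr->msgLength == 300 with the
 * 128-byte block size leaves size = 300 - 128 = 172 after the initial
 * block; ALIGN(172, 128) == 256, so two more full blocks are read from
 * SMSSDIO_DATA, matching the 128-byte alignment rule noted at the top
 * of the file.
 */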
static int __devinit smssdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
int board_id;
struct smssdio_device *smsdev;
struct smsdevice_params_t params;
board_id = id->driver_data;
smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL);
if (!smsdev)
return -ENOMEM;
smsdev->func = func;
memset(&params, 0, sizeof(struct smsdevice_params_t));
params.device = &func->dev;
params.buffer_size = 0x5000; /* ?? */
params.num_buffers = 22; /* ?? */
params.context = smsdev;
snprintf(params.devpath, sizeof(params.devpath),
"sdio\\%s", sdio_func_id(func));
params.sendrequest_handler = smssdio_sendrequest;
params.device_type = sms_get_board(board_id)->type;
if (params.device_type != SMS_STELLAR)
params.flags |= SMS_DEVICE_FAMILY2;
else {
/*
* FIXME: Stellar needs special handling...
*/
ret = -ENODEV;
goto free;
}
ret = smscore_register_device(&params, &smsdev->coredev);
if (ret < 0)
goto free;
smscore_set_board_id(smsdev->coredev, board_id);
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret)
goto release;
ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE);
if (ret)
goto disable;
ret = sdio_claim_irq(func, smssdio_interrupt);
if (ret)
goto disable;
sdio_set_drvdata(func, smsdev);
sdio_release_host(func);
ret = smscore_start_device(smsdev->coredev);
if (ret < 0)
goto reclaim;
return 0;
reclaim:
sdio_claim_host(func);
sdio_release_irq(func);
disable:
sdio_disable_func(func);
release:
sdio_release_host(func);
smscore_unregister_device(smsdev->coredev);
free:
kfree(smsdev);
return ret;
}
static void smssdio_remove(struct sdio_func *func)
{
struct smssdio_device *smsdev;
smsdev = sdio_get_drvdata(func);
/* FIXME: racy! */
if (smsdev->split_cb)
smscore_putbuffer(smsdev->coredev, smsdev->split_cb);
smscore_unregister_device(smsdev->coredev);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
kfree(smsdev);
}
static struct sdio_driver smssdio_driver = {
.name = "smssdio",
.id_table = smssdio_ids,
.probe = smssdio_probe,
.remove = smssdio_remove,
};
/*******************************************************************/
/* Module functions */
/*******************************************************************/
static int __init smssdio_module_init(void)
{
int ret = 0;
printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
ret = sdio_register_driver(&smssdio_driver);
return ret;
}
static void __exit smssdio_module_exit(void)
{
sdio_unregister_driver(&smssdio_driver);
}
module_init(smssdio_module_init);
module_exit(smssdio_module_exit);
MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
MODULE_AUTHOR("Pierre Ossman");
MODULE_LICENSE("GPL");
| gpl-2.0 |
DirtyUnicorns/android_kernel_htc_pyramid | drivers/input/misc/twl6040-vibra.c | 4789 | 10947 | /*
* twl6040-vibra.c - TWL6040 Vibrator driver
*
* Author: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
* Author: Misael Lopez Cruz <misael.lopez@ti.com>
*
* Copyright: (C) 2011 Texas Instruments, Inc.
*
* Based on twl4030-vibra.c by Henrik Saari <henrik.saari@nokia.com>
* Felipe Balbi <felipe.balbi@nokia.com>
* Jari Vanhala <ext-javi.vanhala@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/mfd/twl6040.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#define EFFECT_DIR_180_DEG 0x8000
/* Recommended modulation index 85% */
#define TWL6040_VIBRA_MOD 85
#define TWL6040_NUM_SUPPLIES 2
struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
struct workqueue_struct *workqueue;
struct work_struct play_work;
struct mutex mutex;
int irq;
bool enabled;
int weak_speed;
int strong_speed;
int direction;
unsigned int vibldrv_res;
unsigned int vibrdrv_res;
unsigned int viblmotor_res;
unsigned int vibrmotor_res;
struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES];
struct twl6040 *twl6040;
};
static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
{
struct vibra_info *info = data;
struct twl6040 *twl6040 = info->twl6040;
u8 status;
status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
if (status & TWL6040_VIBLOCDET) {
dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
TWL6040_VIBENA);
}
if (status & TWL6040_VIBROCDET) {
dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
TWL6040_VIBENA);
}
return IRQ_HANDLED;
}
static void twl6040_vibra_enable(struct vibra_info *info)
{
struct twl6040 *twl6040 = info->twl6040;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies);
if (ret) {
dev_err(info->dev, "failed to enable regulators %d\n", ret);
return;
}
twl6040_power(info->twl6040, 1);
if (twl6040_get_revid(twl6040) <= TWL6040_REV_ES1_1) {
/*
* ERRATA: Disable overcurrent protection for at least
* 3ms when enabling vibrator drivers to avoid false
* overcurrent detection
*/
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
TWL6040_VIBENA | TWL6040_VIBCTRL);
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
TWL6040_VIBENA | TWL6040_VIBCTRL);
usleep_range(3000, 3500);
}
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
TWL6040_VIBENA);
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
TWL6040_VIBENA);
info->enabled = true;
}
static void twl6040_vibra_disable(struct vibra_info *info)
{
struct twl6040 *twl6040 = info->twl6040;
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00);
twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00);
twl6040_power(info->twl6040, 0);
regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies);
info->enabled = false;
}
static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res,
int speed, int direction)
{
int vpk, max_code;
u8 vibdat;
/* output swing */
vpk = (vddvib * motor_res * TWL6040_VIBRA_MOD) /
(100 * (vibdrv_res + motor_res));
/* 50mV per VIBDAT code step */
max_code = vpk / 50;
if (max_code > TWL6040_VIBDAT_MAX)
max_code = TWL6040_VIBDAT_MAX;
/* scale speed to max allowed code */
vibdat = (u8)((speed * max_code) / USHRT_MAX);
/* 2's complement for direction > 180 degrees */
vibdat *= direction;
return vibdat;
}
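/*
 * Worked example (hypothetical numbers, for illustration only): with
 * vddvib = 2300 mV, vibdrv_res = 8 ohm and motor_res = 32 ohm:
 *
 * vpk = (2300 * 32 * 85) / (100 * (8 + 32)) = 1564 mV
 * max_code = 1564 / 50 = 31
 *
 * so a half-scale speed of 0x8000 yields vibdat = (0x8000 * 31) / USHRT_MAX
 * = 15, which is then negated when direction is -1.
 */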
static void twl6040_vibra_set_effect(struct vibra_info *info)
{
struct twl6040 *twl6040 = info->twl6040;
u8 vibdatl, vibdatr;
int volt;
/* weak motor */
volt = regulator_get_voltage(info->supplies[0].consumer) / 1000;
vibdatl = twl6040_vibra_code(volt, info->vibldrv_res,
info->viblmotor_res,
info->weak_speed, info->direction);
/* strong motor */
volt = regulator_get_voltage(info->supplies[1].consumer) / 1000;
vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res,
info->vibrmotor_res,
info->strong_speed, info->direction);
twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl);
twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr);
}
static void vibra_play_work(struct work_struct *work)
{
struct vibra_info *info = container_of(work,
struct vibra_info, play_work);
mutex_lock(&info->mutex);
if (info->weak_speed || info->strong_speed) {
if (!info->enabled)
twl6040_vibra_enable(info);
twl6040_vibra_set_effect(info);
} else if (info->enabled)
twl6040_vibra_disable(info);
mutex_unlock(&info->mutex);
}
static int vibra_play(struct input_dev *input, void *data,
struct ff_effect *effect)
{
struct vibra_info *info = input_get_drvdata(input);
int ret;
/* Do not allow effect, while the routing is set to use audio */
ret = twl6040_get_vibralr_status(info->twl6040);
if (ret & TWL6040_VIBSEL) {
dev_info(&input->dev, "Vibra is configured for audio\n");
return -EBUSY;
}
info->weak_speed = effect->u.rumble.weak_magnitude;
info->strong_speed = effect->u.rumble.strong_magnitude;
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
ret = queue_work(info->workqueue, &info->play_work);
if (!ret) {
dev_info(&input->dev, "work is already on queue\n");
return ret;
}
return 0;
}
static void twl6040_vibra_close(struct input_dev *input)
{
struct vibra_info *info = input_get_drvdata(input);
cancel_work_sync(&info->play_work);
mutex_lock(&info->mutex);
if (info->enabled)
twl6040_vibra_disable(info);
mutex_unlock(&info->mutex);
}
#ifdef CONFIG_PM_SLEEP
static int twl6040_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
mutex_lock(&info->mutex);
if (info->enabled)
twl6040_vibra_disable(info);
mutex_unlock(&info->mutex);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
{
struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
struct vibra_info *info;
int ret;
if (!pdata) {
dev_err(&pdev->dev, "platform_data not available\n");
return -EINVAL;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "couldn't allocate memory\n");
return -ENOMEM;
}
info->dev = &pdev->dev;
info->twl6040 = dev_get_drvdata(pdev->dev.parent);
info->vibldrv_res = pdata->vibldrv_res;
info->vibrdrv_res = pdata->vibrdrv_res;
info->viblmotor_res = pdata->viblmotor_res;
info->vibrmotor_res = pdata->vibrmotor_res;
if ((!info->vibldrv_res && !info->viblmotor_res) ||
(!info->vibrdrv_res && !info->vibrmotor_res)) {
dev_err(info->dev, "invalid vibra driver/motor resistance\n");
ret = -EINVAL;
goto err_kzalloc;
}
info->irq = platform_get_irq(pdev, 0);
if (info->irq < 0) {
dev_err(info->dev, "invalid irq\n");
ret = -EINVAL;
goto err_kzalloc;
}
mutex_init(&info->mutex);
info->input_dev = input_allocate_device();
if (info->input_dev == NULL) {
dev_err(info->dev, "couldn't allocate input device\n");
ret = -ENOMEM;
goto err_kzalloc;
}
input_set_drvdata(info->input_dev, info);
info->input_dev->name = "twl6040:vibrator";
info->input_dev->id.version = 1;
info->input_dev->dev.parent = pdev->dev.parent;
info->input_dev->close = twl6040_vibra_close;
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
if (ret < 0) {
dev_err(info->dev, "couldn't register vibrator to FF\n");
goto err_ialloc;
}
ret = input_register_device(info->input_dev);
if (ret < 0) {
dev_err(info->dev, "couldn't register input device\n");
goto err_iff;
}
platform_set_drvdata(pdev, info);
ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
"twl6040_irq_vib", info);
if (ret) {
dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
goto err_irq;
}
info->supplies[0].supply = "vddvibl";
info->supplies[1].supply = "vddvibr";
ret = regulator_bulk_get(info->dev, ARRAY_SIZE(info->supplies),
info->supplies);
if (ret) {
dev_err(info->dev, "couldn't get regulators %d\n", ret);
goto err_regulator;
}
if (pdata->vddvibl_uV) {
ret = regulator_set_voltage(info->supplies[0].consumer,
pdata->vddvibl_uV,
pdata->vddvibl_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
ret);
goto err_voltage;
}
}
if (pdata->vddvibr_uV) {
ret = regulator_set_voltage(info->supplies[1].consumer,
pdata->vddvibr_uV,
pdata->vddvibr_uV);
if (ret) {
dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
ret);
goto err_voltage;
}
}
info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
if (info->workqueue == NULL) {
dev_err(info->dev, "couldn't create workqueue\n");
ret = -ENOMEM;
goto err_voltage;
}
INIT_WORK(&info->play_work, vibra_play_work);
return 0;
err_voltage:
regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
err_regulator:
free_irq(info->irq, info);
err_irq:
input_unregister_device(info->input_dev);
info->input_dev = NULL;
err_iff:
if (info->input_dev)
input_ff_destroy(info->input_dev);
err_ialloc:
input_free_device(info->input_dev);
err_kzalloc:
kfree(info);
return ret;
}
static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
{
struct vibra_info *info = platform_get_drvdata(pdev);
input_unregister_device(info->input_dev);
free_irq(info->irq, info);
regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
destroy_workqueue(info->workqueue);
kfree(info);
return 0;
}
static struct platform_driver twl6040_vibra_driver = {
.probe = twl6040_vibra_probe,
.remove = __devexit_p(twl6040_vibra_remove),
.driver = {
.name = "twl6040-vibra",
.owner = THIS_MODULE,
.pm = &twl6040_vibra_pm_ops,
},
};
module_platform_driver(twl6040_vibra_driver);
MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
| gpl-2.0 |
jibaron/ddebug | drivers/media/v4l2-core/videobuf-dvb.c | 4789 | 10220 | /*
*
* some helper functions for simple DVB cards which simply DMA the
* complete transport stream and let the computer sort everything else
* (i.e. we are using the software demux, ...). Also uses the
* video-buf to manage DMA buffers.
*
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dvb.h>
/* ------------------------------------------------------------------ */
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages");
#define dprintk(fmt, arg...) if (debug) \
printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name , ## arg)
/* ------------------------------------------------------------------ */
static int videobuf_dvb_thread(void *data)
{
struct videobuf_dvb *dvb = data;
struct videobuf_buffer *buf;
unsigned long flags;
void *outp;
dprintk("dvb thread started\n");
set_freezable();
videobuf_read_start(&dvb->dvbq);
for (;;) {
/* fetch next buffer */
buf = list_entry(dvb->dvbq.stream.next,
struct videobuf_buffer, stream);
list_del(&buf->stream);
videobuf_waiton(&dvb->dvbq, buf, 0, 1);
/* no more feeds left or stop_feed() asked us to quit */
if (0 == dvb->nfeeds)
break;
if (kthread_should_stop())
break;
try_to_freeze();
/* feed buffer data to demux */
outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);
if (buf->state == VIDEOBUF_DONE)
dvb_dmx_swfilter(&dvb->demux, outp,
buf->size);
/* requeue buffer */
list_add_tail(&buf->stream,&dvb->dvbq.stream);
spin_lock_irqsave(dvb->dvbq.irqlock,flags);
dvb->dvbq.ops->buf_queue(&dvb->dvbq,buf);
spin_unlock_irqrestore(dvb->dvbq.irqlock,flags);
}
videobuf_read_stop(&dvb->dvbq);
dprintk("dvb thread stopped\n");
/* Hmm, linux becomes *very* unhappy without this ... */
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
return 0;
}
static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct videobuf_dvb *dvb = demux->priv;
int rc;
if (!demux->dmx.frontend)
return -EINVAL;
mutex_lock(&dvb->lock);
dvb->nfeeds++;
rc = dvb->nfeeds;
if (NULL != dvb->thread)
goto out;
dvb->thread = kthread_run(videobuf_dvb_thread,
dvb, "%s dvb", dvb->name);
if (IS_ERR(dvb->thread)) {
rc = PTR_ERR(dvb->thread);
dvb->thread = NULL;
}
out:
mutex_unlock(&dvb->lock);
return rc;
}
static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct videobuf_dvb *dvb = demux->priv;
int err = 0;
mutex_lock(&dvb->lock);
dvb->nfeeds--;
if (0 == dvb->nfeeds && NULL != dvb->thread) {
err = kthread_stop(dvb->thread);
dvb->thread = NULL;
}
mutex_unlock(&dvb->lock);
return err;
}
static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
struct module *module,
void *adapter_priv,
struct device *device,
char *adapter_name,
short *adapter_nr,
int mfe_shared)
{
int result;
mutex_init(&fe->lock);
/* register adapter */
result = dvb_register_adapter(&fe->adapter, adapter_name, module,
device, adapter_nr);
if (result < 0) {
printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
adapter_name, result);
}
fe->adapter.priv = adapter_priv;
fe->adapter.mfe_shared = mfe_shared;
return result;
}
static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
struct videobuf_dvb *dvb)
{
int result;
/* register frontend */
result = dvb_register_frontend(adapter, dvb->frontend);
if (result < 0) {
printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
dvb->name, result);
goto fail_frontend;
}
/* register demux stuff */
dvb->demux.dmx.capabilities =
DMX_TS_FILTERING | DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING;
dvb->demux.priv = dvb;
dvb->demux.filternum = 256;
dvb->demux.feednum = 256;
dvb->demux.start_feed = videobuf_dvb_start_feed;
dvb->demux.stop_feed = videobuf_dvb_stop_feed;
result = dvb_dmx_init(&dvb->demux);
if (result < 0) {
printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
dvb->name, result);
goto fail_dmx;
}
dvb->dmxdev.filternum = 256;
dvb->dmxdev.demux = &dvb->demux.dmx;
dvb->dmxdev.capabilities = 0;
result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
if (result < 0) {
printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
dvb->name, result);
goto fail_dmxdev;
}
dvb->fe_hw.source = DMX_FRONTEND_0;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
dvb->name, result);
goto fail_fe_hw;
}
dvb->fe_mem.source = DMX_MEMORY_FE;
result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
if (result < 0) {
printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
dvb->name, result);
goto fail_fe_mem;
}
result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
if (result < 0) {
printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
dvb->name, result);
goto fail_fe_conn;
}
/* register network adapter */
result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
if (result < 0) {
printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
dvb->name, result);
goto fail_fe_conn;
}
return 0;
fail_fe_conn:
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
dvb_dmx_release(&dvb->demux);
fail_dmx:
dvb_unregister_frontend(dvb->frontend);
fail_frontend:
dvb_frontend_detach(dvb->frontend);
dvb->frontend = NULL;
return result;
}
/* ------------------------------------------------------------------ */
/* Register a single adapter and one or more frontends */
int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
struct module *module,
void *adapter_priv,
struct device *device,
short *adapter_nr,
int mfe_shared)
{
struct list_head *list, *q;
struct videobuf_dvb_frontend *fe;
int res;
fe = videobuf_dvb_get_frontend(f, 1);
if (!fe) {
printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
return -EINVAL;
}
/* Bring up the adapter */
res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
fe->dvb.name, adapter_nr, mfe_shared);
if (res < 0) {
printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
return res;
}
/* Attach all of the frontends to the adapter */
mutex_lock(&f->lock);
list_for_each_safe(list, q, &f->felist) {
fe = list_entry(list, struct videobuf_dvb_frontend, felist);
res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
if (res < 0) {
printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
fe->dvb.name, res);
goto err;
}
}
mutex_unlock(&f->lock);
return 0;
err:
mutex_unlock(&f->lock);
videobuf_dvb_unregister_bus(f);
return res;
}
EXPORT_SYMBOL(videobuf_dvb_register_bus);
void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
{
videobuf_dvb_dealloc_frontends(f);
dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
struct videobuf_dvb_frontends *f, int id)
{
struct list_head *list, *q;
struct videobuf_dvb_frontend *fe, *ret = NULL;
mutex_lock(&f->lock);
list_for_each_safe(list, q, &f->felist) {
fe = list_entry(list, struct videobuf_dvb_frontend, felist);
if (fe->id == id) {
ret = fe;
break;
}
}
mutex_unlock(&f->lock);
return ret;
}
EXPORT_SYMBOL(videobuf_dvb_get_frontend);
int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
struct dvb_frontend *p)
{
struct list_head *list, *q;
struct videobuf_dvb_frontend *fe = NULL;
int ret = 0;
mutex_lock(&f->lock);
list_for_each_safe(list, q, &f->felist) {
fe = list_entry(list, struct videobuf_dvb_frontend, felist);
if (fe->dvb.frontend == p) {
ret = fe->id;
break;
}
}
mutex_unlock(&f->lock);
return ret;
}
EXPORT_SYMBOL(videobuf_dvb_find_frontend);
struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
struct videobuf_dvb_frontends *f, int id)
{
struct videobuf_dvb_frontend *fe;
fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
if (fe == NULL)
goto fail_alloc;
fe->id = id;
mutex_init(&fe->dvb.lock);
mutex_lock(&f->lock);
list_add_tail(&fe->felist, &f->felist);
mutex_unlock(&f->lock);
fail_alloc:
return fe;
}
EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
{
struct list_head *list, *q;
struct videobuf_dvb_frontend *fe;
mutex_lock(&f->lock);
list_for_each_safe(list, q, &f->felist) {
fe = list_entry(list, struct videobuf_dvb_frontend, felist);
if (fe->dvb.net.dvbdev) {
dvb_net_release(&fe->dvb.net);
fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
&fe->dvb.fe_mem);
fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
&fe->dvb.fe_hw);
dvb_dmxdev_release(&fe->dvb.dmxdev);
dvb_dmx_release(&fe->dvb.demux);
dvb_unregister_frontend(fe->dvb.frontend);
}
if (fe->dvb.frontend)
/* always allocated, may have been reset */
dvb_frontend_detach(fe->dvb.frontend);
list_del(list); /* remove list entry */
kfree(fe); /* free frontend allocation */
}
mutex_unlock(&f->lock);
}
EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
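/*
 * Illustrative usage sketch, not from this file: a bridge driver would
 * typically allocate a frontend slot, attach its demodulator, then
 * register the whole bus. All names below other than the videobuf_dvb_*
 * helpers are hypothetical.
 */
static int example_dvb_init(struct videobuf_dvb_frontends *f,
struct module *mod, void *priv,
struct device *dev, short *adapter_nr)
{
struct videobuf_dvb_frontend *fe;
fe = videobuf_dvb_alloc_frontend(f, 1);
if (!fe)
return -ENOMEM;
/* fe->dvb.frontend = dvb_attach(...) would be set by the bridge here */
return videobuf_dvb_register_bus(f, mod, priv, dev, adapter_nr, 0);
}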
| gpl-2.0 |
luca020400/android_kernel_motorola_msm8226 | drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 4789 | 8941 | /*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Authors:
* Inki Dae <inki.dae@samsung.com>
* Seung-Woo Kim <sw0312.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include "drmP.h"
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
#define to_subdrv(dev) to_context(dev)
#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
struct drm_hdmi_context, subdrv);
/* these callback points should be set by specific drivers. */
static struct exynos_hdmi_ops *hdmi_ops;
static struct exynos_mixer_ops *mixer_ops;
struct drm_hdmi_context {
struct exynos_drm_subdrv subdrv;
struct exynos_drm_hdmi_context *hdmi_ctx;
struct exynos_drm_hdmi_context *mixer_ctx;
};
void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
if (ops)
hdmi_ops = ops;
}
void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
if (ops)
mixer_ops = ops;
}
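/*
 * Illustrative sketch, not from this file: a concrete HDMI driver fills
 * an exynos_hdmi_ops table with its callbacks and hands it to the
 * registration helper above, typically from its probe path. The callback
 * and table names here are hypothetical.
 */
static bool example_hdmi_is_connected(void *ctx)
{
return true;
}
static struct exynos_hdmi_ops example_hdmi_ops = {
.is_connected = example_hdmi_is_connected,
};
static void example_register_hdmi_ops(void)
{
exynos_hdmi_ops_register(&example_hdmi_ops);
}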
static bool drm_hdmi_is_connected(struct device *dev)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->is_connected)
return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
return false;
}
static int drm_hdmi_get_edid(struct device *dev,
struct drm_connector *connector, u8 *edid, int len)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->get_edid)
return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
len);
return 0;
}
static int drm_hdmi_check_timing(struct device *dev, void *timing)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->check_timing)
return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
return 0;
}
static int drm_hdmi_power_on(struct device *dev, int mode)
{
struct drm_hdmi_context *ctx = to_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->power_on)
return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
return 0;
}
static struct exynos_drm_display_ops drm_hdmi_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_HDMI,
.is_connected = drm_hdmi_is_connected,
.get_edid = drm_hdmi_get_edid,
.check_timing = drm_hdmi_check_timing,
.power_on = drm_hdmi_power_on,
};
static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
struct exynos_drm_manager *manager = subdrv->manager;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->enable_vblank)
return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
manager->pipe);
return 0;
}
static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->disable_vblank)
return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
}
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->mode_fixup)
hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
adjusted_mode);
}
static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->mode_set)
hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
}
static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
unsigned int *width, unsigned int *height)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->get_max_resol)
hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
}
static void drm_hdmi_commit(struct device *subdrv_dev)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (hdmi_ops && hdmi_ops->commit)
hdmi_ops->commit(ctx->hdmi_ctx->ctx);
}
static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
switch (mode) {
case DRM_MODE_DPMS_ON:
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (hdmi_ops && hdmi_ops->disable)
hdmi_ops->disable(ctx->hdmi_ctx->ctx);
break;
default:
DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
break;
}
}
static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
.dpms = drm_hdmi_dpms,
.enable_vblank = drm_hdmi_enable_vblank,
.disable_vblank = drm_hdmi_disable_vblank,
.mode_fixup = drm_hdmi_mode_fixup,
.mode_set = drm_hdmi_mode_set,
.get_max_resol = drm_hdmi_get_max_resol,
.commit = drm_hdmi_commit,
};
static void drm_mixer_mode_set(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->win_mode_set)
mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
}
static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->win_commit)
mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
}
static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
if (mixer_ops && mixer_ops->win_disable)
mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
}
static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
.mode_set = drm_mixer_mode_set,
.commit = drm_mixer_commit,
.disable = drm_mixer_disable,
};
static struct exynos_drm_manager hdmi_manager = {
.pipe = -1,
.ops = &drm_hdmi_manager_ops,
.overlay_ops = &drm_hdmi_overlay_ops,
.display_ops = &drm_hdmi_display_ops,
};
static int hdmi_subdrv_probe(struct drm_device *drm_dev,
struct device *dev)
{
struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
struct drm_hdmi_context *ctx;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_drm_common_hdmi_pd *pd;
DRM_DEBUG_KMS("%s\n", __FILE__);
pd = pdev->dev.platform_data;
if (!pd) {
DRM_DEBUG_KMS("platform data is null.\n");
return -EFAULT;
}
if (!pd->hdmi_dev) {
DRM_DEBUG_KMS("hdmi device is null.\n");
return -EFAULT;
}
if (!pd->mixer_dev) {
DRM_DEBUG_KMS("mixer device is null.\n");
return -EFAULT;
}
ctx = get_ctx_from_subdrv(subdrv);
ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
to_context(pd->hdmi_dev);
if (!ctx->hdmi_ctx) {
DRM_DEBUG_KMS("hdmi context is null.\n");
return -EFAULT;
}
ctx->hdmi_ctx->drm_dev = drm_dev;
ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
to_context(pd->mixer_dev);
if (!ctx->mixer_ctx) {
DRM_DEBUG_KMS("mixer context is null.\n");
return -EFAULT;
}
ctx->mixer_ctx->drm_dev = drm_dev;
return 0;
}
static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_drm_subdrv *subdrv;
struct drm_hdmi_context *ctx;
DRM_DEBUG_KMS("%s\n", __FILE__);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_LOG_KMS("failed to alloc common hdmi context.\n");
return -ENOMEM;
}
subdrv = &ctx->subdrv;
subdrv->dev = dev;
subdrv->manager = &hdmi_manager;
subdrv->probe = hdmi_subdrv_probe;
platform_set_drvdata(pdev, subdrv);
exynos_drm_subdrv_register(subdrv);
return 0;
}
static int hdmi_runtime_suspend(struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
return 0;
}
static const struct dev_pm_ops hdmi_pm_ops = {
.runtime_suspend = hdmi_runtime_suspend,
.runtime_resume = hdmi_runtime_resume,
};
static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
{
struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_subdrv_unregister(&ctx->subdrv);
kfree(ctx);
return 0;
}
struct platform_driver exynos_drm_common_hdmi_driver = {
.probe = exynos_drm_hdmi_probe,
.remove = __devexit_p(exynos_drm_hdmi_remove),
.driver = {
.name = "exynos-drm-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
},
};
| gpl-2.0 |
TeamLGOG/lge-kernel-msm8960-pro | drivers/acpi/acpica/exoparg2.c | 5045 | 15644 | /******************************************************************************
*
* Module Name: exoparg2 - AML execution - opcodes with 2 arguments
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "acinterp.h"
#include "acevents.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exoparg2")
/*!
* Naming convention for AML interpreter execution routines.
*
* The routines that begin execution of AML opcodes are named with a common
* convention based upon the number of arguments, the number of target operands,
* and whether or not a value is returned:
*
* AcpiExOpcode_xA_yT_zR
*
* Where:
*
* xA - ARGUMENTS: The number of arguments (input operands) that are
* required for this opcode type (1 through 6 args).
* yT - TARGETS: The number of targets (output operands) that are required
* for this opcode type (0, 1, or 2 targets).
* zR - RETURN VALUE: Indicates whether this opcode type returns a value
* as the function return (0 or 1).
*
* The AcpiExOpcode* functions are called via the Dispatcher component with
* fully resolved operands.
!*/
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_2A_0T_0R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with two arguments, no target, and no return
* value.
*
* ALLOCATION: Deletes both operands
*
******************************************************************************/
acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
struct acpi_namespace_node *node;
u32 value;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the opcode */
switch (walk_state->opcode) {
case AML_NOTIFY_OP: /* Notify (notify_object, notify_value) */
/* The first operand is a namespace node */
node = (struct acpi_namespace_node *)operand[0];
/* Second value is the notify value */
value = (u32) operand[1]->integer.value;
/* Are notifies allowed on this object? */
if (!acpi_ev_is_notify_object(node)) {
ACPI_ERROR((AE_INFO,
"Unexpected notify object type [%s]",
acpi_ut_get_type_name(node->type)));
status = AE_AML_OPERAND_TYPE;
break;
}
/*
* Dispatch the notify to the appropriate handler
* NOTE: the request is queued for execution after this method
* completes. The notify handlers are NOT invoked synchronously
* from this thread -- because handlers may in turn run other
* control methods.
*/
status = acpi_ev_queue_notify_request(node, value);
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_2A_2T_1R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute a dyadic operator (2 operands) with 2 output targets
* and one implicit return value.
*
******************************************************************************/
acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc1 = NULL;
union acpi_operand_object *return_desc2 = NULL;
acpi_status status;
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
switch (walk_state->opcode) {
case AML_DIVIDE_OP:
/* Divide (Dividend, Divisor, remainder_result, quotient_result) */
return_desc1 =
acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc1) {
status = AE_NO_MEMORY;
goto cleanup;
}
return_desc2 =
acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc2) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Quotient to return_desc1, remainder to return_desc2 */
status = acpi_ut_divide(operand[0]->integer.value,
operand[1]->integer.value,
&return_desc1->integer.value,
&return_desc2->integer.value);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
/* Store the results to the target reference operands */
status = acpi_ex_store(return_desc2, operand[2], walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
status = acpi_ex_store(return_desc1, operand[3], walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
cleanup:
/*
* Since the remainder is not returned indirectly, remove a reference to
* it. Only the quotient is returned indirectly.
*/
acpi_ut_remove_reference(return_desc2);
if (ACPI_FAILURE(status)) {
/* Delete the return object */
acpi_ut_remove_reference(return_desc1);
}
/* Save return object (the quotient) on success */
else {
walk_state->result_obj = return_desc1;
}
return_ACPI_STATUS(status);
}
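/*
 * Worked example for the Divide path above: the AML statement
 * Divide(17, 5, REM, QUO) stores the remainder 2 into REM (operand[2])
 * and the quotient 3 into QUO (operand[3]); the quotient object is also
 * the implicit return value saved in walk_state->result_obj.
 */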
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_2A_1T_1R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with two arguments, one target, and a return
* value.
*
******************************************************************************/
acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
u64 index;
acpi_status status = AE_OK;
acpi_size length;
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
if (walk_state->op_info->flags & AML_MATH) {
/* All simple math opcodes (add, etc.) */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
return_desc->integer.value =
acpi_ex_do_math_op(walk_state->opcode,
operand[0]->integer.value,
operand[1]->integer.value);
goto store_result_to_target;
}
switch (walk_state->opcode) {
case AML_MOD_OP: /* Mod (Dividend, Divisor, remainder_result) (ACPI 2.0) */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* return_desc will contain the remainder */
status = acpi_ut_divide(operand[0]->integer.value,
operand[1]->integer.value,
NULL, &return_desc->integer.value);
break;
case AML_CONCAT_OP: /* Concatenate (Data1, Data2, Result) */
status = acpi_ex_do_concatenate(operand[0], operand[1],
&return_desc, walk_state);
break;
case AML_TO_STRING_OP: /* to_string (Buffer, Length, Result) (ACPI 2.0) */
/*
* Input object is guaranteed to be a buffer at this point (it may have
* been converted.) Copy the raw buffer data to a new object of
* type String.
*/
/*
* Get the length of the new string. It is the smallest of:
* 1) Length of the input buffer
* 2) Max length as specified in the to_string operator
* 3) Length of input buffer up to a zero byte (null terminator)
*
* NOTE: A length of zero is ok, and will create a zero-length, null
* terminated string.
*/
length = 0;
while ((length < operand[0]->buffer.length) &&
(length < operand[1]->integer.value) &&
(operand[0]->buffer.pointer[length])) {
length++;
}
/* Allocate a new string object */
return_desc = acpi_ut_create_string_object(length);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* Copy the raw buffer data with no transform.
* (NULL terminated already)
*/
ACPI_MEMCPY(return_desc->string.pointer,
operand[0]->buffer.pointer, length);
break;
case AML_CONCAT_RES_OP:
/* concatenate_res_template (Buffer, Buffer, Result) (ACPI 2.0) */
status = acpi_ex_concat_template(operand[0], operand[1],
&return_desc, walk_state);
break;
case AML_INDEX_OP: /* Index (Source Index Result) */
/* Create the internal return object */
return_desc =
acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Initialize the Index reference object */
index = operand[1]->integer.value;
return_desc->reference.value = (u32) index;
return_desc->reference.class = ACPI_REFCLASS_INDEX;
/*
* At this point, the Source operand is a String, Buffer, or Package.
* Verify that the index is within range.
*/
switch ((operand[0])->common.type) {
case ACPI_TYPE_STRING:
if (index >= operand[0]->string.length) {
status = AE_AML_STRING_LIMIT;
}
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
break;
case ACPI_TYPE_BUFFER:
if (index >= operand[0]->buffer.length) {
status = AE_AML_BUFFER_LIMIT;
}
return_desc->reference.target_type =
ACPI_TYPE_BUFFER_FIELD;
break;
case ACPI_TYPE_PACKAGE:
if (index >= operand[0]->package.count) {
status = AE_AML_PACKAGE_LIMIT;
}
return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
return_desc->reference.where =
&operand[0]->package.elements[index];
break;
default:
status = AE_AML_INTERNAL;
goto cleanup;
}
/* Failure means that the Index was beyond the end of the object */
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Index (0x%8.8X%8.8X) is beyond end of object",
ACPI_FORMAT_UINT64(index)));
goto cleanup;
}
/*
* Save the target object and add a reference to it for the life
* of the index
*/
return_desc->reference.object = operand[0];
acpi_ut_add_reference(operand[0]);
/* Store the reference to the Target */
status = acpi_ex_store(return_desc, operand[2], walk_state);
/* Return the reference */
walk_state->result_obj = return_desc;
goto cleanup;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
break;
}
store_result_to_target:
if (ACPI_SUCCESS(status)) {
/*
* Store the result of the operation (which is now in return_desc) into
* the Target descriptor.
*/
status = acpi_ex_store(return_desc, operand[2], walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
if (!walk_state->result_obj) {
walk_state->result_obj = return_desc;
}
}
cleanup:
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
walk_state->result_obj = NULL;
}
return_ACPI_STATUS(status);
}
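/*
 * Worked example for the to_string length rule above: for the input
 * buffer {0x41, 0x42, 0x00, 0x43} with a maximum length of 10, the scan
 * stops at the zero byte, so the result is the two-character string "AB".
 */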
/*******************************************************************************
*
* FUNCTION: acpi_ex_opcode_2A_0T_1R
*
* PARAMETERS: walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: Execute opcode with 2 arguments, no target, and a return value
*
******************************************************************************/
acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
acpi_status status = AE_OK;
u8 logical_result = FALSE;
ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Create the internal return object */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Execute the Opcode */
if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
operand[0]->integer.
value,
operand[1]->integer.
value, &logical_result);
goto store_logical_result;
} else if (walk_state->op_info->flags & AML_LOGICAL) {
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
operand[1], &logical_result);
goto store_logical_result;
}
switch (walk_state->opcode) {
case AML_ACQUIRE_OP: /* Acquire (mutex_object, Timeout) */
status =
acpi_ex_acquire_mutex(operand[1], operand[0], walk_state);
if (status == AE_TIME) {
logical_result = TRUE; /* TRUE = Acquire timed out */
status = AE_OK;
}
break;
case AML_WAIT_OP: /* Wait (event_object, Timeout) */
status = acpi_ex_system_wait_event(operand[1], operand[0]);
if (status == AE_TIME) {
logical_result = TRUE; /* TRUE, Wait timed out */
status = AE_OK;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
walk_state->opcode));
status = AE_AML_BAD_OPCODE;
goto cleanup;
}
store_logical_result:
/*
* Set the return value according to logical_result: logical TRUE is
* all ones; the default is FALSE (zero).
*/
if (logical_result) {
return_desc->integer.value = ACPI_UINT64_MAX;
}
cleanup:
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
/* Save return object on success */
else {
walk_state->result_obj = return_desc;
}
return_ACPI_STATUS(status);
}
| gpl-2.0 |
ghsr/android_kernel_samsung_i9152 | arch/sh/boards/mach-sh7763rdp/irq.c | 13237 | 1119 | /*
* linux/arch/sh/boards/renesas/sh7763rdp/irq.c
*
* Renesas Solutions SH7763RDP Support.
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/sh7763rdp.h>
#define INTC_BASE (0xFFD00000)
#define INTC_INT2PRI7 (INTC_BASE+0x4001C)
#define INTC_INT2MSKCR (INTC_BASE+0x4003C)
#define INTC_INT2MSKCR1 (INTC_BASE+0x400D4)
/*
* Initialize IRQ setting
*/
void __init init_sh7763rdp_IRQ(void)
{
/* GPIO enabled */
__raw_writel(1 << 25, INTC_INT2MSKCR);
/* enable GPIO interrupts */
__raw_writel((__raw_readl(INTC_INT2PRI7) & 0xFF00FFFF) | 0x000F0000,
INTC_INT2PRI7);
/* USBH enabled */
__raw_writel(1 << 17, INTC_INT2MSKCR1);
/* GETHER enabled */
__raw_writel(1 << 16, INTC_INT2MSKCR1);
/* DMAC enabled */
__raw_writel(1 << 8, INTC_INT2MSKCR);
}
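/*
 * Note on the INT2PRI7 update above: the read-modify-write clears the
 * byte at bits 23:16 and writes 0x0F into it, programming only the GPIO
 * interrupt priority field while leaving the other three byte-wide
 * fields of the register untouched.
 */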
| gpl-2.0 |
VegaDevTeam/android_kernel_pantech_ef52s | arch/score/lib/lshrdi3.c | 13749 | 1257 | /*
* arch/score/lib/lshrdi3.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include "libgcc.h"
long long __lshrdi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
if (b == 0)
return u;
uu.ll = u;
bm = 32 - b;
if (bm <= 0) {
w.s.high = 0;
w.s.low = (unsigned int) uu.s.high >> -bm;
} else {
const unsigned int carries = (unsigned int) uu.s.high << bm;
w.s.high = (unsigned int) uu.s.high >> b;
w.s.low = ((unsigned int) uu.s.low >> b) | carries;
}
return w.ll;
}
EXPORT_SYMBOL(__lshrdi3);
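/*
 * Worked example: __lshrdi3(0x123456789ABCDEF0, 8). Here b = 8 and
 * bm = 24 > 0, so:
 *
 * carries = 0x12345678 << 24 = 0x78000000
 * w.s.high = 0x12345678 >> 8 = 0x00123456
 * w.s.low = (0x9ABCDEF0 >> 8) | carries = 0x789ABCDE
 *
 * giving 0x00123456789ABCDE, the logical right shift of u by 8 bits.
 */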
| gpl-2.0 |
profosure/porogram | TMessagesProj/jni/opus/silk/arm/NSQ_neon.c | 182 | 4800 | /***********************************************************************
Copyright (C) 2014 Vidyo
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <arm_neon.h>
#include "main.h"
#include "stack_alloc.h"
#include "NSQ.h"
#include "celt/cpu_support.h"
#include "celt/arm/armcpu.h"
opus_int32 silk_noise_shape_quantizer_short_prediction_neon(const opus_int32 *buf32, const opus_int32 *coef32, opus_int order)
{
int32x4_t coef0 = vld1q_s32(coef32);
int32x4_t coef1 = vld1q_s32(coef32 + 4);
int32x4_t coef2 = vld1q_s32(coef32 + 8);
int32x4_t coef3 = vld1q_s32(coef32 + 12);
int32x4_t a0 = vld1q_s32(buf32 - 15);
int32x4_t a1 = vld1q_s32(buf32 - 11);
int32x4_t a2 = vld1q_s32(buf32 - 7);
int32x4_t a3 = vld1q_s32(buf32 - 3);
int32x4_t b0 = vqdmulhq_s32(coef0, a0);
int32x4_t b1 = vqdmulhq_s32(coef1, a1);
int32x4_t b2 = vqdmulhq_s32(coef2, a2);
int32x4_t b3 = vqdmulhq_s32(coef3, a3);
int32x4_t c0 = vaddq_s32(b0, b1);
int32x4_t c1 = vaddq_s32(b2, b3);
int32x4_t d = vaddq_s32(c0, c1);
int64x2_t e = vpaddlq_s32(d);
int64x1_t f = vadd_s64(vget_low_s64(e), vget_high_s64(e));
opus_int32 out = vget_lane_s32(vreinterpret_s32_s64(f), 0);
out += silk_RSHIFT( order, 1 );
return out;
}
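/*
 * Illustrative scalar sketch, not from this file: what the NEON routine
 * above computes, lane by lane. vqdmulhq_s32(a, b) is essentially
 * ((opus_int64)a * b) >> 31 (saturating only for INT32_MIN * INT32_MIN),
 * so a plain-C model of the 16-tap prediction is (helper name
 * hypothetical):
 */
static opus_int32 example_short_prediction_scalar(const opus_int32 *buf32,
const opus_int32 *coef32, opus_int order)
{
opus_int32 out = 0;
int i;
/* taps run over buf32[-15] .. buf32[0], matching the vld1q loads above */
for (i = 0; i < 16; i++)
out += (opus_int32)(((opus_int64)coef32[i] * buf32[i - 15]) >> 31);
out += silk_RSHIFT(order, 1);
return out;
}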
opus_int32 silk_NSQ_noise_shape_feedback_loop_neon(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order)
{
opus_int32 out;
if (order == 8)
{
int32x4_t a00 = vdupq_n_s32(data0[0]);
int32x4_t a01 = vld1q_s32(data1); /* data1[0] ... [3] */
int32x4_t a0 = vextq_s32 (a00, a01, 3); /* data0[0] data1[0] ...[2] */
int32x4_t a1 = vld1q_s32(data1 + 3); /* data1[3] ... [6] */
/*TODO: Convert these once in advance instead of once per sample, like
silk_noise_shape_quantizer_short_prediction_neon() does.*/
int16x8_t coef16 = vld1q_s16(coef);
int32x4_t coef0 = vmovl_s16(vget_low_s16(coef16));
int32x4_t coef1 = vmovl_s16(vget_high_s16(coef16));
/*This is not bit-exact with the C version, since we do not drop the
lower 16 bits of each multiply, but wait until the end to truncate
precision. This is an encoder-specific calculation (and unlike
silk_noise_shape_quantizer_short_prediction_neon(), is not meant to
simulate what the decoder will do). We still could use vqdmulhq_s32()
like silk_noise_shape_quantizer_short_prediction_neon() and save
half the multiplies, but the speed difference is not large, since we
then need two extra adds.*/
int64x2_t b0 = vmull_s32(vget_low_s32(a0), vget_low_s32(coef0));
int64x2_t b1 = vmlal_s32(b0, vget_high_s32(a0), vget_high_s32(coef0));
int64x2_t b2 = vmlal_s32(b1, vget_low_s32(a1), vget_low_s32(coef1));
int64x2_t b3 = vmlal_s32(b2, vget_high_s32(a1), vget_high_s32(coef1));
int64x1_t c = vadd_s64(vget_low_s64(b3), vget_high_s64(b3));
int64x1_t cS = vrshr_n_s64(c, 15);
int32x2_t d = vreinterpret_s32_s64(cS);
out = vget_lane_s32(d, 0);
vst1q_s32(data1, a0);
vst1q_s32(data1 + 4, a1);
return out;
}
return silk_NSQ_noise_shape_feedback_loop_c(data0, data1, coef, order);
}
| gpl-2.0 |
lyfkevin/Wind_iproj_JB_kernel_old | drivers/spmi/spmi-resources.c | 438 | 4008 | /* Copyright (c) 2002-3 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Resource handling based on platform.c.
*/
#include <linux/export.h>
#include <linux/spmi.h>
#include <linux/string.h>
/**
* spmi_get_resource - get a resource for a device
* @dev: spmi device
* @node: device node resource
* @type: resource type
* @res_num: resource index
*
* If 'node' is specified as NULL, then the API treats this as a special
* case to assume the first devnode. For configurations that do not use
* spmi-dev-container, there is only one node to begin with, so NULL should
* be passed in this case.
*
* Returns
* NULL on failure.
*/
struct resource *spmi_get_resource(struct spmi_device *dev,
struct spmi_resource *node,
unsigned int type, unsigned int res_num)
{
int i;
/* if a node is not specified, default to the first node */
if (!node)
node = &dev->res;
for (i = 0; i < node->num_resources; i++) {
struct resource *r = &node->resource[i];
if (type == resource_type(r) && res_num-- == 0)
return r;
}
return NULL;
}
EXPORT_SYMBOL_GPL(spmi_get_resource);
#define SPMI_MAX_RES_NAME 256
/**
* spmi_get_resource_byname - get a resource for a device given a name
* @dev: spmi device handle
* @node: device node resource
* @type: resource type
* @name: resource name to lookup
*/
struct resource *spmi_get_resource_byname(struct spmi_device *dev,
struct spmi_resource *node,
unsigned int type,
const char *name)
{
int i;
/* if a node is not specified, default to the first node */
if (!node)
node = &dev->res;
for (i = 0; i < node->num_resources; i++) {
struct resource *r = &node->resource[i];
if (type == resource_type(r) && r->name &&
!strncmp(r->name, name, SPMI_MAX_RES_NAME))
return r;
}
return NULL;
}
EXPORT_SYMBOL_GPL(spmi_get_resource_byname);
/**
* spmi_get_irq - get an IRQ for a device
* @dev: spmi device
* @node: device node resource
* @res_num: IRQ number index
*
* Returns
* -ENXIO on failure.
*/
int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
unsigned int res_num)
{
struct resource *r = spmi_get_resource(dev, node,
IORESOURCE_IRQ, res_num);
return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(spmi_get_irq);
/**
* spmi_get_irq_byname - get an IRQ for a device given a name
* @dev: spmi device handle
* @node: device node resource
* @name: resource name to lookup
*
* Returns -ENXIO on failure
*/
int spmi_get_irq_byname(struct spmi_device *dev,
struct spmi_resource *node, const char *name)
{
struct resource *r = spmi_get_resource_byname(dev, node,
IORESOURCE_IRQ, name);
return r ? r->start : -ENXIO;
}
EXPORT_SYMBOL_GPL(spmi_get_irq_byname);
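/*
 * Illustrative usage sketch, not from this file: a SPMI slave driver
 * probe pulling one memory window and one named IRQ. Everything here
 * except the spmi_get_* helpers is hypothetical.
 */
static int example_spmi_probe(struct spmi_device *dev)
{
struct resource *base;
int irq;
base = spmi_get_resource(dev, NULL, IORESOURCE_MEM, 0);
irq = spmi_get_irq_byname(dev, NULL, "fault");
if (!base || irq < 0)
return -ENXIO;
return 0;
}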
/*
* spmi_get_container_dev_byname - get a device node resource
* @dev: spmi device handle
* @label: device name to lookup
*
* Only usable in spmi-dev-container configurations. Given a name,
* find the associated spmi_resource that matches the name.
*
* Return NULL if the spmi_device is not a dev-container,
* or if the lookup fails.
*/
struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
const char *label)
{
int i;
if (!label)
return NULL;
for (i = 0; i < dev->num_dev_node; i++) {
struct spmi_resource *r = &dev->dev_node[i];
if (r && r->label && !strncmp(r->label,
label, SPMI_MAX_RES_NAME))
return r;
}
return NULL;
}
EXPORT_SYMBOL(spmi_get_dev_container_byname);
| gpl-2.0 |
houzhenggang/bcm63xx-next | drivers/clk/at91/clk-usb.c | 438 | 9613 | /*
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include "pmc.h"
#define USB_SOURCE_MAX 2
#define SAM9X5_USB_DIV_SHIFT 8
#define SAM9X5_USB_MAX_DIV 0xf
#define RM9200_USB_DIV_SHIFT 28
#define RM9200_USB_DIV_TAB_SIZE 4
struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct at91_pmc *pmc;
};
#define to_at91sam9x5_clk_usb(hw) \
container_of(hw, struct at91sam9x5_clk_usb, hw)
struct at91rm9200_clk_usb {
struct clk_hw hw;
struct at91_pmc *pmc;
u32 divisors[4];
};
#define to_at91rm9200_clk_usb(hw) \
container_of(hw, struct at91rm9200_clk_usb, hw)
static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u32 tmp;
u8 usbdiv;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
tmp = pmc_read(pmc, AT91_PMC_USB);
usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
return parent_rate / (usbdiv + 1);
}
static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned long div;
unsigned long bestrate;
unsigned long tmp;
if (rate >= *parent_rate)
return *parent_rate;
div = *parent_rate / rate;
if (div >= SAM9X5_USB_MAX_DIV)
return *parent_rate / (SAM9X5_USB_MAX_DIV + 1);
bestrate = *parent_rate / div;
tmp = *parent_rate / (div + 1);
if (bestrate - rate > rate - tmp)
bestrate = tmp;
return bestrate;
}
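/*
 * Worked example (illustrative): parent_rate = 48 MHz and rate = 13 MHz
 * give div = 3, so the candidates are 48/3 = 16 MHz and 48/4 = 12 MHz;
 * 12 MHz is only 1 MHz from the request versus 3 MHz, so it is returned.
 */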
static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
{
u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
if (index > 1)
return -EINVAL;
tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
if (index)
tmp |= AT91_PMC_USBS;
pmc_write(pmc, AT91_PMC_USB, tmp);
return 0;
}
static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
}
static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
unsigned long div = parent_rate / rate;
if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV)
return -EINVAL;
tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
pmc_write(pmc, AT91_PMC_USB, tmp);
return 0;
}
static const struct clk_ops at91sam9x5_usb_ops = {
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
.round_rate = at91sam9x5_clk_usb_round_rate,
.get_parent = at91sam9x5_clk_usb_get_parent,
.set_parent = at91sam9x5_clk_usb_set_parent,
.set_rate = at91sam9x5_clk_usb_set_rate,
};
static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
pmc_write(pmc, AT91_PMC_USB,
pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
return 0;
}
static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
pmc_write(pmc, AT91_PMC_USB,
pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
}
static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
}
static const struct clk_ops at91sam9n12_usb_ops = {
.enable = at91sam9n12_clk_usb_enable,
.disable = at91sam9n12_clk_usb_disable,
.is_enabled = at91sam9n12_clk_usb_is_enabled,
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
.round_rate = at91sam9x5_clk_usb_round_rate,
.set_rate = at91sam9x5_clk_usb_set_rate,
};
static struct clk * __init
at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_usb *usb;
struct clk *clk = NULL;
struct clk_init_data init;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &at91sam9x5_usb_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
usb->hw.init = &init;
usb->pmc = pmc;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
kfree(usb);
return clk;
}
static struct clk * __init
at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
const char *parent_name)
{
struct at91sam9x5_clk_usb *usb;
struct clk *clk = NULL;
struct clk_init_data init;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &at91sam9n12_usb_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = CLK_SET_RATE_GATE;
usb->hw.init = &init;
usb->pmc = pmc;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
kfree(usb);
return clk;
}
static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
u32 tmp;
u8 usbdiv;
tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
if (usb->divisors[usbdiv])
return parent_rate / usb->divisors[usbdiv];
return 0;
}
static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
unsigned long bestrate = 0;
int bestdiff = -1;
unsigned long tmprate;
int tmpdiff;
int i = 0;
for (i = 0; i < 4; i++) {
if (!usb->divisors[i])
continue;
tmprate = *parent_rate / usb->divisors[i];
if (tmprate < rate)
tmpdiff = rate - tmprate;
else
tmpdiff = tmprate - rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
}
if (!bestdiff)
break;
}
return bestrate;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
u32 tmp;
int i;
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct at91_pmc *pmc = usb->pmc;
unsigned long div = parent_rate / rate;
if (parent_rate % rate)
return -EINVAL;
for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
if (usb->divisors[i] == div) {
tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
~AT91_PMC_USBDIV;
tmp |= i << RM9200_USB_DIV_SHIFT;
pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
return 0;
}
}
return -EINVAL;
}
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
.round_rate = at91rm9200_clk_usb_round_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
static struct clk * __init
at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
const char *parent_name, const u32 *divisors)
{
struct at91rm9200_clk_usb *usb;
struct clk *clk = NULL;
struct clk_init_data init;
usb = kzalloc(sizeof(*usb), GFP_KERNEL);
if (!usb)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &at91rm9200_usb_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
init.flags = 0;
usb->hw.init = &init;
usb->pmc = pmc;
memcpy(usb->divisors, divisors, sizeof(usb->divisors));
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
kfree(usb);
return clk;
}
void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
struct at91_pmc *pmc)
{
struct clk *clk;
int i;
int num_parents;
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
return;
for (i = 0; i < num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(np, i);
if (!parent_names[i])
return;
}
of_property_read_string(np, "clock-output-names", &name);
clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
struct at91_pmc *pmc)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
return;
of_property_read_string(np, "clock-output-names", &name);
clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
struct at91_pmc *pmc)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
return;
of_property_read_u32_array(np, "atmel,clk-divisors", divisors, 4);
if (!divisors[0])
return;
of_property_read_string(np, "clock-output-names", &name);
clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
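/*
 * Hedged device tree sketch (illustrative, not taken from a real dts):
 * a node consumed by of_at91rm9200_clk_usb_setup() might look like
 *
 *	usb: usbck {
 *		compatible = "atmel,at91rm9200-clk-usb";
 *		#clock-cells = <0>;
 *		clocks = <&pllb>;
 *		atmel,clk-divisors = <1 2 0 0>;
 *	};
 *
 * A zero entry marks a divisor slot as unused; the setup above bails
 * out early when divisors[0] itself is zero.
 */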
| gpl-2.0 |
visi0nary/android_kernel_alps_k05ts_a | drivers/usb/host/imx21-hcd.c | 2230 | 48680 | /*
* USB Host Controller Driver for IMX21
*
* Copyright (C) 2006 Loping Dog Embedded Systems
* Copyright (C) 2009 Martin Fuzzey
* Originally written by Jay Monkman <jtm@lopingdog.com>
* Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* The i.MX21 USB hardware contains
* * 32 transfer descriptors (called ETDs)
 * * 4KB of data memory
*
* The data memory is shared between the host and function controllers
* (but this driver only supports the host controller)
*
* So setting up a transfer involves:
 * * Allocating an ETD
 * * Filling in the ETD with appropriate information
 * * Allocating data memory (and putting the offset in the ETD)
 * * Activating the ETD
 * * Getting an interrupt when done.
*
* An ETD is assigned to each active endpoint.
*
* Low resource (ETD and Data memory) situations are handled differently for
 * isochronous and non-isochronous transactions:
*
* Non ISOC transfers are queued if either ETDs or Data memory are unavailable
*
* ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
* They allocate both ETDs and Data memory during URB submission
* (and fail if unavailable).
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include "imx21-hcd.h"
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
static const char hcd_name[] = "imx21-hcd";
static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
return (struct imx21 *)hcd->hcd_priv;
}
/* =========================================== */
/* Hardware access helpers */
/* =========================================== */
static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
{
void __iomem *reg = imx21->regs + offset;
writel(readl(reg) | mask, reg);
}
static inline void clear_register_bits(struct imx21 *imx21,
u32 offset, u32 mask)
{
void __iomem *reg = imx21->regs + offset;
writel(readl(reg) & ~mask, reg);
}
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
void __iomem *reg = imx21->regs + offset;
if (readl(reg) & mask)
writel(mask, reg);
}
static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
void __iomem *reg = imx21->regs + offset;
if (!(readl(reg) & mask))
writel(mask, reg);
}
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
}
static inline int wrap_frame(int counter)
{
return counter & 0xFFFF;
}
static inline int frame_after(int frame, int after)
{
/* handle wrapping like jiffies time_after() */
return (s16)((s16)after - (s16)frame) < 0;
}
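/*
 * Worked example (illustrative): frame_after(0x0001, 0xFFFE) evaluates
 * (s16)0xFFFE - (s16)0x0001 = -2 - 1 = -3 < 0, so frame 0x0001 counts
 * as "after" frame 0xFFFE across the 16-bit counter wrap.
 */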
static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}
static inline bool unsuitable_for_dma(dma_addr_t addr)
{
return (addr & 3) != 0;
}
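/*
 * e.g. a dma_addr_t ending in 0x2 is rejected here (illustrative); such
 * transfers fall back to PIO or a bounce buffer in activate_etd().
 */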
#include "imx21-dbg.c"
static void nonisoc_urb_completed_for_etd(
struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
/* =========================================== */
/* ETD management */
/* =========================================== */
static int alloc_etd(struct imx21 *imx21)
{
int i;
struct etd_priv *etd = imx21->etd;
for (i = 0; i < USB_NUM_ETD; i++, etd++) {
if (etd->alloc == 0) {
memset(etd, 0, sizeof(imx21->etd[0]));
etd->alloc = 1;
debug_etd_allocated(imx21);
return i;
}
}
return -1;
}
static void disactivate_etd(struct imx21 *imx21, int num)
{
int etd_mask = (1 << num);
struct etd_priv *etd = &imx21->etd[num];
writel(etd_mask, imx21->regs + USBH_ETDENCLR);
clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
etd->active_count = 0;
DEBUG_LOG_FRAME(imx21, etd, disactivated);
}
static void reset_etd(struct imx21 *imx21, int num)
{
struct etd_priv *etd = imx21->etd + num;
int i;
disactivate_etd(imx21, num);
for (i = 0; i < 4; i++)
etd_writel(imx21, num, i, 0);
etd->urb = NULL;
etd->ep = NULL;
etd->td = NULL;
etd->bounce_buffer = NULL;
}
static void free_etd(struct imx21 *imx21, int num)
{
if (num < 0)
return;
if (num >= USB_NUM_ETD) {
dev_err(imx21->dev, "BAD etd=%d!\n", num);
return;
}
if (imx21->etd[num].alloc == 0) {
dev_err(imx21->dev, "ETD %d already free!\n", num);
return;
}
debug_etd_freed(imx21);
reset_etd(imx21, num);
memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}
static void setup_etd_dword0(struct imx21 *imx21,
int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
etd_writel(imx21, etd_num, 0,
((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
((u32) dir << DW0_DIRECT) |
((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
1 : 0) << DW0_SPEED) |
((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
((u32) maxpacket << DW0_MAXPKTSIZ));
}
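/*
 * Layout sketch (illustrative): for device 3, endpoint 1, low speed and
 * maxpacket 8, the DW0_* shifts above pack address, endpoint, direction,
 * speed, format and maxpacket into the single 32-bit ETD DWORD 0.
 */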
/*
 * Copy a buffer to the data controller's data memory.
 * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
 */
static void copy_to_dmem(
struct imx21 *imx21, int dmem_offset, void *src, int count)
{
void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
u32 word = 0;
u8 *p = src;
int byte = 0;
int i;
for (i = 0; i < count; i++) {
byte = i % 4;
word += (*p++ << (byte * 8));
if (byte == 3) {
writel(word, dmem);
dmem += 4;
word = 0;
}
}
if (count && byte != 3)
writel(word, dmem);
}
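/*
 * Packing example (illustrative): src = {0x11, 0x22, 0x33, 0x44}
 * assembles word = 0x44332211 (first byte in the least significant
 * byte) before the single 32-bit writel(); a trailing partial word is
 * flushed by the writel() after the loop.
 */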
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
u32 etd_mask = 1 << etd_num;
struct etd_priv *etd = &imx21->etd[etd_num];
if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
/* For non-aligned isoc transfers the condition below is always true */
if (etd->len <= etd->dmem_size) {
/* Fits into data memory, use PIO */
if (dir != TD_DIR_IN) {
copy_to_dmem(imx21,
etd->dmem_offset,
etd->cpu_buffer, etd->len);
}
etd->dma_handle = 0;
} else {
/* Too big for data memory, use bounce buffer */
enum dma_data_direction dmadir;
if (dir == TD_DIR_IN) {
dmadir = DMA_FROM_DEVICE;
etd->bounce_buffer = kmalloc(etd->len,
GFP_ATOMIC);
} else {
dmadir = DMA_TO_DEVICE;
etd->bounce_buffer = kmemdup(etd->cpu_buffer,
etd->len,
GFP_ATOMIC);
}
if (!etd->bounce_buffer) {
dev_err(imx21->dev, "failed bounce alloc\n");
goto err_bounce_alloc;
}
etd->dma_handle =
dma_map_single(imx21->dev,
etd->bounce_buffer,
etd->len,
dmadir);
if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
dev_err(imx21->dev, "failed bounce map\n");
goto err_bounce_map;
}
}
}
clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
if (etd->dma_handle) {
set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
} else {
if (dir != TD_DIR_IN) {
/* need to set for ZLP and PIO */
set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
}
}
DEBUG_LOG_FRAME(imx21, etd, activated);
#ifdef DEBUG
if (!etd->active_count) {
int i;
etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
etd->disactivated_frame = -1;
etd->last_int_frame = -1;
etd->last_req_frame = -1;
for (i = 0; i < 4; i++)
etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
}
#endif
etd->active_count = 1;
writel(etd_mask, imx21->regs + USBH_ETDENSET);
return;
err_bounce_map:
kfree(etd->bounce_buffer);
err_bounce_alloc:
free_dmem(imx21, etd);
nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
/* =========================================== */
/* Data memory management */
/* =========================================== */
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
struct usb_host_endpoint *ep)
{
unsigned int offset = 0;
struct imx21_dmem_area *area;
struct imx21_dmem_area *tmp;
size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
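	/*
	 * (~size + 1) is -size in two's complement, so the line above adds
	 * (-size) & 3, e.g. size 5 -> 8 while size 8 is unchanged.
	 */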
if (size > DMEM_SIZE) {
dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
size, DMEM_SIZE);
return -EINVAL;
}
list_for_each_entry(tmp, &imx21->dmem_list, list) {
if ((size + offset) < offset)
goto fail;
if ((size + offset) <= tmp->offset)
break;
offset = tmp->size + tmp->offset;
if ((offset + size) > DMEM_SIZE)
goto fail;
}
area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
if (area == NULL)
return -ENOMEM;
area->ep = ep;
area->offset = offset;
area->size = size;
list_add_tail(&area->list, &tmp->list);
debug_dmem_allocated(imx21, size);
return offset;
fail:
return -ENOMEM;
}
/* Memory now available for a queued ETD - activate it */
static void activate_queued_etd(struct imx21 *imx21,
struct etd_priv *etd, u32 dmem_offset)
{
struct urb_priv *urb_priv = etd->urb->hcpriv;
int etd_num = etd - &imx21->etd[0];
u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
etd_num);
etd_writel(imx21, etd_num, 1,
((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
etd->dmem_offset = dmem_offset;
urb_priv->active = 1;
activate_etd(imx21, etd_num, dir);
}
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
struct imx21_dmem_area *area;
struct etd_priv *tmp;
int found = 0;
int offset;
if (!etd->dmem_size)
return;
etd->dmem_size = 0;
offset = etd->dmem_offset;
list_for_each_entry(area, &imx21->dmem_list, list) {
if (area->offset == offset) {
debug_dmem_freed(imx21, area->size);
list_del(&area->list);
kfree(area);
found = 1;
break;
}
}
if (!found) {
dev_err(imx21->dev,
"Trying to free unallocated DMEM %d\n", offset);
return;
}
/* Try again to allocate memory for anything we've queued */
list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
if (offset >= 0) {
list_del(&etd->queue);
activate_queued_etd(imx21, etd, (u32)offset);
}
}
}
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
struct imx21_dmem_area *area, *tmp;
list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
if (area->ep == ep) {
dev_err(imx21->dev,
"Active DMEM %d for disabled ep=%p\n",
area->offset, ep);
list_del(&area->list);
kfree(area);
}
}
}
/* =========================================== */
/* Endpoint handling */
/* =========================================== */
/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i;
for (i = 0; i < NUM_ISO_ETDS; i++) {
int etd_num = ep_priv->etd[i];
struct etd_priv *etd;
if (etd_num < 0)
continue;
etd = &imx21->etd[etd_num];
ep_priv->etd[i] = -1;
free_dmem(imx21, etd); /* for isoc */
if (list_empty(&imx21->queue_for_etd)) {
free_etd(imx21, etd_num);
continue;
}
dev_dbg(imx21->dev,
"assigning idle etd %d for queued request\n", etd_num);
ep_priv = list_first_entry(&imx21->queue_for_etd,
struct ep_priv, queue);
list_del(&ep_priv->queue);
reset_etd(imx21, etd_num);
ep_priv->waiting_etd = 0;
ep_priv->etd[i] = etd_num;
if (list_empty(&ep_priv->ep->urb_list)) {
dev_err(imx21->dev, "No urb for queued ep!\n");
continue;
}
schedule_nonisoc_etd(imx21, list_first_entry(
&ep_priv->ep->urb_list, struct urb, urb_list));
}
}
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
__releases(imx21->lock)
__acquires(imx21->lock)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct ep_priv *ep_priv = urb->ep->hcpriv;
struct urb_priv *urb_priv = urb->hcpriv;
debug_urb_completed(imx21, urb, status);
dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
kfree(urb_priv->isoc_td);
kfree(urb->hcpriv);
urb->hcpriv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&imx21->lock);
usb_hcd_giveback_urb(hcd, urb, status);
spin_lock(&imx21->lock);
if (list_empty(&ep_priv->ep->urb_list))
ep_idle(imx21, ep_priv);
}
static void nonisoc_urb_completed_for_etd(
struct imx21 *imx21, struct etd_priv *etd, int status)
{
struct usb_host_endpoint *ep = etd->ep;
urb_done(imx21->hcd, etd->urb, status);
etd->urb = NULL;
if (!list_empty(&ep->urb_list)) {
struct urb *urb = list_first_entry(
&ep->urb_list, struct urb, urb_list);
dev_vdbg(imx21->dev, "next URB %p\n", urb);
schedule_nonisoc_etd(imx21, urb);
}
}
/* =========================================== */
/* ISOC Handling ... */
/* =========================================== */
static void schedule_isoc_etds(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct ep_priv *ep_priv = ep->hcpriv;
struct etd_priv *etd;
struct urb_priv *urb_priv;
struct td *td;
int etd_num;
int i;
int cur_frame;
u8 dir;
for (i = 0; i < NUM_ISO_ETDS; i++) {
too_late:
if (list_empty(&ep_priv->td_list))
break;
etd_num = ep_priv->etd[i];
if (etd_num < 0)
break;
etd = &imx21->etd[etd_num];
if (etd->urb)
continue;
td = list_entry(ep_priv->td_list.next, struct td, list);
list_del(&td->list);
urb_priv = td->urb->hcpriv;
cur_frame = imx21_hc_get_frame(hcd);
if (frame_after(cur_frame, td->frame)) {
dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
cur_frame, td->frame);
urb_priv->isoc_status = -EXDEV;
td->urb->iso_frame_desc[
td->isoc_index].actual_length = 0;
td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
if (--urb_priv->isoc_remaining == 0)
urb_done(hcd, td->urb, urb_priv->isoc_status);
goto too_late;
}
urb_priv->active = 1;
etd->td = td;
etd->ep = td->ep;
etd->urb = td->urb;
etd->len = td->len;
etd->dma_handle = td->dma_handle;
etd->cpu_buffer = td->cpu_buffer;
debug_isoc_submitted(imx21, cur_frame, td);
dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
etd_writel(imx21, etd_num, 1, etd->dmem_offset);
etd_writel(imx21, etd_num, 2,
(TD_NOTACCESSED << DW2_COMPCODE) |
((td->frame & 0xFFFF) << DW2_STARTFRM));
etd_writel(imx21, etd_num, 3,
(TD_NOTACCESSED << DW3_COMPCODE0) |
(td->len << DW3_PKTLEN0));
activate_etd(imx21, etd_num, dir);
}
}
static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
int etd_mask = 1 << etd_num;
struct etd_priv *etd = imx21->etd + etd_num;
struct urb *urb = etd->urb;
struct urb_priv *urb_priv = urb->hcpriv;
struct td *td = etd->td;
struct usb_host_endpoint *ep = etd->ep;
int isoc_index = td->isoc_index;
unsigned int pipe = urb->pipe;
int dir_in = usb_pipein(pipe);
int cc;
int bytes_xfrd;
disactivate_etd(imx21, etd_num);
cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
	/* Input doesn't always fill the buffer, so don't generate an error
* when this happens.
*/
if (dir_in && (cc == TD_DATAUNDERRUN))
cc = TD_CC_NOERROR;
if (cc == TD_NOTACCESSED)
bytes_xfrd = 0;
debug_isoc_completed(imx21,
imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
if (cc) {
urb_priv->isoc_status = -EXDEV;
dev_dbg(imx21->dev,
"bad iso cc=0x%X frame=%d sched frame=%d "
"cnt=%d len=%d urb=%p etd=%d index=%d\n",
cc, imx21_hc_get_frame(hcd), td->frame,
bytes_xfrd, td->len, urb, etd_num, isoc_index);
}
if (dir_in) {
clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
if (!etd->dma_handle)
memcpy_fromio(etd->cpu_buffer,
imx21->regs + USBOTG_DMEM + etd->dmem_offset,
bytes_xfrd);
}
urb->actual_length += bytes_xfrd;
urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
etd->td = NULL;
etd->urb = NULL;
etd->ep = NULL;
if (--urb_priv->isoc_remaining == 0)
urb_done(hcd, urb, urb_priv->isoc_status);
schedule_isoc_etds(hcd, ep);
}
static struct ep_priv *alloc_isoc_ep(
struct imx21 *imx21, struct usb_host_endpoint *ep)
{
struct ep_priv *ep_priv;
int i;
ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
if (!ep_priv)
return NULL;
for (i = 0; i < NUM_ISO_ETDS; i++)
ep_priv->etd[i] = -1;
INIT_LIST_HEAD(&ep_priv->td_list);
ep_priv->ep = ep;
ep->hcpriv = ep_priv;
return ep_priv;
}
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i, j;
int etd_num;
/* Allocate the ETDs if required */
for (i = 0; i < NUM_ISO_ETDS; i++) {
if (ep_priv->etd[i] < 0) {
etd_num = alloc_etd(imx21);
if (etd_num < 0)
goto alloc_etd_failed;
ep_priv->etd[i] = etd_num;
imx21->etd[etd_num].ep = ep_priv->ep;
}
}
return 0;
alloc_etd_failed:
dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
for (j = 0; j < i; j++) {
free_etd(imx21, ep_priv->etd[j]);
ep_priv->etd[j] = -1;
}
return -ENOMEM;
}
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb, gfp_t mem_flags)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct urb_priv *urb_priv;
unsigned long flags;
struct ep_priv *ep_priv;
struct td *td = NULL;
int i;
int ret;
int cur_frame;
u16 maxpacket;
urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
if (urb_priv == NULL)
return -ENOMEM;
urb_priv->isoc_td = kzalloc(
sizeof(struct td) * urb->number_of_packets, mem_flags);
if (urb_priv->isoc_td == NULL) {
ret = -ENOMEM;
goto alloc_td_failed;
}
spin_lock_irqsave(&imx21->lock, flags);
if (ep->hcpriv == NULL) {
ep_priv = alloc_isoc_ep(imx21, ep);
if (ep_priv == NULL) {
ret = -ENOMEM;
goto alloc_ep_failed;
}
} else {
ep_priv = ep->hcpriv;
}
ret = alloc_isoc_etds(imx21, ep_priv);
if (ret)
goto alloc_etd_failed;
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto link_failed;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
urb->error_count = 0;
urb->hcpriv = urb_priv;
urb_priv->ep = ep;
/* allocate data memory for largest packets if not already done */
maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
for (i = 0; i < NUM_ISO_ETDS; i++) {
struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
/* not sure if this can really occur.... */
dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
etd->dmem_size, maxpacket);
ret = -EMSGSIZE;
goto alloc_dmem_failed;
}
if (etd->dmem_size == 0) {
etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
if (etd->dmem_offset < 0) {
dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
ret = -EAGAIN;
goto alloc_dmem_failed;
}
etd->dmem_size = maxpacket;
}
}
/* calculate frame */
cur_frame = imx21_hc_get_frame(hcd);
if (urb->transfer_flags & URB_ISO_ASAP) {
if (list_empty(&ep_priv->td_list))
urb->start_frame = cur_frame + 5;
else
urb->start_frame = list_entry(
ep_priv->td_list.prev,
struct td, list)->frame + urb->interval;
}
urb->start_frame = wrap_frame(urb->start_frame);
if (frame_after(cur_frame, urb->start_frame)) {
dev_dbg(imx21->dev,
"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
urb->start_frame, cur_frame,
(urb->transfer_flags & URB_ISO_ASAP) != 0);
urb->start_frame = wrap_frame(cur_frame + 1);
}
/* set up transfers */
td = urb_priv->isoc_td;
for (i = 0; i < urb->number_of_packets; i++, td++) {
unsigned int offset = urb->iso_frame_desc[i].offset;
td->ep = ep;
td->urb = urb;
td->len = urb->iso_frame_desc[i].length;
td->isoc_index = i;
td->frame = wrap_frame(urb->start_frame + urb->interval * i);
td->dma_handle = urb->transfer_dma + offset;
td->cpu_buffer = urb->transfer_buffer + offset;
list_add_tail(&td->list, &ep_priv->td_list);
}
urb_priv->isoc_remaining = urb->number_of_packets;
dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
urb->number_of_packets, urb->start_frame, td->frame);
debug_urb_submitted(imx21, urb);
schedule_isoc_etds(hcd, ep);
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
alloc_dmem_failed:
usb_hcd_unlink_urb_from_ep(hcd, urb);
link_failed:
alloc_etd_failed:
alloc_ep_failed:
spin_unlock_irqrestore(&imx21->lock, flags);
kfree(urb_priv->isoc_td);
alloc_td_failed:
kfree(urb_priv);
return ret;
}
static void dequeue_isoc_urb(struct imx21 *imx21,
struct urb *urb, struct ep_priv *ep_priv)
{
struct urb_priv *urb_priv = urb->hcpriv;
struct td *td, *tmp;
int i;
if (urb_priv->active) {
for (i = 0; i < NUM_ISO_ETDS; i++) {
int etd_num = ep_priv->etd[i];
if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
struct etd_priv *etd = imx21->etd + etd_num;
reset_etd(imx21, etd_num);
free_dmem(imx21, etd);
}
}
}
list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
if (td->urb == urb) {
dev_vdbg(imx21->dev, "removing td %p\n", td);
list_del(&td->list);
}
}
}
/* =========================================== */
/* NON ISOC Handling ... */
/* =========================================== */
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
unsigned int pipe = urb->pipe;
struct urb_priv *urb_priv = urb->hcpriv;
struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
int state = urb_priv->state;
int etd_num = ep_priv->etd[0];
struct etd_priv *etd;
u32 count;
u16 etd_buf_size;
u16 maxpacket;
u8 dir;
u8 bufround;
u8 datatoggle;
u8 interval = 0;
u8 relpolpos = 0;
if (etd_num < 0) {
dev_err(imx21->dev, "No valid ETD\n");
return;
}
if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
etd = &imx21->etd[etd_num];
maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
if (!maxpacket)
maxpacket = 8;
if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
if (state == US_CTRL_SETUP) {
dir = TD_DIR_SETUP;
if (unsuitable_for_dma(urb->setup_dma))
usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
urb);
etd->dma_handle = urb->setup_dma;
etd->cpu_buffer = urb->setup_packet;
bufround = 0;
count = 8;
datatoggle = TD_TOGGLE_DATA0;
} else { /* US_CTRL_ACK */
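			/* the status stage runs opposite to the data stage direction */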
dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
bufround = 0;
count = 0;
datatoggle = TD_TOGGLE_DATA1;
}
} else {
dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
bufround = (dir == TD_DIR_IN) ? 1 : 0;
if (unsuitable_for_dma(urb->transfer_dma))
usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
etd->dma_handle = urb->transfer_dma;
etd->cpu_buffer = urb->transfer_buffer;
if (usb_pipebulk(pipe) && (state == US_BULK0))
count = 0;
else
count = urb->transfer_buffer_length;
if (usb_pipecontrol(pipe)) {
datatoggle = TD_TOGGLE_DATA1;
} else {
if (usb_gettoggle(
urb->dev,
usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)))
datatoggle = TD_TOGGLE_DATA1;
else
datatoggle = TD_TOGGLE_DATA0;
}
}
etd->urb = urb;
etd->ep = urb_priv->ep;
etd->len = count;
if (usb_pipeint(pipe)) {
interval = urb->interval;
relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
}
/* Write ETD to device memory */
setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
etd_writel(imx21, etd_num, 2,
(u32) interval << DW2_POLINTERV |
((u32) relpolpos << DW2_RELPOLPOS) |
((u32) dir << DW2_DIRPID) |
((u32) bufround << DW2_BUFROUND) |
((u32) datatoggle << DW2_DATATOG) |
((u32) TD_NOTACCESSED << DW2_COMPCODE));
/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
is smaller. Make sure we don't overrun the buffer!
*/
if (count && count < maxpacket)
etd_buf_size = count;
else
etd_buf_size = maxpacket;
etd_writel(imx21, etd_num, 3,
((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
if (!count)
etd->dma_handle = 0;
/* allocate x and y buffer space at once */
etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
if (etd->dmem_offset < 0) {
/* Setup everything we can in HW and update when we get DMEM */
etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
debug_urb_queued_for_dmem(imx21, urb);
list_add_tail(&etd->queue, &imx21->queue_for_dmem);
return;
}
etd_writel(imx21, etd_num, 1,
(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
(u32) etd->dmem_offset);
urb_priv->active = 1;
/* enable the ETD to kick off transfer */
dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
etd_num, count, dir != TD_DIR_IN ? "out" : "in");
activate_etd(imx21, etd_num, dir);
}
static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct etd_priv *etd = &imx21->etd[etd_num];
struct urb *urb = etd->urb;
u32 etd_mask = 1 << etd_num;
struct urb_priv *urb_priv = urb->hcpriv;
int dir;
int cc;
u32 bytes_xfrd;
int etd_done;
disactivate_etd(imx21, etd_num);
dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
/* save toggle carry */
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe),
(etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
if (dir == TD_DIR_IN) {
clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
if (etd->bounce_buffer) {
memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
dma_unmap_single(imx21->dev,
etd->dma_handle, etd->len, DMA_FROM_DEVICE);
} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
memcpy_fromio(etd->cpu_buffer,
imx21->regs + USBOTG_DMEM + etd->dmem_offset,
bytes_xfrd);
}
}
kfree(etd->bounce_buffer);
etd->bounce_buffer = NULL;
free_dmem(imx21, etd);
urb->error_count = 0;
if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
&& (cc == TD_DATAUNDERRUN))
cc = TD_CC_NOERROR;
if (cc != 0)
dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
etd_done = (cc_to_error[cc] != 0); /* stop if error */
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
switch (urb_priv->state) {
case US_CTRL_SETUP:
if (urb->transfer_buffer_length > 0)
urb_priv->state = US_CTRL_DATA;
else
urb_priv->state = US_CTRL_ACK;
break;
case US_CTRL_DATA:
urb->actual_length += bytes_xfrd;
urb_priv->state = US_CTRL_ACK;
break;
case US_CTRL_ACK:
etd_done = 1;
break;
default:
dev_err(imx21->dev,
"Invalid pipe state %d\n", urb_priv->state);
etd_done = 1;
break;
}
break;
case PIPE_BULK:
urb->actual_length += bytes_xfrd;
if ((urb_priv->state == US_BULK)
&& (urb->transfer_flags & URB_ZERO_PACKET)
&& urb->transfer_buffer_length > 0
&& ((urb->transfer_buffer_length %
usb_maxpacket(urb->dev, urb->pipe,
usb_pipeout(urb->pipe))) == 0)) {
/* need a 0-packet */
urb_priv->state = US_BULK0;
} else {
etd_done = 1;
}
break;
case PIPE_INTERRUPT:
urb->actual_length += bytes_xfrd;
etd_done = 1;
break;
}
if (etd_done)
nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
else {
dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
schedule_nonisoc_etd(imx21, urb);
}
}
static struct ep_priv *alloc_ep(void)
{
int i;
struct ep_priv *ep_priv;
ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
if (!ep_priv)
return NULL;
for (i = 0; i < NUM_ISO_ETDS; ++i)
ep_priv->etd[i] = -1;
return ep_priv;
}
static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb, gfp_t mem_flags)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct usb_host_endpoint *ep = urb->ep;
struct urb_priv *urb_priv;
struct ep_priv *ep_priv;
struct etd_priv *etd;
int ret;
unsigned long flags;
dev_vdbg(imx21->dev,
"enqueue urb=%p ep=%p len=%d "
"buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
urb, ep,
urb->transfer_buffer_length,
urb->transfer_buffer, urb->transfer_dma,
urb->setup_packet, urb->setup_dma);
if (usb_pipeisoc(urb->pipe))
return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
if (!urb_priv)
return -ENOMEM;
spin_lock_irqsave(&imx21->lock, flags);
ep_priv = ep->hcpriv;
if (ep_priv == NULL) {
ep_priv = alloc_ep();
if (!ep_priv) {
ret = -ENOMEM;
goto failed_alloc_ep;
}
ep->hcpriv = ep_priv;
ep_priv->ep = ep;
}
ret = usb_hcd_link_urb_to_ep(hcd, urb);
if (ret)
goto failed_link;
urb->status = -EINPROGRESS;
urb->actual_length = 0;
urb->error_count = 0;
urb->hcpriv = urb_priv;
urb_priv->ep = ep;
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
urb_priv->state = US_CTRL_SETUP;
break;
case PIPE_BULK:
urb_priv->state = US_BULK;
break;
}
debug_urb_submitted(imx21, urb);
if (ep_priv->etd[0] < 0) {
if (ep_priv->waiting_etd) {
dev_dbg(imx21->dev,
"no ETD available already queued %p\n",
ep_priv);
debug_urb_queued_for_etd(imx21, urb);
goto out;
}
ep_priv->etd[0] = alloc_etd(imx21);
if (ep_priv->etd[0] < 0) {
dev_dbg(imx21->dev,
"no ETD available queueing %p\n", ep_priv);
debug_urb_queued_for_etd(imx21, urb);
list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
ep_priv->waiting_etd = 1;
goto out;
}
}
/* Schedule if no URB already active for this endpoint */
etd = &imx21->etd[ep_priv->etd[0]];
if (etd->urb == NULL) {
DEBUG_LOG_FRAME(imx21, etd, last_req);
schedule_nonisoc_etd(imx21, urb);
}
out:
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
failed_link:
failed_alloc_ep:
spin_unlock_irqrestore(&imx21->lock, flags);
kfree(urb_priv);
return ret;
}
static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long flags;
struct usb_host_endpoint *ep;
struct ep_priv *ep_priv;
struct urb_priv *urb_priv = urb->hcpriv;
int ret = -EINVAL;
dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
urb, usb_pipeisoc(urb->pipe), status);
spin_lock_irqsave(&imx21->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret)
goto fail;
ep = urb_priv->ep;
ep_priv = ep->hcpriv;
debug_urb_unlinked(imx21, urb);
if (usb_pipeisoc(urb->pipe)) {
dequeue_isoc_urb(imx21, urb, ep_priv);
schedule_isoc_etds(hcd, ep);
} else if (urb_priv->active) {
int etd_num = ep_priv->etd[0];
if (etd_num != -1) {
struct etd_priv *etd = &imx21->etd[etd_num];
disactivate_etd(imx21, etd_num);
free_dmem(imx21, etd);
etd->urb = NULL;
kfree(etd->bounce_buffer);
etd->bounce_buffer = NULL;
}
}
urb_done(hcd, urb, status);
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
fail:
spin_unlock_irqrestore(&imx21->lock, flags);
return ret;
}
/* =========================================== */
/* Interrupt dispatch */
/* =========================================== */
static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
int etd_num;
int enable_sof_int = 0;
unsigned long flags;
spin_lock_irqsave(&imx21->lock, flags);
for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
u32 etd_mask = 1 << etd_num;
u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
struct etd_priv *etd = &imx21->etd[etd_num];
if (done) {
DEBUG_LOG_FRAME(imx21, etd, last_int);
} else {
/*
* Kludge warning!
*
* When multiple transfers are using the bus we sometimes get into a state
* where the transfer has completed (the CC field of the ETD is != 0x0F),
* the ETD has self disabled but the ETDDONESTAT flag is not set
* (and hence no interrupt occurs).
* This causes the transfer in question to hang.
* The kludge below checks for this condition at each SOF and processes any
 * blocked ETDs (after an arbitrary 10 frame wait).
*
* With a single active transfer the usbtest test suite will run for days
* without the kludge.
 * With other bus activity (e.g. mass storage) even just test1 will hang without
* the kludge.
*/
u32 dword0;
int cc;
if (etd->active_count && !enabled) /* suspicious... */
enable_sof_int = 1;
if (!sof || enabled || !etd->active_count)
continue;
cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
if (cc == TD_NOTACCESSED)
continue;
if (++etd->active_count < 10)
continue;
dword0 = etd_readl(imx21, etd_num, 0);
dev_dbg(imx21->dev,
"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
etd_num, dword0 & 0x7F,
(dword0 >> DW0_ENDPNT) & 0x0F,
cc);
#ifdef DEBUG
dev_dbg(imx21->dev,
"frame: act=%d disact=%d"
" int=%d req=%d cur=%d\n",
etd->activated_frame,
etd->disactivated_frame,
etd->last_int_frame,
etd->last_req_frame,
readl(imx21->regs + USBH_FRMNUB));
imx21->debug_unblocks++;
#endif
etd->active_count = 0;
/* End of kludge */
}
if (etd->ep == NULL || etd->urb == NULL) {
dev_dbg(imx21->dev,
"Interrupt for unexpected etd %d"
" ep=%p urb=%p\n",
etd_num, etd->ep, etd->urb);
disactivate_etd(imx21, etd_num);
continue;
}
if (usb_pipeisoc(etd->urb->pipe))
isoc_etd_done(hcd, etd_num);
else
nonisoc_etd_done(hcd, etd_num);
}
/* only enable SOF interrupt if it may be needed for the kludge */
if (enable_sof_int)
set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
else
clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
spin_unlock_irqrestore(&imx21->lock, flags);
}
static irqreturn_t imx21_irq(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
u32 ints = readl(imx21->regs + USBH_SYSISR);
if (ints & USBH_SYSIEN_HERRINT)
dev_dbg(imx21->dev, "Scheduling error\n");
if (ints & USBH_SYSIEN_SORINT)
dev_dbg(imx21->dev, "Scheduling overrun\n");
if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
writel(ints, imx21->regs + USBH_SYSISR);
return IRQ_HANDLED;
}
static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long flags;
struct ep_priv *ep_priv;
int i;
if (ep == NULL)
return;
spin_lock_irqsave(&imx21->lock, flags);
ep_priv = ep->hcpriv;
dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
if (!list_empty(&ep->urb_list))
dev_dbg(imx21->dev, "ep's URB list is not empty\n");
if (ep_priv != NULL) {
for (i = 0; i < NUM_ISO_ETDS; i++) {
if (ep_priv->etd[i] > -1)
dev_dbg(imx21->dev, "free etd %d for disable\n",
ep_priv->etd[i]);
free_etd(imx21, ep_priv->etd[i]);
}
kfree(ep_priv);
ep->hcpriv = NULL;
}
for (i = 0; i < USB_NUM_ETD; i++) {
if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
dev_err(imx21->dev,
"Active etd %d for disabled ep=%p!\n", i, ep);
free_etd(imx21, i);
}
}
free_epdmem(imx21, ep);
spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Hub handling */
/* =========================================== */
static int get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
desc->bDescriptorType = 0x29; /* HUB descriptor */
desc->bHubContrCurrent = 0;
desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
& USBH_ROOTHUBA_NDNSTMPRT_MASK;
desc->bDescLength = 9;
desc->bPwrOn2PwrGood = 0;
desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
0x0002 | /* No power switching */
0x0010 | /* No over current protection */
0);
desc->u.hs.DeviceRemovable[0] = 1 << 1;
desc->u.hs.DeviceRemovable[1] = ~0;
return 0;
}
static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
int ports;
int changed = 0;
int i;
unsigned long flags;
spin_lock_irqsave(&imx21->lock, flags);
ports = readl(imx21->regs + USBH_ROOTHUBA)
& USBH_ROOTHUBA_NDNSTMPRT_MASK;
if (ports > 7) {
ports = 7;
dev_err(imx21->dev, "ports %d > 7\n", ports);
}
for (i = 0; i < ports; i++) {
if (readl(imx21->regs + USBH_PORTSTAT(i)) &
(USBH_PORTSTAT_CONNECTSC |
USBH_PORTSTAT_PRTENBLSC |
USBH_PORTSTAT_PRTSTATSC |
USBH_PORTSTAT_OVRCURIC |
USBH_PORTSTAT_PRTRSTSC)) {
changed = 1;
buf[0] |= 1 << (i + 1);
}
}
spin_unlock_irqrestore(&imx21->lock, flags);
if (changed)
dev_info(imx21->dev, "Hub status changed\n");
return changed;
}
static int imx21_hc_hub_control(struct usb_hcd *hcd,
u16 typeReq,
u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
int rc = 0;
u32 status_write = 0;
switch (typeReq) {
case ClearHubFeature:
dev_dbg(imx21->dev, "ClearHubFeature\n");
switch (wValue) {
case C_HUB_OVER_CURRENT:
dev_dbg(imx21->dev, " OVER_CURRENT\n");
break;
case C_HUB_LOCAL_POWER:
dev_dbg(imx21->dev, " LOCAL_POWER\n");
break;
default:
dev_dbg(imx21->dev, " unknown\n");
rc = -EINVAL;
break;
}
break;
case ClearPortFeature:
dev_dbg(imx21->dev, "ClearPortFeature\n");
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
dev_dbg(imx21->dev, " ENABLE\n");
status_write = USBH_PORTSTAT_CURCONST;
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(imx21->dev, " SUSPEND\n");
status_write = USBH_PORTSTAT_PRTOVRCURI;
break;
case USB_PORT_FEAT_POWER:
dev_dbg(imx21->dev, " POWER\n");
status_write = USBH_PORTSTAT_LSDEVCON;
break;
case USB_PORT_FEAT_C_ENABLE:
dev_dbg(imx21->dev, " C_ENABLE\n");
status_write = USBH_PORTSTAT_PRTENBLSC;
break;
case USB_PORT_FEAT_C_SUSPEND:
dev_dbg(imx21->dev, " C_SUSPEND\n");
status_write = USBH_PORTSTAT_PRTSTATSC;
break;
case USB_PORT_FEAT_C_CONNECTION:
dev_dbg(imx21->dev, " C_CONNECTION\n");
status_write = USBH_PORTSTAT_CONNECTSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
status_write = USBH_PORTSTAT_OVRCURIC;
break;
case USB_PORT_FEAT_C_RESET:
dev_dbg(imx21->dev, " C_RESET\n");
status_write = USBH_PORTSTAT_PRTRSTSC;
break;
default:
dev_dbg(imx21->dev, " unknown\n");
rc = -EINVAL;
break;
}
break;
case GetHubDescriptor:
dev_dbg(imx21->dev, "GetHubDescriptor\n");
rc = get_hub_descriptor(hcd, (void *)buf);
break;
case GetHubStatus:
dev_dbg(imx21->dev, " GetHubStatus\n");
*(__le32 *) buf = 0;
break;
case GetPortStatus:
dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
wIndex, USBH_PORTSTAT(wIndex - 1));
*(__le32 *) buf = readl(imx21->regs +
USBH_PORTSTAT(wIndex - 1));
break;
case SetHubFeature:
dev_dbg(imx21->dev, "SetHubFeature\n");
switch (wValue) {
case C_HUB_OVER_CURRENT:
dev_dbg(imx21->dev, " OVER_CURRENT\n");
break;
case C_HUB_LOCAL_POWER:
dev_dbg(imx21->dev, " LOCAL_POWER\n");
break;
default:
dev_dbg(imx21->dev, " unknown\n");
rc = -EINVAL;
break;
}
break;
case SetPortFeature:
dev_dbg(imx21->dev, "SetPortFeature\n");
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
dev_dbg(imx21->dev, " SUSPEND\n");
status_write = USBH_PORTSTAT_PRTSUSPST;
break;
case USB_PORT_FEAT_POWER:
dev_dbg(imx21->dev, " POWER\n");
status_write = USBH_PORTSTAT_PRTPWRST;
break;
case USB_PORT_FEAT_RESET:
dev_dbg(imx21->dev, " RESET\n");
status_write = USBH_PORTSTAT_PRTRSTST;
break;
default:
dev_dbg(imx21->dev, " unknown\n");
rc = -EINVAL;
break;
}
break;
default:
dev_dbg(imx21->dev, " unknown\n");
rc = -EINVAL;
break;
}
if (status_write)
writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
return rc;
}
/* =========================================== */
/* Host controller management */
/* =========================================== */
static int imx21_hc_reset(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long timeout;
unsigned long flags;
spin_lock_irqsave(&imx21->lock, flags);
/* Reset the Host controller modules */
writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
imx21->regs + USBOTG_RST_CTRL);
/* Wait for reset to finish */
timeout = jiffies + HZ;
while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
if (time_after(jiffies, timeout)) {
spin_unlock_irqrestore(&imx21->lock, flags);
dev_err(imx21->dev, "timeout waiting for reset\n");
return -ETIMEDOUT;
}
spin_unlock_irq(&imx21->lock);
schedule_timeout_uninterruptible(1);
spin_lock_irq(&imx21->lock);
}
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
}
static int imx21_hc_start(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long flags;
int i, j;
u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
u32 usb_control = 0;
hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
USBOTG_HWMODE_HOSTXCVR_MASK);
hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
USBOTG_HWMODE_OTGXCVR_MASK);
if (imx21->pdata->host1_txenoe)
usb_control |= USBCTRL_HOST1_TXEN_OE;
if (!imx21->pdata->host1_xcverless)
usb_control |= USBCTRL_HOST1_BYP_TLL;
if (imx21->pdata->otg_ext_xcvr)
usb_control |= USBCTRL_OTC_RCV_RXDP;
spin_lock_irqsave(&imx21->lock, flags);
writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
imx21->regs + USBOTG_CLK_CTRL);
writel(hw_mode, imx21->regs + USBOTG_HWMODE);
writel(usb_control, imx21->regs + USBCTRL);
writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
imx21->regs + USB_MISCCONTROL);
/* Clear the ETDs */
for (i = 0; i < USB_NUM_ETD; i++)
for (j = 0; j < 4; j++)
etd_writel(imx21, i, j, 0);
/* Take the HC out of reset */
writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
imx21->regs + USBH_HOST_CTRL);
/* Enable ports */
if (imx21->pdata->enable_otg_host)
writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
imx21->regs + USBH_PORTSTAT(0));
if (imx21->pdata->enable_host1)
writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
imx21->regs + USBH_PORTSTAT(1));
if (imx21->pdata->enable_host2)
writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
imx21->regs + USBH_PORTSTAT(2));
hcd->state = HC_STATE_RUNNING;
/* Enable host controller interrupts */
set_register_bits(imx21, USBH_SYSIEN,
USBH_SYSIEN_HERRINT |
USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
spin_unlock_irqrestore(&imx21->lock, flags);
return 0;
}
static void imx21_hc_stop(struct usb_hcd *hcd)
{
struct imx21 *imx21 = hcd_to_imx21(hcd);
unsigned long flags;
spin_lock_irqsave(&imx21->lock, flags);
writel(0, imx21->regs + USBH_SYSIEN);
clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN,
USBOTG_CLK_CTRL);
spin_unlock_irqrestore(&imx21->lock, flags);
}
/* =========================================== */
/* Driver glue */
/* =========================================== */
static struct hc_driver imx21_hc_driver = {
.description = hcd_name,
.product_desc = "IMX21 USB Host Controller",
.hcd_priv_size = sizeof(struct imx21),
.flags = HCD_USB11,
.irq = imx21_irq,
.reset = imx21_hc_reset,
.start = imx21_hc_start,
.stop = imx21_hc_stop,
/* I/O requests */
.urb_enqueue = imx21_hc_urb_enqueue,
.urb_dequeue = imx21_hc_urb_dequeue,
.endpoint_disable = imx21_hc_endpoint_disable,
/* scheduling support */
.get_frame_number = imx21_hc_get_frame,
/* Root hub support */
.hub_status_data = imx21_hc_hub_status_data,
.hub_control = imx21_hc_hub_control,
};
static struct mx21_usbh_platform_data default_pdata = {
.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
.enable_host1 = 1,
.enable_host2 = 1,
.enable_otg_host = 1,
};
static int imx21_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct imx21 *imx21 = hcd_to_imx21(hcd);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
remove_debug_files(imx21);
usb_remove_hcd(hcd);
if (res != NULL) {
clk_disable_unprepare(imx21->clk);
clk_put(imx21->clk);
iounmap(imx21->regs);
release_mem_region(res->start, resource_size(res));
}
kfree(hcd);
return 0;
}
static int imx21_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct imx21 *imx21;
struct resource *res;
int ret;
int irq;
printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -ENXIO;
hcd = usb_create_hcd(&imx21_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (hcd == NULL) {
dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
dev_name(&pdev->dev));
return -ENOMEM;
}
imx21 = hcd_to_imx21(hcd);
imx21->hcd = hcd;
imx21->dev = &pdev->dev;
imx21->pdata = pdev->dev.platform_data;
if (!imx21->pdata)
imx21->pdata = &default_pdata;
spin_lock_init(&imx21->lock);
INIT_LIST_HEAD(&imx21->dmem_list);
INIT_LIST_HEAD(&imx21->queue_for_etd);
INIT_LIST_HEAD(&imx21->queue_for_dmem);
create_debug_files(imx21);
res = request_mem_region(res->start, resource_size(res), hcd_name);
if (!res) {
ret = -EBUSY;
goto failed_request_mem;
}
imx21->regs = ioremap(res->start, resource_size(res));
if (imx21->regs == NULL) {
dev_err(imx21->dev, "Cannot map registers\n");
ret = -ENOMEM;
goto failed_ioremap;
}
/* Enable clock source */
imx21->clk = clk_get(imx21->dev, NULL);
if (IS_ERR(imx21->clk)) {
dev_err(imx21->dev, "no clock found\n");
ret = PTR_ERR(imx21->clk);
goto failed_clock_get;
}
ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
if (ret)
goto failed_clock_set;
ret = clk_prepare_enable(imx21->clk);
if (ret)
goto failed_clock_enable;
dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
ret = usb_add_hcd(hcd, irq, 0);
if (ret != 0) {
dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
goto failed_add_hcd;
}
return 0;
failed_add_hcd:
clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
clk_put(imx21->clk);
failed_clock_get:
iounmap(imx21->regs);
failed_ioremap:
release_mem_region(res->start, resource_size(res));
failed_request_mem:
remove_debug_files(imx21);
usb_put_hcd(hcd);
return ret;
}
static struct platform_driver imx21_hcd_driver = {
.driver = {
.name = (char *)hcd_name,
},
.probe = imx21_probe,
.remove = imx21_remove,
.suspend = NULL,
.resume = NULL,
};
module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx21-hcd");
| gpl-2.0 |
rmtew/MediaTek-HelioX10-Kernel | alps/kernel-3.10/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2486 | 8220 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
/* Enable Deficit Fixed Priority arbitration*/
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return 0;
}
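/*
 * Register sketch (illustrative values): refill = 0x200 and max = 0x400
 * yield RT2CR(i) = 0x200 | (0x400 << IXGBE_RT2CR_MCL_SHIFT), with
 * IXGBE_RT2CR_LSP OR'd in for a link-strict-priority class.
 */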
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
if (pfc_en)
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
}
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
/* Configure pause time */
reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return 0;
}
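/*
 * Editor's note: an illustrative sketch, not part of the original driver.
 * The multiply by 0x00010001 above duplicates the 16-bit pause time into
 * both halves of the 32-bit FCTTV register; the hypothetical helper below
 * is the equivalent shift-and-or form.
 */
static inline u32 ixgbe_pack_pause_time(u16 pause_time)
{
	/* the low and high 16-bit fields each carry one pause-time value */
	return (u32)pause_time | ((u32)pause_time << 16);
}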
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers, all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return 0;
}
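/*
 * Editor's note: an illustrative sketch, not part of the original driver.
 * The (0x1010101 * j) pattern above replicates the counter-set index j into
 * every byte of the 32-bit RQSMR/TQSMR register, so each byte-wide
 * queue-mapping field selects statistics set j. Hypothetical helper:
 */
static inline u32 ixgbe_replicate_stat_index(u8 j)
{
	return 0x01010101u * j;	/* e.g. j = 2 -> 0x02020202 */
}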
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
}
| gpl-2.0 |
Ant-Droid/android_kernel_moto_shamu | arch/ia64/kernel/process.c | 2486 | 18539 | /*
* Architecture-specific setup.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
*
* 2005-10-07 Keith Owens <kaos@sgi.com>
* Add notify_die() hooks.
*/
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>
#include <linux/rcupdate.h>
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>
#include "entry.h"
#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#include "sigframe.h"
void (*ia64_mark_idle)(int);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
char buf[128]; /* don't make it so big that it overflows the stack! */
printk("\nCall Trace:\n");
do {
unw_get_ip(info, &ip);
if (ip == 0)
break;
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
snprintf(buf, sizeof(buf),
" [<%016lx>] %%s\n"
" sp=%016lx bsp=%016lx\n",
ip, sp, bsp);
print_symbol(buf, ip);
} while (unw_unwind(info) >= 0);
}
void
show_stack (struct task_struct *task, unsigned long *sp)
{
if (!task)
unw_init_running(ia64_do_show_stack, NULL);
else {
struct unw_frame_info info;
unw_init_from_blocked_task(&info, task);
ia64_do_show_stack(&info, NULL);
}
}
void
show_regs (struct pt_regs *regs)
{
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
print_modules();
printk("\n");
show_regs_print_info(KERN_DEFAULT);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
init_utsname()->release);
print_symbol("ip is at %s\n", ip);
printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
regs->ar_rnat, regs->ar_bspstore, regs->pr);
printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
regs->f6.u.bits[1], regs->f6.u.bits[0],
regs->f7.u.bits[1], regs->f7.u.bits[0]);
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
regs->f10.u.bits[1], regs->f10.u.bits[0],
regs->f11.u.bits[1], regs->f11.u.bits[0]);
printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
if (user_mode(regs)) {
/* print the stacked registers */
unsigned long val, *bsp, ndirty;
int i, sof, is_nat = 0;
sof = regs->cr_ifs & 0x7f; /* size of frame */
ndirty = (regs->loadrs >> 19);
bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
for (i = 0; i < sof; ++i) {
get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
} else
show_stack(NULL, NULL);
}
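/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The ndirty computation in show_regs() assumes pt_regs->loadrs holds the
 * RSE load ratio shifted left by 16, so (loadrs >> 16) is the size of the
 * dirty partition in bytes and (loadrs >> 19) the same size counted in
 * 8-byte stacked registers.
 */
static inline unsigned long ia64_dirty_stacked_regs(unsigned long loadrs)
{
	return (loadrs >> 16) / 8;	/* same value as loadrs >> 19 */
}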
/* local support for deprecated console_print */
void
console_print(const char *s)
{
printk(KERN_EMERG "%s", s);
}
void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
if (fsys_mode(current, &scr->pt)) {
/*
* defer signal-handling etc. until we return to
* privilege-level 0.
*/
if (!ia64_psr(&scr->pt)->lp)
ia64_psr(&scr->pt)->lp = 1;
return;
}
#ifdef CONFIG_PERFMON
if (current->thread.pfm_needs_checking)
/*
* Note: pfm_handle_work() allow us to call it with interrupts
* disabled, and may enable interrupts within the function.
*/
pfm_handle_work();
#endif
/* deal with pending signal delivery */
if (test_thread_flag(TIF_SIGPENDING)) {
local_irq_enable(); /* force interrupt enable */
ia64_do_signal(scr, in_syscall);
}
if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) {
local_irq_enable(); /* force interrupt enable */
tracehook_notify_resume(&scr->pt);
}
/* copy user rbs to kernel rbs */
if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
local_irq_enable(); /* force interrupt enable */
ia64_sync_krbs();
}
local_irq_disable(); /* force interrupt disable */
}
static int __init nohalt_setup(char * str)
{
cpu_idle_poll_ctrl(true);
return 1;
}
__setup("nohalt", nohalt_setup);
#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
unsigned int this_cpu = smp_processor_id();
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
max_xtp();
local_irq_disable();
idle_task_exit();
ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
/*
* The above is a point of no-return, the processor is
* expected to be in SAL loop now.
*/
BUG();
}
#else
static inline void play_dead(void)
{
BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
void arch_cpu_idle_dead(void)
{
play_dead();
}
void arch_cpu_idle(void)
{
void (*mark_idle)(int) = ia64_mark_idle;
#ifdef CONFIG_SMP
min_xtp();
#endif
rmb();
if (mark_idle)
(*mark_idle)(1);
safe_halt();
if (mark_idle)
(*mark_idle)(0);
#ifdef CONFIG_SMP
normal_xtp();
#endif
}
void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
unsigned long info;
#endif
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
ia64_save_debug_regs(&task->thread.dbr[0]);
#ifdef CONFIG_PERFMON
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
info = __get_cpu_var(pfm_syst_info);
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 0);
#endif
}
void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
unsigned long info;
#endif
if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
ia64_load_debug_regs(&task->thread.dbr[0]);
#ifdef CONFIG_PERFMON
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
info = __get_cpu_var(pfm_syst_info);
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 1);
#endif
}
/*
* Copy the state of an ia-64 thread.
*
* We get here through the following call chain:
*
* from user-level: from kernel:
*
* <clone syscall> <some kernel call frames>
* sys_clone :
* do_fork do_fork
* copy_thread copy_thread
*
* This means that the stack layout is as follows:
*
* +---------------------+ (highest addr)
* | struct pt_regs |
* +---------------------+
* | struct switch_stack |
* +---------------------+
* | |
* | memory stack |
* | | <-- sp (lowest addr)
* +---------------------+
*
* Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an
* integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
* with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the
* pt_regs structure in the parent is congruent to that of the child, modulo 512. Since
* the stack is page aligned and the page size is at least 4KB, this is always the case,
* so there is nothing to worry about.
*/
int
copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p)
{
extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
struct pt_regs *child_ptregs;
struct pt_regs *regs = current_pt_regs();
int retval = 0;
child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
child_stack = (struct switch_stack *) child_ptregs - 1;
rbs = (unsigned long) current + IA64_RBS_OFFSET;
child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
/*
* NOTE: The calling convention considers all floating point
* registers in the high partition (fph) to be scratch. Since
* the only way to get to this point is through a system call,
* we know that the values in fph are all dead. Hence, there
* is no need to inherit the fph state from the parent to the
* child and all we have to do is to make sure that
* IA64_THREAD_FPH_VALID is cleared in the child.
*
* XXX We could push this optimization a bit further by
* clearing IA64_THREAD_FPH_VALID on ANY system call.
* However, it's not clear this is worth doing. Also, it
* would be a slight deviation from the normal Linux system
* call behavior where scratch registers are preserved across
* system calls (unless used by the system call itself).
*/
# define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
| IA64_THREAD_PM_VALID)
# define THREAD_FLAGS_TO_SET 0
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET);
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
if (unlikely(p->flags & PF_KTHREAD)) {
if (unlikely(!user_stack_base)) {
/* fork_idle() called us */
return 0;
}
memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
child_stack->r4 = user_stack_base; /* payload */
child_stack->r5 = user_stack_size; /* argument */
/*
* Preserve PSR bits, except for bits 32-34 and 37-45,
* which we can't read.
*/
child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
/* mark as valid, empty frame */
child_ptregs->cr_ifs = 1UL << 63;
child_stack->ar_fpsr = child_ptregs->ar_fpsr
= ia64_getreg(_IA64_REG_AR_FPSR);
child_stack->pr = (1 << PRED_KERNEL_STACK);
child_stack->ar_bspstore = child_rbs;
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
return 0;
}
stack = ((struct switch_stack *) regs) - 1;
/* copy parent's switch_stack & pt_regs to child: */
memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
/* copy the parent's register backing store to the child: */
rbs_size = stack->ar_bspstore - rbs;
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
child_ptregs->ar_bspstore = user_stack_base;
child_ptregs->ar_rnat = 0;
child_ptregs->loadrs = 0;
}
child_stack->ar_bspstore = child_rbs + rbs_size;
child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* stop some PSR bits from being inherited.
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
* therefore we must specify them explicitly here and not include them in
* IA64_PSR_BITS_TO_CLEAR.
*/
child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
& ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
#ifdef CONFIG_PERFMON
if (current->thread.pfm_context)
pfm_inherit(p, child_ptregs);
#endif
return retval;
}
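/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The NaT-preservation argument in the comment above copy_thread() rests on
 * the rule that spilling to address X uses ar.unat bit (X & 0x1ff)/8; the
 * hypothetical helper below spells out that arithmetic.
 */
static inline unsigned long ia64_unat_bit_for(unsigned long x)
{
	/* addresses congruent modulo 512 share the same unat bit,
	 * one bit per 8-byte spill slot */
	return (x & 0x1ff) / 8;
}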
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
unsigned long uninitialized_var(ip); /* GCC be quiet */
elf_greg_t *dst = arg;
struct pt_regs *pt;
char nat;
int i;
memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */
if (unw_unwind_to_user(info) < 0)
return;
unw_get_sp(info, &sp);
pt = (struct pt_regs *) (sp + 16);
urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);
if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
return;
ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
&ar_rnat);
/*
* coredump format:
* r0-r31
* NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
* predicate registers (p0-p63)
* b0-b7
* ip cfm user-mask
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
*/
/* r0 is zero */
for (i = 1, mask = (1UL << i); i < 32; ++i) {
unw_get_gr(info, i, &dst[i], &nat);
if (nat)
nat_bits |= mask;
mask <<= 1;
}
dst[32] = nat_bits;
unw_get_pr(info, &dst[33]);
for (i = 0; i < 8; ++i)
unw_get_br(info, i, &dst[34 + i]);
unw_get_rp(info, &ip);
dst[42] = ip + ia64_psr(pt)->ri;
dst[43] = cfm;
dst[44] = pt->cr_ipsr & IA64_PSR_UM;
unw_get_ar(info, UNW_AR_RSC, &dst[45]);
/*
* For bsp and bspstore, unw_get_ar() would return the kernel
* addresses, but we need the user-level addresses instead:
*/
dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */
dst[47] = pt->ar_bspstore;
dst[48] = ar_rnat;
unw_get_ar(info, UNW_AR_CCV, &dst[49]);
unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
unw_get_ar(info, UNW_AR_LC, &dst[53]);
unw_get_ar(info, UNW_AR_EC, &dst[54]);
unw_get_ar(info, UNW_AR_CSD, &dst[55]);
unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
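/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * These hypothetical constants name the elf_gregset_t slots that the
 * coredump-format comment and the dst[] stores above agree on.
 */
enum ia64_dump_slot {
	IA64_DUMP_NAT_BITS	= 32,	/* NaT bit for each of r0-r31 */
	IA64_DUMP_PRED		= 33,	/* predicate registers p0-p63 */
	IA64_DUMP_BR0		= 34,	/* b0-b7 occupy slots 34-41 */
	IA64_DUMP_IP		= 42,
	IA64_DUMP_CFM		= 43,
	IA64_DUMP_USER_MASK	= 44,
	IA64_DUMP_AR_RSC	= 45,
	IA64_DUMP_AR_BSP	= 46,	/* end of the user RBS, by convention */
	IA64_DUMP_AR_BSPSTORE	= 47,
	IA64_DUMP_AR_RNAT	= 48,
};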
void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
elf_fpreg_t *dst = arg;
int i;
memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */
if (unw_unwind_to_user(info) < 0)
return;
/* f0 is 0.0, f1 is 1.0 */
for (i = 2; i < 32; ++i)
unw_get_fr(info, i, dst + i);
ia64_flush_fph(task);
if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
memcpy(dst + 32, task->thread.fph, 96*16);
}
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
do_copy_task_regs(current, info, arg);
}
void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
do_dump_task_fpu(current, info, arg);
}
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
unw_init_running(do_copy_regs, dst);
}
int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
unw_init_running(do_dump_fpu, dst);
return 1; /* f0-f31 are always valid so we always return 1 */
}
/*
* Flush thread state. This is called when a thread does an execve().
*/
void
flush_thread (void)
{
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
}
/*
* Clean up state associated with current thread. This is called when
* the thread calls exit().
*/
void
exit_thread (void)
{
ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */
if (current->thread.pfm_context)
pfm_exit_thread(current);
/* free debug register resources */
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
}
unsigned long
get_wchan (struct task_struct *p)
{
struct unw_frame_info info;
unsigned long ip;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* Note: p may not be a blocked task (it could be current or
* another process running on some other CPU). Rather than
* trying to determine if p is really blocked, we just assume
* it's blocked and rely on the unwind routines to fail
* gracefully if the process wasn't really blocked after all.
* --davidm 99/12/15
*/
unw_init_from_blocked_task(&info, p);
do {
if (p->state == TASK_RUNNING)
return 0;
if (unw_unwind(&info) < 0)
return 0;
unw_get_ip(&info, &ip);
if (!in_sched_functions(ip))
return ip;
} while (count++ < 16);
return 0;
}
void
cpu_halt (void)
{
pal_power_mgmt_info_u_t power_info[8];
unsigned long min_power;
int i, min_power_state;
if (ia64_pal_halt_info(power_info) != 0)
return;
min_power_state = 0;
min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
for (i = 1; i < 8; ++i)
if (power_info[i].pal_power_mgmt_info_s.im
&& power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
min_power_state = i;
}
while (1)
ia64_pal_halt(min_power_state);
}
void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
cpu_down(cpu);
}
#endif
#ifdef CONFIG_KEXEC
kexec_disable_iosapic();
#endif
}
void
machine_restart (char *restart_cmd)
{
(void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}
void
machine_halt (void)
{
(void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
cpu_halt();
}
void
machine_power_off (void)
{
if (pm_power_off)
pm_power_off();
machine_halt();
}
| gpl-2.0 |
LightningZap/kernel_shamu-lz | drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2486 | 8220 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
/* Enable Deficit Fixed Priority arbitration*/
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return 0;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
u8 *prio_type)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return 0;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
if (pfc_en)
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
}
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
/* Configure pause time */
reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return 0;
}
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers, all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return 0;
}
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @refill: refill credits indexed by traffic class
* @max: max credits indexed by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
* @prio_type: priority type indexed by traffic class
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
}
| gpl-2.0 |
XileForce/Vindicator-S6-Uni-Old | drivers/leds/leds-cobalt-raq.c | 3254 | 3266 | /*
* LEDs driver for the Cobalt Raq series.
*
* Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/export.h>
#define LED_WEB 0x04
#define LED_POWER_OFF 0x08
static void __iomem *led_port;
static u8 led_value;
static DEFINE_SPINLOCK(led_value_lock);
static void raq_web_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
unsigned long flags;
spin_lock_irqsave(&led_value_lock, flags);
if (brightness)
led_value |= LED_WEB;
else
led_value &= ~LED_WEB;
writeb(led_value, led_port);
spin_unlock_irqrestore(&led_value_lock, flags);
}
static struct led_classdev raq_web_led = {
.name = "raq::web",
.brightness_set = raq_web_led_set,
};
static void raq_power_off_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
unsigned long flags;
spin_lock_irqsave(&led_value_lock, flags);
if (brightness)
led_value |= LED_POWER_OFF;
else
led_value &= ~LED_POWER_OFF;
writeb(led_value, led_port);
spin_unlock_irqrestore(&led_value_lock, flags);
}
static struct led_classdev raq_power_off_led = {
.name = "raq::power-off",
.brightness_set = raq_power_off_led_set,
.default_trigger = "power-off",
};
static int cobalt_raq_led_probe(struct platform_device *pdev)
{
struct resource *res;
int retval;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EBUSY;
led_port = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
retval = led_classdev_register(&pdev->dev, &raq_power_off_led);
if (retval)
goto err_null;
retval = led_classdev_register(&pdev->dev, &raq_web_led);
if (retval)
goto err_unregister;
return 0;
err_unregister:
led_classdev_unregister(&raq_power_off_led);
err_null:
led_port = NULL;
return retval;
}
static int cobalt_raq_led_remove(struct platform_device *pdev)
{
led_classdev_unregister(&raq_power_off_led);
led_classdev_unregister(&raq_web_led);
led_port = NULL;
return 0;
}
static struct platform_driver cobalt_raq_led_driver = {
.probe = cobalt_raq_led_probe,
.remove = cobalt_raq_led_remove,
.driver = {
.name = "cobalt-raq-leds",
.owner = THIS_MODULE,
},
};
static int __init cobalt_raq_led_init(void)
{
return platform_driver_register(&cobalt_raq_led_driver);
}
module_init(cobalt_raq_led_init);
| gpl-2.0 |
Abhinav1997/kernel_z3 | fs/pstore/platform.c | 3254 | 6342 | /*
* Persistent Storage - platform driver interface parts.
*
* Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include "internal.h"
/*
* We defer making "oops" entries appear in pstore - see
* whether the system is actually still running well enough
* to let someone see the entry
*/
#define PSTORE_INTERVAL (60 * HZ)
static int pstore_new_entry;
static void pstore_timefunc(unsigned long);
static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0);
static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);
/*
* pstore_lock just protects "psinfo" during
* calls to pstore_register()
*/
static DEFINE_SPINLOCK(pstore_lock);
static struct pstore_info *psinfo;
static char *backend;
/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;
void pstore_set_kmsg_bytes(int bytes)
{
kmsg_bytes = bytes;
}
/* Tag each group of saved records with a sequence number */
static int oopscount;
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
switch (reason) {
case KMSG_DUMP_PANIC:
return "Panic";
case KMSG_DUMP_OOPS:
return "Oops";
case KMSG_DUMP_EMERG:
return "Emergency";
case KMSG_DUMP_RESTART:
return "Restart";
case KMSG_DUMP_HALT:
return "Halt";
case KMSG_DUMP_POWEROFF:
return "Poweroff";
default:
return "Unknown";
}
}
/*
* callback from kmsg_dump. (s2,l2) has the most recently
* written bytes, older bytes are in (s1,l1). Save as much
* as we can from the end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason,
const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
{
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
unsigned long size, total = 0;
char *dst;
const char *why;
u64 id;
int hsize, ret;
unsigned int part = 1;
unsigned long flags = 0;
int is_locked = 0;
why = get_reason_str(reason);
if (in_nmi()) {
is_locked = spin_trylock(&psinfo->buf_lock);
if (!is_locked)
pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
} else
spin_lock_irqsave(&psinfo->buf_lock, flags);
oopscount++;
while (total < kmsg_bytes) {
dst = psinfo->buf;
hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
size = psinfo->bufsize - hsize;
dst += hsize;
l2_cpy = min(l2, size);
l1_cpy = min(l1, size - l2_cpy);
if (l1_cpy + l2_cpy == 0)
break;
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
memcpy(dst, s1 + s1_start, l1_cpy);
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
hsize + l1_cpy + l2_cpy, psinfo);
if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_new_entry = 1;
l1 -= l1_cpy;
l2 -= l2_cpy;
total += l1_cpy + l2_cpy;
part++;
}
if (in_nmi()) {
if (is_locked)
spin_unlock(&psinfo->buf_lock);
} else
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}
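/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * pstore_dump() keeps the *end* of the two kmsg buffers, newest bytes first:
 * given a byte budget it takes min(l2, size) from the newer buffer and fills
 * the remainder from the tail of the older one, exactly as the l1_cpy/l2_cpy
 * computation above. Hypothetical stand-alone form:
 */
static inline void pstore_tail_budget(unsigned long l1, unsigned long l2,
				      unsigned long size,
				      unsigned long *l1_cpy,
				      unsigned long *l2_cpy)
{
	*l2_cpy = min(l2, size);		/* newest bytes win */
	*l1_cpy = min(l1, size - *l2_cpy);	/* older bytes fill the rest */
}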
static struct kmsg_dumper pstore_dumper = {
.dump = pstore_dump,
};
/*
* platform specific persistent storage driver registers with
* us here. If pstore is already mounted, call the platform
* read function right away to populate the file system. If not
* then the pstore mount code will call us later to fill out
* the file system.
*
* Register with kmsg_dump to save last part of console log on panic.
*/
int pstore_register(struct pstore_info *psi)
{
struct module *owner = psi->owner;
spin_lock(&pstore_lock);
if (psinfo) {
spin_unlock(&pstore_lock);
return -EBUSY;
}
if (backend && strcmp(backend, psi->name)) {
spin_unlock(&pstore_lock);
return -EINVAL;
}
psinfo = psi;
mutex_init(&psinfo->read_mutex);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
psinfo = NULL;
return -EINVAL;
}
if (pstore_is_mounted())
pstore_get_records(0);
kmsg_dump_register(&pstore_dumper);
pstore_timer.expires = jiffies + PSTORE_INTERVAL;
add_timer(&pstore_timer);
return 0;
}
EXPORT_SYMBOL_GPL(pstore_register);
/*
* Read all the records from the persistent store. Create
* files in our filesystem. Don't warn about -EEXIST errors
* when we are re-scanning the backing store looking to add new
* error records.
*/
void pstore_get_records(int quiet)
{
struct pstore_info *psi = psinfo;
char *buf = NULL;
ssize_t size;
u64 id;
enum pstore_type_id type;
struct timespec time;
int failed = 0, rc;
if (!psi)
return;
mutex_lock(&psi->read_mutex);
if (psi->open && psi->open(psi))
goto out;
while ((size = psi->read(&id, &type, &time, &buf, psi)) > 0) {
rc = pstore_mkfile(type, psi->name, id, buf, (size_t)size,
time, psi);
kfree(buf);
buf = NULL;
if (rc && (rc != -EEXIST || !quiet))
failed++;
}
if (psi->close)
psi->close(psi);
out:
mutex_unlock(&psi->read_mutex);
if (failed)
printk(KERN_WARNING "pstore: failed to load %d record(s) from '%s'\n",
failed, psi->name);
}
static void pstore_dowork(struct work_struct *work)
{
pstore_get_records(1);
}
static void pstore_timefunc(unsigned long dummy)
{
if (pstore_new_entry) {
pstore_new_entry = 0;
schedule_work(&pstore_work);
}
mod_timer(&pstore_timer, jiffies + PSTORE_INTERVAL);
}
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");
| gpl-2.0 |
brymaster5000/m7-501 | arch/arm/common/vic.c | 4790 | 11708 | /*
* linux/arch/arm/common/vic.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
/**
* struct vic_device - VIC PM device
* @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
* @int_select: Save for VIC_INT_SELECT.
* @int_enable: Save for VIC_INT_ENABLE.
* @soft_int: Save for VIC_INT_SOFT.
* @protect: Save for VIC_PROTECT.
* @domain: The IRQ domain for the VIC.
*/
struct vic_device {
void __iomem *base;
int irq;
u32 resume_sources;
u32 resume_irqs;
u32 int_select;
u32 int_enable;
u32 soft_int;
u32 protect;
struct irq_domain *domain;
};
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
static int vic_id;
/**
* vic_init2 - common initialisation code
* @base: Base of the VIC.
*
* Common initialisation code for registration
* and resume.
*/
static void vic_init2(void __iomem *base)
{
int i;
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(VIC_VECT_CNTL_ENABLE | i, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
#ifdef CONFIG_PM
static void resume_one_vic(struct vic_device *vic)
{
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
/* re-initialise static settings */
vic_init2(base);
writel(vic->int_select, base + VIC_INT_SELECT);
writel(vic->protect, base + VIC_PROTECT);
/* set the enabled ints and then clear the non-enabled */
writel(vic->int_enable, base + VIC_INT_ENABLE);
writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);
/* and the same for the soft-int register */
writel(vic->soft_int, base + VIC_INT_SOFT);
writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
}
static void vic_resume(void)
{
int id;
for (id = vic_id - 1; id >= 0; id--)
resume_one_vic(vic_devices + id);
}
static void suspend_one_vic(struct vic_device *vic)
{
void __iomem *base = vic->base;
printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
vic->int_select = readl(base + VIC_INT_SELECT);
vic->int_enable = readl(base + VIC_INT_ENABLE);
vic->soft_int = readl(base + VIC_INT_SOFT);
vic->protect = readl(base + VIC_PROTECT);
/* set the interrupts (if any) that are used for
* resuming the system */
writel(vic->resume_irqs, base + VIC_INT_ENABLE);
writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
}
static int vic_suspend(void)
{
int id;
for (id = 0; id < vic_id; id++)
suspend_one_vic(vic_devices + id);
return 0;
}
struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
/**
* vic_pm_init - initcall to register VIC pm
*
* This is called via late_initcall() to register
* the resources for the VICs due to the early
* nature of the VIC's registration.
*/
static int __init vic_pm_init(void)
{
if (vic_id > 0)
register_syscore_ops(&vic_syscore_ops);
return 0;
}
late_initcall(vic_pm_init);
#endif /* CONFIG_PM */
/**
* vic_register() - Register a VIC.
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @resume_sources: bitmask of interrupts allowed for resume sources.
* @node: The device tree node associated with the VIC.
*
* Register the VIC with the system device tree so that it can be notified
* of suspend and resume requests and ensure that the correct actions are
* taken to re-instate the settings on resume.
*
* This also configures the IRQ domain for the VIC.
*/
static void __init vic_register(void __iomem *base, unsigned int irq,
u32 resume_sources, struct device_node *node)
{
struct vic_device *v;
if (vic_id >= ARRAY_SIZE(vic_devices)) {
printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
return;
}
v = &vic_devices[vic_id];
v->base = base;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
v->domain = irq_domain_add_legacy(node, 32, irq, 0,
&irq_domain_simple_ops, v);
}
static void vic_ack_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
/* moreover, clear the soft-triggered, in case it was the reason */
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
}
static void vic_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
}
static void vic_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE);
}
#if defined(CONFIG_PM)
static struct vic_device *vic_from_irq(unsigned int irq)
{
struct vic_device *v = vic_devices;
unsigned int base_irq = irq & ~31;
int id;
for (id = 0; id < vic_id; id++, v++) {
if (v->irq == base_irq)
return v;
}
return NULL;
}
static int vic_set_wake(struct irq_data *d, unsigned int on)
{
struct vic_device *v = vic_from_irq(d->irq);
unsigned int off = d->hwirq;
u32 bit = 1 << off;
if (!v)
return -EINVAL;
if (!(bit & v->resume_sources))
return -EINVAL;
if (on)
v->resume_irqs |= bit;
else
v->resume_irqs &= ~bit;
return 0;
}
#else
#define vic_set_wake NULL
#endif /* CONFIG_PM */
static struct irq_chip vic_chip = {
.name = "VIC",
.irq_ack = vic_ack_irq,
.irq_mask = vic_mask_irq,
.irq_unmask = vic_unmask_irq,
.irq_set_wake = vic_set_wake,
};
static void __init vic_disable(void __iomem *base)
{
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
}
static void __init vic_clear_interrupts(void __iomem *base)
{
unsigned int i;
writel(0, base + VIC_PL190_VECT_ADDR);
for (i = 0; i < 19; i++) {
unsigned int value;
value = readl(base + VIC_PL190_VECT_ADDR);
writel(value, base + VIC_PL190_VECT_ADDR);
}
}
static void __init vic_set_irq_sources(void __iomem *base,
unsigned int irq_start, u32 vic_sources)
{
unsigned int i;
for (i = 0; i < 32; i++) {
if (vic_sources & (1 << i)) {
unsigned int irq = irq_start + i;
irq_set_chip_and_handler(irq, &vic_chip,
handle_level_irq);
irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
}
/*
* The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
* The original cell has 32 interrupts, while the modified one has 64,
* replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
* the probe function is called twice, with base set to offset 000
* and 020 within the page. We call this "second block".
*/
static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources, struct device_node *node)
{
unsigned int i;
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
/* Disable all interrupts initially. */
vic_disable(base);
/*
* Make sure we clear all existing interrupts. The vector registers
* in this cell are after the second block of general registers,
* so we can address them using standard offsets, but only from
* the second base address, which is 0x20 in the page
*/
if (vic_2nd_block) {
vic_clear_interrupts(base);
/* ST has 16 vectors as well, but we don't enable them for now */
for (i = 0; i < 16; i++) {
void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
writel(0, reg);
}
writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
vic_set_irq_sources(base, irq_start, vic_sources);
vic_register(base, irq_start, 0, node);
}
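/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The "second block" test above relies on the ST variant mapping its second
 * 32-interrupt bank at offset 0x20 within the same page, so a non-zero page
 * offset of the base identifies it. Hypothetical predicate:
 */
static inline int vic_is_st_second_block(void __iomem *base)
{
	/* offset 0x00 -> first block, offset 0x20 -> second block */
	return ((unsigned long)base & ~PAGE_MASK) != 0;
}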
void __init __vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources,
struct device_node *node)
{
unsigned int i;
u32 cellid = 0;
enum amba_vendor vendor;
/* Identify which VIC cell this one is, by reading the ID */
for (i = 0; i < 4; i++) {
void __iomem *addr;
addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
cellid |= (readl(addr) & 0xff) << (8 * i);
}
vendor = (cellid >> 12) & 0xff;
printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
base, cellid, vendor);
switch(vendor) {
case AMBA_VENDOR_ST:
vic_init_st(base, irq_start, vic_sources, node);
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyway\n");
/* fall through */
case AMBA_VENDOR_ARM:
break;
}
/* Disable all interrupts initially. */
vic_disable(base);
/* Make sure we clear all existing interrupts */
vic_clear_interrupts(base);
vic_init2(base);
vic_set_irq_sources(base, irq_start, vic_sources);
vic_register(base, irq_start, resume_sources, node);
}
/**
* vic_init() - initialise a vectored interrupt controller
* @base: iomem base address
* @irq_start: starting interrupt number, must be a multiple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*/
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
__vic_init(base, irq_start, vic_sources, resume_sources, NULL);
}
#ifdef CONFIG_OF
int __init vic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *regs;
int irq_base;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
return -EIO;
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
if (WARN_ON(irq_base < 0))
goto out_unmap;
__vic_init(regs, irq_base, ~0, ~0, node);
return 0;
out_unmap:
iounmap(regs);
return -EIO;
}
#endif /* CONFIG_OF */
/*
* Handle each interrupt in a single VIC. Returns non-zero if we've
* handled at least one interrupt. This reads the status register
* before handling each interrupt, which is necessary given that
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
u32 stat, irq;
int handled = 0;
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
handled = 1;
}
return handled;
}
/*
* Keep iterating over all registered VIC's until there are no pending
* interrupts.
*/
asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
{
int i, handled;
do {
for (i = 0, handled = 0; i < vic_id; ++i)
handled |= handle_one_vic(&vic_devices[i], regs);
} while (handled);
}
| gpl-2.0 |
junkTzu/kernel-MB860 | drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c | 8118 | 49749 |
// As this file is mainly ported from the Windows driver, the names are left largely unchanged. If this causes any confusion, tell me. Created by WB. 2008.05.08
#include "ieee80211.h"
#include "rtl819x_HT.h"
u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
u16 MCS_DATA_RATE[2][2][77] =
{ { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260,
39, 78, 117, 156, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520,
0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195,
195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260,
286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, // Long GI, 20MHz
{14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578,
0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217,
217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289,
318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, // Short GI, 20MHz
{ {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540,
81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080,
12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405,
405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540,
594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, // Long GI, 40MHz
{30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600,
90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200,
13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450,
450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600,
660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } // Short GI, 40MHz
};
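/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The table above is indexed as MCS_DATA_RATE[bw40][short_gi][mcs] and its
 * entries appear to be in units of 0.5 Mbps (MCS0, 20MHz, long GI is 13,
 * i.e. 6.5 Mbps). A hypothetical accessor under that assumption:
 */
static inline u16 HTMcsRate500Kbps(u8 bw40, u8 short_gi, u8 mcs)
{
	if (bw40 > 1 || short_gi > 1 || mcs > 76)
		return 0;
	return MCS_DATA_RATE[bw40][short_gi][mcs];
}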
static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
static u8 NETGEAR834Bv2_BROADCOM[3] = {0x00, 0x1b, 0x2f};
static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; //cosa 03202008
static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
//static u8 DLINK_ATHEROS[3] = {0x00, 0x1c, 0xf0};
static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
// 2008/04/01 MH: For Cisco G mode RX throughput we need to change the FW duration. Should we put the
// code in another place??
//static u8 WIFI_CISCO_G_AP[3] = {0x00, 0x40, 0x96};
/********************************************************************************************************************
*function: This function updates default settings in the pHTInfo structure
* input: PRT_HIGH_THROUGHPUT pHTInfo
* output: none
* return: none
* notice: These values need to be modified if anything changes.
* *****************************************************************************************************************/
void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
//const typeof( ((struct ieee80211_device *)0)->pHTInfo ) *__mptr = &pHTInfo;
//printk("pHTinfo:%p, &pHTinfo:%p, mptr:%p, offsetof:%x\n", pHTInfo, &pHTInfo, __mptr, offsetof(struct ieee80211_device, pHTInfo));
//printk("===>ieee:%p,\n", ieee);
// ShortGI support
pHTInfo->bRegShortGI20MHz= 1;
pHTInfo->bRegShortGI40MHz= 1;
// 40MHz channel support
pHTInfo->bRegBW40MHz = 1;
// CCK rate support in 40MHz channel
if(pHTInfo->bRegBW40MHz)
pHTInfo->bRegSuppCCK = 1;
else
pHTInfo->bRegSuppCCK = true;
// AMSDU related
pHTInfo->nAMSDU_MaxSize = 7935UL;
pHTInfo->bAMSDU_Support = 0;
// AMPDU related
pHTInfo->bAMPDUEnable = 1;
pHTInfo->AMPDU_Factor = 2; // 0: 2^13 (8K), 1: 2^14 (16K), 2: 2^15 (32K), 3: 2^16 (64K)
pHTInfo->MPDU_Density = 0;// 0: No restriction, 1: 1/8usec, 2: 1/4usec, 3: 1/2usec, 4: 1usec, 5: 2usec, 6: 4usec, 7:8usec
// MIMO Power Save
pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.)
if(pHTInfo->SelfMimoPs == 2)
pHTInfo->SelfMimoPs = 3;
// 8190 only. Assign rate operation mode to firmware
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
#ifdef TO_DO_LIST
// 8190 only. Assign duration operation mode to firmware
pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur;
#endif
// 8190 only, Realtek proprietary aggregation mode
// 0: Set MPDUDensity=2, 1: Set MPDUDensity=2 (32k) for Realtek AP and MPDUDensity=0 (8k) for others
pHTInfo->bRegRT2RTAggregation = 1;
// For Rx Reorder Control
pHTInfo->bRegRxReorderEnable = 1;
pHTInfo->RxReorderWinSize = 64;
pHTInfo->RxReorderPendingTime = 30;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
pHTInfo->UsbTxAggrNum = 4;
#endif
#ifdef USB_RX_AGGREGATION_SUPPORT
pHTInfo->UsbRxFwAggrEn = 1;
pHTInfo->UsbRxFwAggrPageNum = 24;
pHTInfo->UsbRxFwAggrPacketNum = 8;
pHTInfo->UsbRxFwAggrTimeout = 16; // USB Rx FW aggregation timeout threshold. It's in units of 64us.
#endif
}
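/*
 * Editor's note: an illustrative sketch, not part of the original file.
 * The AMPDU_Factor encoding commented above follows the 802.11n rule that
 * the maximum A-MPDU length is 2^(13 + factor) octets (less one on the air),
 * so the factor of 2 set here corresponds to 32K. Hypothetical helper:
 */
static inline u32 HTMaxAMPDUBytes(u8 AMPDU_Factor)
{
	/* 0 -> 8K, 1 -> 16K, 2 -> 32K, 3 -> 64K */
	return 1UL << (13 + (AMPDU_Factor & 3));
}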
/********************************************************************************************************************
*function: This function prints out each field of the HT capability IE (mainly from Beacon/ProbeRsp/AssocReq)
* input: u8* CapIE //Capability IE to be printed out
* u8* TitleString //name of the calling function, for the log
* output: none
* return: none
* notice: Driver should not print out this message by default.
* *****************************************************************************************************************/
void HTDebugHTCapability(u8* CapIE, u8* TitleString )
{
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_CAPABILITY_ELE pCapELE;
if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap)))
{
//EWC IE
IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__);
pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[4]);
}else
pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[0]);
IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. Called by %s\n", TitleString );
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth)?"20MHz": "20/40MHz");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz)?"YES": "NO");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz)?"YES": "NO");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC)?"YES": "NO");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize)?"3839": "7935");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk)?"YES": "NO");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\
pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]);
return;
}
/********************************************************************************************************************
*function: This function prints out each field of the HT Information IE (mainly from Beacon/ProbeRsp)
* input: u8* InfoIE //Information IE to be printed out
* u8* TitleString //name of the calling function, for the log
* output: none
* return: none
* notice: Driver should not print out this message by default.
* *****************************************************************************************************************/
void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
{
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
PHT_INFORMATION_ELE pHTInfoEle;
if(!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
{
//EWC IE
IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__);
pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]);
}else
pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]);
IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel =");
switch(pHTInfoEle->ExtChlOffset)
{
case 0:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
break;
case 1:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n");
break;
case 2:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. Eooro!!!\n");
break;
case 3:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n");
break;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = ");
switch(pHTInfoEle->OptMode)
{
case 0:
IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
break;
case 1:
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n");
break;
case 2:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n");
break;
case 3:
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n");
break;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\
pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]);
return;
}
/*
* Return: true if station in half n mode and AP supports 40 bw
*/
bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
if(pHTInfo->bCurrentHTSupport == false ) // not in HT (n) mode
retValue = false;
else if(pHTInfo->bRegBW40MHz == false) // station does not support 40MHz bw
retValue = false;
else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station not in half-n mode
retValue = false;
else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // AP supports 40MHz bw
retValue = true;
else
retValue = false;
return retValue;
}
bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
if(pHTInfo->bCurrentHTSupport == false ) // not in HT (n) mode
retValue = false;
else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station not in half-n mode
retValue = false;
else if(is40MHz) // 40MHz case
{
if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // AP supports 40MHz short GI
retValue = true;
else
retValue = false;
}
else
{
if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // AP supports 20MHz short GI
retValue = true;
else
retValue = false;
}
return retValue;
}
u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
{
u8 is40MHz;
u8 isShortGI;
is40MHz = (IsHTHalfNmode40Bandwidth(ieee))?1:0;
isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz))? 1:0;
return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
}
u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8 is40MHz = (pHTInfo->bCurBW40MHz)?1:0;
u8 isShortGI = (pHTInfo->bCurBW40MHz)?
((pHTInfo->bCurShortGI40MHz)?1:0):
((pHTInfo->bCurShortGI20MHz)?1:0);
return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)];
}
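/*
 * Editorial note: MCS_DATA_RATE is indexed as
 * [is40MHz][isShortGI][MCS & 0x7f]; the 0x7f mask strips the bit-7
 * "MCS rate" flag that HTGetHighestMCSRate() sets on its return value.
 */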
/********************************************************************************************************************
*function: This function converts a rate index into the corresponding data rate.
* input: struct ieee80211_device* ieee
* u8 nDataRate
* output: none
* return: tx rate
* notice: quite unsure about how to use this function //wb
* *****************************************************************************************************************/
u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
{
//PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c};
u8 is40MHz = 0;
u8 isShortGI = 0;
if(nDataRate < 12)
{
return CCKOFDMRate[nDataRate];
}
else
{
if (nDataRate >= 0x10 && nDataRate <= 0x1f)//if(nDataRate > 11 && nDataRate < 28 )
{
is40MHz = 0;
isShortGI = 0;
// nDataRate = nDataRate - 12;
}
else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44)
{
is40MHz = 1;
isShortGI = 0;
//nDataRate = nDataRate - 28;
}
else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60)
{
is40MHz = 0;
isShortGI = 1;
//nDataRate = nDataRate - 44;
}
else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76)
{
is40MHz = 1;
isShortGI = 1;
//nDataRate = nDataRate - 60;
}
return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
}
}
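/*
 * Worked example (editorial): the HT rate indices handled above encode the
 * table selection in the high nibble -- 0x10~0x1f: 20MHz/long GI,
 * 0x20~0x2f: 40MHz/long GI, 0x30~0x3f: 20MHz/short GI, 0x40~0x4f:
 * 40MHz/short GI -- while the low nibble (nDataRate & 0xf) is the MCS
 * index, so e.g. 0x47 means 40MHz, short GI, MCS7.
 */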
bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
{
bool retValue = false;
struct ieee80211_network* net = &ieee->current_network;
if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
(memcmp(net->bssid, PCI_RALINK, 3)==0) ||
(memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
(memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
(net->ralink_cap_exist))
retValue = true;
else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
(memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
(memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) ||
(net->broadcom_cap_exist))
retValue = true;
else if(net->bssht.bdRT2RTAggregation)
retValue = true;
else
retValue = false;
return retValue;
}
/********************************************************************************************************************
*function: This function determines the peer's IOT (interoperability) vendor.
* input: struct ieee80211_device* ieee
* output: none
* return:
* notice:
* *****************************************************************************************************************/
void HTIOTPeerDetermine(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
struct ieee80211_network* net = &ieee->current_network;
if(net->bssht.bdRT2RTAggregation)
pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
else if(net->broadcom_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) ||
(memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)||
(memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)||
(memcmp(net->bssid, NETGEAR834Bv2_BROADCOM, 3)==0) )
pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
(memcmp(net->bssid, PCI_RALINK, 3)==0) ||
(memcmp(net->bssid, EDIMAX_RALINK, 3)==0) ||
(memcmp(net->bssid, AIRLINK_RALINK, 3)==0) ||
net->ralink_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
else if(net->atheros_cap_exist)
pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
else if(memcmp(net->bssid, CISCO_BROADCOM, 3)==0)
pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
else
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer);
}
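/*
 * Editorial sketch (not in the original driver): the vendor checks above
 * and in IsHTHalfNmodeAPs() just compare the first three BSSID octets
 * against a known vendor OUI. A hypothetical helper expressing the test:
 */
static inline int ht_bssid_matches_oui(const u8 *bssid, const u8 *oui)
{
return memcmp(bssid, oui, 3) == 0; /* OUI = first 3 octets of the MAC */
}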
/********************************************************************************************************************
*function: Check whether the driver should declare received rates up to MCS13 only, since some chipsets
* are not good at receiving MCS14~15 frames from some APs.
* input: struct ieee80211_device* ieee
* u8 * PeerMacAddr
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
* *****************************************************************************************************************/
u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
{
u8 ret = 0;
return ret;
}
/**
* Function: HTIOTActIsDisableMCS15
*
* Overview: Check whether driver should declare capability of receiving MCS15
*
* Input:
* PADAPTER Adapter,
*
* Output: None
* Return: true if driver should disable MCS15
* 2008.04.15 Emily
*/
bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
{
bool retValue = false;
#ifdef TODO
// Apply for 819u only
#if (HAL_CODE_BASE==RTL8192)
#if (DEV_BUS_TYPE == USB_INTERFACE)
// Always disable MCS15, by Jerry Chang's request. By Emily, 2008.04.15
retValue = true;
#elif (DEV_BUS_TYPE == PCI_INTERFACE)
// Enable MCS15 if the peer is Cisco AP. by Emily, 2008.05.12
// if(pBssDesc->bCiscoCapExist)
// retValue = false;
// else
retValue = false;
#endif
#endif
#endif
// Jerry Chang suggest that 8190 1x2 does not need to disable MCS15
return retValue;
}
/**
* Function: HTIOTActIsDisableMCSTwoSpatialStream
*
* Overview: Check whether the driver should declare capability of receiving all 2-SS (two spatial stream) packets
*
* Input:
* PADAPTER Adapter,
*
* Output: None
* Return: true if driver should disable all two spatial stream packet
* 2008.04.21 Emily
*/
bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *PeerMacAddr)
{
bool retValue = false;
#ifdef TODO
// Apply for 819u only
#endif
return retValue;
}
/********************************************************************************************************************
*function: Check whether driver should disable EDCA turbo mode
* input: struct ieee80211_device* ieee
* u8* PeerMacAddr
* output: none
* return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
* *****************************************************************************************************************/
u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr)
{
u8 retValue = false; // default enable EDCA Turbo mode.
// Set specific EDCA parameter for different AP in DM handler.
return retValue;
}
/********************************************************************************************************************
*function: Check whether we need to use OFDM to send MGNT frames for Broadcom APs
* input: struct ieee80211_network *network //current network we live
* output: none
* return: return 1 if true
* *****************************************************************************************************************/
u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
{
u8 retValue = 0;
// 2008/01/25 MH Judge if we need to use OFDM to send MGNT frames for Broadcom APs.
// 2008/01/28 MH We must prevent selecting a null bssid to link to.
if(network->broadcom_cap_exist)
{
retValue = 1;
}
return retValue;
}
u8 HTIOTActIsCCDFsync(u8* PeerMacAddr)
{
u8 retValue = 0;
if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
(memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) ||
(memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ==0))
{
retValue = 1;
}
return retValue;
}
void HTResetIOTSetting(
PRT_HIGH_THROUGHPUT pHTInfo
)
{
pHTInfo->IOTAction = 0;
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
}
/********************************************************************************************************************
*function: Construct the Capability Element in Beacon... if HTEnable is turned on
* input: struct ieee80211_device* ieee
* u8* posHTCap //pointer to store Capability Ele
* u8* len //store length of CE
* u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
* notice: posHTCap can't be null and should be initialized before.
* *****************************************************************************************************************/
void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_CAPABILITY_ELE pCapELE = NULL;
//u8 bIsDeclareMCS13;
if ((posHTCap == NULL) || (pHT == NULL))
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
return;
}
memset(posHTCap, 0, *len);
if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
{
u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
}else
{
pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
}
//HT capability info
pCapELE->AdvCoding = 0; // This feature is not supported now!!
if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
{
pCapELE->ChlWidth = 0;
}
else
{
pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
}
// pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
pCapELE->MimoPwrSave = pHT->SelfMimoPs;
pCapELE->GreenField = 0; // This feature is not supported now!!
pCapELE->ShortGI20Mhz = 1; // We can receive Short GI!!
pCapELE->ShortGI40Mhz = 1; // We can receive Short GI!!
//DbgPrint("TX HT cap/info ele BW=%d SG20=%d SG40=%d\n\r",
//pCapELE->ChlWidth, pCapELE->ShortGI20Mhz, pCapELE->ShortGI40Mhz);
pCapELE->TxSTBC = 1;
pCapELE->RxSTBC = 0;
pCapELE->DelayBA = 0; // Do not support now!!
pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
pCapELE->DssCCk = ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
pCapELE->PSMP = 0; // Do not support now!!
pCapELE->LSigTxopProtect = 0; // Do not support now!!
//MAC HT parameters info
// TODO: Need to take care of this part
IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);
if( IsEncrypt)
{
pCapELE->MPDUDensity = 7; // 8us
pCapELE->MaxRxAMPDUFactor = 2; // 2 is for 32 K and 3 is 64K
}
else
{
pCapELE->MaxRxAMPDUFactor = 3; // 2 is for 32 K and 3 is 64K
pCapELE->MPDUDensity = 0; // no density
}
//Supported MCS set
memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
pCapELE->MCS[1] &= 0x7f;
if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
pCapELE->MCS[1] &= 0xbf;
if(pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
pCapELE->MCS[1] &= 0x00;
// 2008.06.12
// For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
{
int i;
for(i = 1; i< 16; i++)
pCapELE->MCS[i] = 0;
}
//Extended HT Capability Info
memset(&pCapELE->ExtHTCapInfo, 0, 2);
//TXBF Capabilities
memset(pCapELE->TxBFCap, 0, 4);
//Antenna Selection Capabilities
pCapELE->ASCap = 0;
//add 2 to give space for element ID and len when construct frames
if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
*len = 30 + 2;
else
*len = 26 + 2;
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTCap, *len -2);
//Print each field in detail. Driver should not print out this message by default
// HTDebugHTCapability(posHTCap, (u8*)"HTConstructCapability()");
return;
}
/********************************************************************************************************************
*function: Construct Information Element in Beacon... if HTEnable is turned on
* input: struct ieee80211_device* ieee
* u8* posHTInfo //pointer to store Information Ele
* u8* len //store length of the IE
* u8 IsEncrypt //whether encrypt, needed further
* output: none
* return: none
* notice: posHTInfo can't be null and must be initialized beforehand. Only AP and IBSS STA should do this
* *****************************************************************************************************************/
void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
if ((posHTInfo == NULL) || (pHTInfoEle == NULL))
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
return;
}
memset(posHTInfo, 0, *len);
if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
{
pHTInfoEle->ControlChl = ieee->current_network.channel;
pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
(ieee->current_network.channel<=6)?
HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
pHTInfoEle->RecommemdedTxWidth = pHT->bRegBW40MHz;
pHTInfoEle->RIFS = 0;
pHTInfoEle->PSMPAccessOnly = 0;
pHTInfoEle->SrvIntGranularity = 0;
pHTInfoEle->OptMode = pHT->CurrentOpMode;
pHTInfoEle->NonGFDevPresent = 0;
pHTInfoEle->DualBeacon = 0;
pHTInfoEle->SecondaryBeacon = 0;
pHTInfoEle->LSigTxopProtectFull = 0;
pHTInfoEle->PcoActive = 0;
pHTInfoEle->PcoPhase = 0;
memset(pHTInfoEle->BasicMSC, 0, 16);
*len = 22 + 2; //same above
}
else
{
//STA should not generate High Throughput Information Element
*len = 0;
}
//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA | IEEE80211_DL_HT, posHTInfo, *len - 2);
//HTDebugHTInfo(posHTInfo, "HTConstructInforElement");
return;
}
/*
* According to experiments, a Realtek AP-to-STA link (based on rtl8190) may achieve the best
* performance if both STA and AP limit the aggregation size to 32K, that is, set the AMPDU
* density to 2 (Ref: IEEE 802.11n specification). However, if a Realtek STA associates to
* another vendor's AP, the STA should limit the aggregation size to 8K; otherwise the throughput
* of the STA-to-AP stream will be much lower than that of the AP-to-STA stream when both
* streams run concurrently.
*
* Frame Format
* Element ID Length OUI Type1 Reserved
* 1 byte 1 byte 3 bytes 1 byte 1 byte
*
* OUI = 0x00, 0xe0, 0x4c,
* Type = 0x02
* Reserved = 0x00
*
* 2007.8.21 by Emily
*/
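/*
 * Worked example (editorial, assuming the layout written by
 * HTConstructRT2RTAggElement() below): the element body comes out as
 * OUI = 00 e0 4c, Type = 02, then 01 10 for the remaining two octets,
 * i.e. 6 payload bytes; *len = 6 + 2 reserves room for the element ID
 * and length octets that the frame builder prepends.
 */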
/********************************************************************************************************************
*function: Construct Information Element in Beacon... in RT2RT condition
* input: struct ieee80211_device* ieee
* u8* posRT2RTAgg //pointer to store Information Ele
* u8* len //store len
* output: none
* return: none
* notice:
* *****************************************************************************************************************/
void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len)
{
if (posRT2RTAgg == NULL) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
return;
}
memset(posRT2RTAgg, 0, *len);
*posRT2RTAgg++ = 0x00;
*posRT2RTAgg++ = 0xe0;
*posRT2RTAgg++ = 0x4c;
*posRT2RTAgg++ = 0x02;
*posRT2RTAgg++ = 0x01;
*posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;
if(ieee->bSupportRemoteWakeUp) {
*posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
}
*len = 6 + 2;
return;
#ifdef TODO
#if (HAL_CODE_BASE == RTL8192 && DEV_BUS_TYPE == USB_INTERFACE)
/*
//Emily. If it is required to Ask Realtek AP to send AMPDU during AES mode, enable this
section of code.
if(IS_UNDER_11N_AES_MODE(Adapter))
{
posRT2RTAgg->Octet[5] |=RT_HT_CAP_USE_AMPDU;
}else
{
posRT2RTAgg->Octet[5] &= 0xfb;
}
*/
#else
// Do Nothing
#endif
posRT2RTAgg->Length = 6;
#endif
}
/********************************************************************************************************************
*function: Pick the right Rate Adaptive table to use
* input: struct ieee80211_device* ieee
* u8* pOperateMCS //A pointer to MCS rate bitmap
* return: always returns true
* notice:
* *****************************************************************************************************************/
u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
{
u8 i;
if (pOperateMCS == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
return false;
}
switch(ieee->mode)
{
case IEEE_A:
case IEEE_B:
case IEEE_G:
//legacy rates are handled by the rate selection routine
//no MCS rate
for(i=0;i<=15;i++){
pOperateMCS[i] = 0;
}
break;
case IEEE_N_24G: //assume CCK rate ok
case IEEE_N_5G:
// Legacy part we only use 6, 5.5,2,1 for N_24G and 6 for N_5G.
// Legacy part shall be handled at SelectRateSet().
//HT part
// TODO: may be different if we have different number of antenna
pOperateMCS[0] &=RATE_ADPT_1SS_MASK; //support MCS 0~7
pOperateMCS[1] &=RATE_ADPT_2SS_MASK;
pOperateMCS[3] &=RATE_ADPT_MCS32_MASK;
break;
//should never reach here
default:
break;
}
return true;
}
/*
* Description:
* This function will get the highest speed rate in input MCS set.
*
* /param Adapter Pointer to Adapter entity
* pMCSRateSet Pointer to MCS rate bitmap
* pMCSFilter Pointer to MCS rate filter
*
* /return Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
*
*/
/********************************************************************************************************************
*function: This function will get the highest speed rate in input MCS set.
* input: struct ieee80211_device* ieee
* u8* pMCSRateSet //Pointer to MCS rate bitmap
* u8* pMCSFilter //Pointer to MCS rate filter
* return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
* notice:
* *****************************************************************************************************************/
u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter)
{
u8 i, j;
u8 bitMap;
u8 mcsRate = 0;
u8 availableMcsRate[16];
if (pMCSRateSet == NULL || pMCSFilter == NULL)
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
return false;
}
for(i=0; i<16; i++)
availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];
for(i = 0; i < 16; i++)
{
if(availableMcsRate[i] != 0)
break;
}
if(i == 16)
return false;
for(i = 0; i < 16; i++)
{
if(availableMcsRate[i] != 0)
{
bitMap = availableMcsRate[i];
for(j = 0; j < 8; j++)
{
if((bitMap%2) != 0)
{
if(HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
mcsRate = (8*i+j);
}
bitMap = bitMap>>1;
}
}
}
return (mcsRate|0x80);
}
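/*
 * Usage sketch (editorial): with a static-MIMO-PS peer the caller passes
 * MCS_FILTER_1SS, so only byte 0 (MCS 0~7) survives the per-byte AND;
 * the return value carries bit 7 as an "HT rate" marker, e.g. MCS7 is
 * reported as 0x87.
 */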
/*
**
**1. Filter our operational rate set against the AP's rate set
**2. Shall reference channel bandwidth, STBC, and antenna number
**3. Generate the rate adaptive table for the firmware
**David 20060906
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperateMCS)
{
u8 i=0;
// filter out operational rates not supported by the AP; the length of the set is 16 bytes
for(i=0;i<=15;i++){
pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
}
// TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number
// TODO: fill suggested rate adaptive rate index and give firmware info using Tx command packet
// we shall also suggest the initial rate set according to our signal strength
HT_PickMCSRate(ieee, pOperateMCS);
// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
pOperateMCS[1] = 0;
//
// For RTL819X, we support only MCS0~15.
// And also, we do not know how to use MCS32 now.
//
for(i=2; i<=15; i++)
pOperateMCS[i] = 0;
return true;
}
void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
void HTOnAssocRsp(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PHT_CAPABILITY_ELE pPeerHTCap = NULL;
PHT_INFORMATION_ELE pPeerHTInfo = NULL;
u16 nMaxAMSDUSize = 0;
u8* pMcsFilter = NULL;
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
if( pHTInfo->bCurrentHTSupport == false )
{
IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
return;
}
IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTCapBuf, sizeof(HT_CAPABILITY_ELE));
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, pHTInfo->PeerHTInfoBuf, sizeof(HT_INFORMATION_ELE));
// HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
// HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
//
if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
else
pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);
if(!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]);
else
pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);
////////////////////////////////////////////////////////
// Configurations:
////////////////////////////////////////////////////////
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTInfo, sizeof(HT_INFORMATION_ELE));
// Config Supported Channel Width setting
//
HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
// if(pHTInfo->bCurBW40MHz == true)
pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
//
// Update short GI/ long GI setting
//
// TODO:
pHTInfo->bCurShortGI20MHz=
((pHTInfo->bRegShortGI20MHz)?((pPeerHTCap->ShortGI20Mhz==1)?true:false):false);
pHTInfo->bCurShortGI40MHz=
((pHTInfo->bRegShortGI40MHz)?((pPeerHTCap->ShortGI40Mhz==1)?true:false):false);
//
// Config TX STBC setting
//
// TODO:
//
// Config DSSS/CCK mode in 40MHz mode
//
// TODO:
pHTInfo->bCurSuppCCK =
((pHTInfo->bRegSuppCCK)?((pPeerHTCap->DssCCk==1)?true:false):false);
//
// Config and configure A-MSDU setting
//
pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize==0)?3839:7935;
if(pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize )
pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
else
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
//
// Config A-MPDU setting
//
pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
// <1> Decide AMPDU Factor
// By Emily
if(!pHTInfo->bRegRT2RTAggregation)
{
// Decide AMPDU Factor according to protocol handshake
if(pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
}else
{
// Set MPDU density to 2 to Realtek AP, and set it to 0 for others
// Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
if (ieee->current_network.bssht.bdRT2RTAggregation)
{
if( ieee->pairwise_key_type != KEY_TYPE_NA)
// Realtek may set 32k in security mode and 64k for others
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
}else
{
if(pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
else
pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
}
}
// <2> Set AMPDU Minimum MPDU Start Spacing
// 802.11n 3.0 section 9.7d.3
if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
else
pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
if(ieee->pairwise_key_type != KEY_TYPE_NA )
pHTInfo->CurrentMPDUDensity = 7; // 8us
// Force TX AMSDU
// Lanhsin: mark for tmp to avoid deauth by ap from s3
//if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0)
if(0)
{
pHTInfo->bCurrentAMPDUEnable = false;
pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
pHTInfo->ForcedAMSDUMaxSize = 7935;
pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K;
}
// Rx Reorder Setting
pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
//
// Filter out unsupported HT rate for this AP
// Update RATR table
// This is only for 8190 ,8192 or later product which using firmware to handle rate adaptive mechanism.
//
// Handle Ralink AP bad MCS rate set condition. Joseph.
// This fixes a bug in the Ralink AP. It may be removed in the future.
if(pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
//
// Config MIMO Power Save setting
//
pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
if(pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
pMcsFilter = MCS_FILTER_1SS;
else
pMcsFilter = MCS_FILTER_ALL;
//WB add for MCS8 bug
// pMcsFilter = MCS_FILTER_1SS;
ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
//
// Config current operation mode.
//
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
}
void HTSetConnectBwModeCallback(struct ieee80211_device* ieee);
/********************************************************************************************************************
*function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
* input: struct ieee80211_device* ieee
* output: none
* return: none
* notice: This function is called during (1) the MP initialization phase and (2) on receiving a deauthentication from the AP
********************************************************************************************************************/
// TODO: Should this function be called when receiving a Disassociation?
void HTInitializeHTInfo(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
//
// These parameters will be reset when receiving deauthentication packet
//
IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __FUNCTION__);
pHTInfo->bCurrentHTSupport = false;
// 40MHz channel support
pHTInfo->bCurBW40MHz = false;
pHTInfo->bCurTxBW40MHz = false;
// Short GI support
pHTInfo->bCurShortGI20MHz = false;
pHTInfo->bCurShortGI40MHz = false;
pHTInfo->bForcedShortGI = false;
// CCK rate support
// This flag is set to true to support CCK rate by default.
// It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities only when associate to
// 11N BSS.
pHTInfo->bCurSuppCCK = true;
// AMSDU related
pHTInfo->bCurrent_AMSDU_Support = false;
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
// AMPDU related
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
// Initialize all of the parameters related to 11n
memset((void*)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap));
memset((void*)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo));
memset((void*)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf));
memset((void*)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf));
pHTInfo->bSwBwInProgress = false;
pHTInfo->ChnlOp = CHNLOP_NONE;
// Set default IEEE spec for Draft N
pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
// Realtek proprietary aggregation mode
pHTInfo->bCurrentRT2RTAggregation = false;
pHTInfo->bCurrentRT2RTLongSlotTime = false;
pHTInfo->IOTPeer = 0;
pHTInfo->IOTAction = 0;
//MCS rate initialized here
{
u8* RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7
RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15
RegHTSuppRateSets[4] = 0x01; //support MCS 32
}
}
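/*
 * Editorial note: RegHTSuppRateSet is the 16-byte Supported MCS Set
 * bitmap, where bit n of byte i advertises MCS (8*i + n). 0xFF in bytes
 * 0 and 1 therefore enables MCS 0~15, and bit 0 of byte 4 enables MCS 32.
 */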
/********************************************************************************************************************
*function: initialize Bss HT structure(struct PBSS_HT)
* input: PBSS_HT pBssHT //to be initialized
* output: none
* return: none
* notice: This function is called when initialize network structure
********************************************************************************************************************/
void HTInitializeBssDesc(PBSS_HT pBssHT)
{
pBssHT->bdSupportHT = false;
memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
pBssHT->bdHTCapLen = 0;
memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));
pBssHT->bdHTInfoLen = 0;
pBssHT->bdHTSpecVer= HT_SPEC_VER_IEEE;
pBssHT->bdRT2RTAggregation = false;
pBssHT->bdRT2RTLongSlotTime = false;
}
/********************************************************************************************************************
*function: initialize Bss HT structure(struct PBSS_HT)
* input: struct ieee80211_device *ieee
* struct ieee80211_network *pNetwork //usually current network we are live in
* output: none
* return: none
* notice: This function should ONLY be called before association
********************************************************************************************************************/
void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u16 nMaxAMSDUSize;
// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
// PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
// u8* pMcsFilter;
u8 bIOTAction = 0;
//
// Save Peer Setting before Association
//
IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __FUNCTION__);
/* the bEnableHT flag is left unchecked here for the same reason it is unchecked in ieee80211_softmac_new_net(). WB 2008.09.10 */
// if( pHTInfo->bEnableHT && pNetwork->bssht.bdSupportHT)
if (pNetwork->bssht.bdSupportHT)
{
pHTInfo->bCurrentHTSupport = true;
pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;
// Save HTCap and HTInfo information Element
if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen);
if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf))
memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen);
// Check whether RT to RT aggregation mode is enabled
if(pHTInfo->bRegRT2RTAggregation)
{
pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation;
pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime;
}
else
{
pHTInfo->bCurrentRT2RTAggregation = false;
pHTInfo->bCurrentRT2RTLongSlotTime = false;
}
// Determine the IOT Peer Vendor.
HTIOTPeerDetermine(ieee);
// Decide IOT Action
// Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided
pHTInfo->IOTAction = 0;
bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
bIOTAction = HTIOTActIsDisableMCS15(ieee);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee, pNetwork->bssid);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid);
if(bIOTAction)
pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
}
else
{
pHTInfo->bCurrentHTSupport = false;
pHTInfo->bCurrentRT2RTAggregation = false;
pHTInfo->bCurrentRT2RTLongSlotTime = false;
pHTInfo->IOTAction = 0;
}
}
void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf;
if(pHTInfo->bCurrentHTSupport)
{
//
// Config current operation mode.
//
if(pNetwork->bssht.bdHTInfoLen != 0)
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
//
// <TODO: Config according to OBSS non-HT STA present!!>
//
}
}
void HTUseDefaultSetting(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u8 regBwOpMode;
if(pHTInfo->bEnableHT)
{
pHTInfo->bCurrentHTSupport = true;
pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;
pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz;
pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz;
pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
// Set BWOpMode register
//update RATR index0
HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet);
//function below is not implemented at all. WB
#ifdef TODO
Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet);
#endif
ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
}
else
{
pHTInfo->bCurrentHTSupport = false;
}
return;
}
/********************************************************************************************************************
*function: check whether HT control field exists
* input: struct ieee80211_device *ieee
* u8* pFrame //coming skb->data
* output: none
* return: return true if HT control field exists(false otherwise)
* notice:
********************************************************************************************************************/
u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame)
{
if(ieee->pHTInfo->bCurrentHTSupport)
{
if(IsQoSDataFrame(pFrame) && Frame_Order(pFrame))
{
IEEE80211_DEBUG(IEEE80211_DL_HT, "HT CONTROL FILED EXIST!!\n");
return true;
}
}
return false;
}
//
// This function sets the bandwidth mode in the protocol layer.
//
void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u32 flags = 0;
if(pHTInfo->bRegBW40MHz == false)
return;
// To reduce dummy operation
// if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) ||
// (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset))
// return;
// spin_lock_irqsave(&(ieee->bw_spinlock), flags);
if(pHTInfo->bSwBwInProgress) {
// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
return;
}
//if in half-N mode, force 20M bandwidth. 09.08.2008 WB.
if(Bandwidth==HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)))
{
// Handle Illegal extension channel offset!!
if(ieee->current_network.channel<2 && Offset==HT_EXTCHNL_OFFSET_LOWER)
Offset = HT_EXTCHNL_OFFSET_NO_EXT;
if(Offset==HT_EXTCHNL_OFFSET_UPPER || Offset==HT_EXTCHNL_OFFSET_LOWER) {
pHTInfo->bCurBW40MHz = true;
pHTInfo->CurSTAExtChnlOffset = Offset;
} else {
pHTInfo->bCurBW40MHz = false;
pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
}
} else {
pHTInfo->bCurBW40MHz = false;
pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
}
pHTInfo->bSwBwInProgress = true;
// TODO: 2007.7.13 by Emily. Wait 2000ms in order to guarantee that switching
// bandwidth is executed after the scan is finished. It is a temporary solution
// because software should guarantee that the last bandwidth-switching operation
// is executed properly.
HTSetConnectBwModeCallback(ieee);
// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
}
void HTSetConnectBwModeCallback(struct ieee80211_device* ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __FUNCTION__);
if(pHTInfo->bCurBW40MHz)
{
if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_UPPER)
ieee->set_chan(ieee->dev, ieee->current_network.channel+2);
else if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_LOWER)
ieee->set_chan(ieee->dev, ieee->current_network.channel-2);
else
ieee->set_chan(ieee->dev, ieee->current_network.channel);
ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset);
} else {
ieee->set_chan(ieee->dev, ieee->current_network.channel);
ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
}
pHTInfo->bSwBwInProgress = false;
}
EXPORT_SYMBOL(HTUpdateSelfAndPeerSetting);
| gpl-2.0 |
VanirAOSP/kernel_samsung_jf | drivers/staging/winbond/mds.c | 8118 | 17881 | #include "mds_f.h"
#include "mto.h"
#include "wbhal.h"
#include "wb35tx_f.h"
unsigned char
Mds_initial(struct wbsoft_priv *adapter)
{
struct wb35_mds *pMds = &adapter->Mds;
pMds->TxPause = false;
pMds->TxRTSThreshold = DEFAULT_RTSThreshold;
pMds->TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
}
static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *buffer)
{
struct T00_descriptor *pT00;
struct T01_descriptor *pT01;
u16 Duration, NextBodyLen, OffsetSize;
u8 Rate, i;
unsigned char CTS_on = false, RTS_on = false;
struct T00_descriptor *pNextT00;
u16 BodyLen = 0;
unsigned char boGroupAddr = false;
OffsetSize = pDes->FragmentThreshold + 32 + 3;
OffsetSize &= ~0x03;
Rate = pDes->TxRate >> 1;
if (!Rate)
Rate = 1;
pT00 = (struct T00_descriptor *)buffer;
pT01 = (struct T01_descriptor *)(buffer+4);
pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);
if (buffer[DOT_11_DA_OFFSET+8] & 0x1) /* +8 for USB hdr */
boGroupAddr = true;
/******************************************
* Set RTS/CTS mechanism
******************************************/
if (!boGroupAddr) {
/* NOTE : If the protection mode is enabled and the MSDU will be fragmented,
* the tx rates of MPDUs will all be DSSS rates. So it will not use
* CTS-to-self in this case. CTS-To-self will only be used when without
* fragmentation. -- 20050112 */
BodyLen = (u16)pT00->T00_frame_length; /* include 802.11 header */
BodyLen += 4; /* CRC */
if (BodyLen >= CURRENT_RTS_THRESHOLD)
RTS_on = true; /* Using RTS */
else {
if (pT01->T01_modulation_type) { /* Is using OFDM */
if (CURRENT_PROTECT_MECHANISM) /* Is using protect */
CTS_on = true; /* Using CTS */
}
}
}
if (RTS_on || CTS_on) {
if (pT01->T01_modulation_type) { /* Is using OFDM */
/* CTS duration
* 2 SIFS + DATA transmit time + 1 ACK
* ACK Rate : 24 Mega bps
* ACK frame length = 14 bytes */
Duration = 2*DEFAULT_SIFSTIME +
2*PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym +
((112 + 22 + 95)/96)*Tsym;
} else { /* DSSS */
/* CTS duration
* 2 SIFS + DATA transmit time + 1 ACK
* Rate : ?? Mega bps
* ACK frame length = 14 bytes */
if (pT01->T01_plcp_header_length) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*2;
Duration += (((BodyLen + 14)*8 + Rate-1) / Rate +
DEFAULT_SIFSTIME*2);
}
if (RTS_on) {
if (pT01->T01_modulation_type) { /* Is using OFDM */
/* CTS + 1 SIFS + CTS duration
* CTS Rate : 24 Mega bps
* CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
((112 + 22 + 95)/96)*Tsym);
} else {
/* CTS + 1 SIFS + CTS duration
* CTS Rate : ?? Mega bps
* CTS frame length = 14 bytes */
if (pT01->T01_plcp_header_length) /* long preamble */
Duration += LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
Duration += (((112 + Rate-1) / Rate) + DEFAULT_SIFSTIME);
}
}
/* Set the value into USB descriptor */
pT01->T01_add_rts = RTS_on ? 1 : 0;
pT01->T01_add_cts = CTS_on ? 1 : 0;
pT01->T01_rts_cts_duration = Duration;
}
/******************************************
* Fill the more fragment descriptor
******************************************/
if (boGroupAddr)
Duration = 0;
else {
for (i = pDes->FragmentCount-1; i > 0; i--) {
NextBodyLen = (u16)pNextT00->T00_frame_length;
NextBodyLen += 4; /* CRC */
if (pT01->T01_modulation_type) {
/* OFDM
* data transmit time + 3 SIFS + 2 ACK
* Rate : ??Mega bps
* ACK frame length = 14 bytes, tx rate = 24M */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION * 3;
Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)/(Rate*4)) * Tsym +
(((2*14)*8 + 22 + 95)/96)*Tsym +
DEFAULT_SIFSTIME*3);
} else {
/* DSSS
* data transmit time + 2 ACK + 3 SIFS
* Rate : ??Mega bps
* ACK frame length = 14 bytes
* TODO : */
if (pT01->T01_plcp_header_length) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
Duration += (((NextBodyLen + (2*14))*8 + Rate-1) / Rate +
DEFAULT_SIFSTIME*3);
}
((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHORTs skip the 8B USB header, 2 USHORTs = FC + Duration */
/* ----20061009 add by anson's endian */
pNextT00->value = cpu_to_le32(pNextT00->value);
pT01->value = cpu_to_le32(pT01->value);
/* ----end 20061009 add by anson's endian */
buffer += OffsetSize;
pT01 = (struct T01_descriptor *)(buffer+4);
if (i != 1) /* The last fragment will not have the next fragment */
pNextT00 = (struct T00_descriptor *)(buffer+OffsetSize);
}
/*******************************************
* Fill the last fragment descriptor
*******************************************/
if (pT01->T01_modulation_type) {
/* OFDM
* 1 SIFS + 1 ACK
* Rate : 24 Mega bps
* ACK frame length = 14 bytes */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION;
/* The Tx rate of ACK use 24M */
Duration += (((112 + 22 + 95)/96)*Tsym + DEFAULT_SIFSTIME);
} else {
/* DSSS
* 1 ACK + 1 SIFS
* Rate : ?? Mega bps
* ACK frame length = 14 bytes(112 bits) */
if (pT01->T01_plcp_header_length) /* long preamble */
Duration = LONG_PREAMBLE_PLUS_PLCPHEADER_TIME;
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
Duration += ((112 + Rate-1)/Rate + DEFAULT_SIFSTIME);
}
}
((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHORTs skip the 8B USB header, 2 USHORTs = FC + Duration */
pT00->value = cpu_to_le32(pT00->value);
pT01->value = cpu_to_le32(pT01->value);
/* --end 20061009 add */
}
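/*
 * Worked example (editorial, assumed numbers): for OFDM the expression
 * ((BodyLen*8 + 22 + Rate*4 - 1)/(Rate*4))*Tsym above is
 * ceil((8*BodyLen + 16 SERVICE + 6 tail bits) / data-bits-per-symbol)
 * symbols, since a rate of Rate Mbps carries Rate*4 data bits per 4us
 * symbol. At 24 Mbps a 100-byte body costs ceil(822/96) = 9 symbols, and
 * a 14-byte ACK costs ((112 + 22 + 95)/96) = 2 symbols as used above.
 */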
/* The function returns the 4n-aligned size of the USB packet */
static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct T00_descriptor *pT00;
struct wb35_mds *pMds = &adapter->Mds;
u8 *buffer;
u8 *src_buffer;
u8 *pctmp;
u16 Size = 0;
u16 SizeLeft, CopySize, CopyLeft, stmp;
u8 buf_index, FragmentCount = 0;
/* Copy fragment body */
buffer = TargetBuffer; /* shift 8B usb + 24B 802.11 */
SizeLeft = pDes->buffer_total_size;
buf_index = pDes->buffer_start_index;
pT00 = (struct T00_descriptor *)buffer;
while (SizeLeft) {
pT00 = (struct T00_descriptor *)buffer;
CopySize = SizeLeft;
if (SizeLeft > pDes->FragmentThreshold) {
CopySize = pDes->FragmentThreshold;
pT00->T00_frame_length = 24 + CopySize; /* Set USB length */
} else
pT00->T00_frame_length = 24 + SizeLeft; /* Set USB length */
SizeLeft -= CopySize;
/* 1 Byte operation */
pctmp = (u8 *)(buffer + 8 + DOT_11_SEQUENCE_OFFSET);
*pctmp &= 0xf0;
*pctmp |= FragmentCount; /* 931130.5.m */
if (!FragmentCount)
pT00->T00_first_mpdu = 1;
buffer += 32; /* 8B usb + 24B 802.11 header */
Size += 32;
/* Copy into buffer */
stmp = CopySize + 3;
stmp &= ~0x03; /* 4n Alignment */
Size += stmp; /* Current 4n offset of mpdu */
while (CopySize) {
/* Copy body */
src_buffer = pDes->buffer_address[buf_index];
CopyLeft = CopySize;
if (CopySize >= pDes->buffer_size[buf_index]) {
CopyLeft = pDes->buffer_size[buf_index];
/* Get the next buffer of descriptor */
buf_index++;
buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
} else {
u8 *pctmp = pDes->buffer_address[buf_index];
pctmp += CopySize;
pDes->buffer_address[buf_index] = pctmp;
pDes->buffer_size[buf_index] -= CopySize;
}
memcpy(buffer, src_buffer, CopyLeft);
buffer += CopyLeft;
CopySize -= CopyLeft;
}
/* 931130.5.n */
if (pMds->MicAdd) {
if (!SizeLeft) {
pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - pMds->MicAdd;
pMds->MicWriteSize[pMds->MicWriteIndex] = pMds->MicAdd;
pMds->MicAdd = 0;
} else if (SizeLeft < 8) { /* 931130.5.p */
pMds->MicAdd = SizeLeft;
pMds->MicWriteAddress[pMds->MicWriteIndex] = buffer - (8 - SizeLeft);
pMds->MicWriteSize[pMds->MicWriteIndex] = 8 - SizeLeft;
pMds->MicWriteIndex++;
}
}
/* Does it need to generate the new header for next mpdu? */
if (SizeLeft) {
buffer = TargetBuffer + Size; /* Get the next 4n start address */
memcpy(buffer, TargetBuffer, 32); /* Copy 8B USB +24B 802.11 */
pT00 = (struct T00_descriptor *)buffer;
pT00->T00_first_mpdu = 0;
}
FragmentCount++;
}
pT00->T00_last_mpdu = 1;
pT00->T00_IsLastMpdu = 1;
buffer = (u8 *)pT00 + 8; /* +8 for USB hdr */
buffer[1] &= ~0x04; /* Clear more frag bit of 802.11 frame control */
pDes->FragmentCount = FragmentCount; /* Update the correct fragment number */
return Size;
}
static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct wb35_mds *pMds = &adapter->Mds;
u8 *src_buffer = pDes->buffer_address[0]; /* 931130.5.g */
struct T00_descriptor *pT00;
struct T01_descriptor *pT01;
u16 stmp;
u8 i, ctmp1, ctmp2, ctmpf;
u16 FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;
stmp = pDes->buffer_total_size;
/*
* Set USB header 8 byte
*/
pT00 = (struct T00_descriptor *)TargetBuffer;
TargetBuffer += 4;
pT01 = (struct T01_descriptor *)TargetBuffer;
TargetBuffer += 4;
pT00->value = 0; /* Clear */
pT01->value = 0; /* Clear */
pT00->T00_tx_packet_id = pDes->Descriptor_ID; /* Set packet ID */
pT00->T00_header_length = 24; /* Set header length */
pT01->T01_retry_abort_ebable = 1; /* 921013 931130.5.h */
/* Key ID setup */
pT01->T01_wep_id = 0;
FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; /* Do not fragment */
/* Copy full data, the 1'st buffer contain all the data 931130.5.j */
memcpy(TargetBuffer, src_buffer, DOT_11_MAC_HEADER_SIZE); /* Copy header */
pDes->buffer_address[0] = src_buffer + DOT_11_MAC_HEADER_SIZE;
pDes->buffer_total_size -= DOT_11_MAC_HEADER_SIZE;
pDes->buffer_size[0] = pDes->buffer_total_size;
/* Set fragment threshold */
FragmentThreshold -= (DOT_11_MAC_HEADER_SIZE + 4);
pDes->FragmentThreshold = FragmentThreshold;
/* Set more frag bit */
TargetBuffer[1] |= 0x04; /* Set more frag bit */
/*
* Set tx rate
*/
stmp = *(u16 *)(TargetBuffer+30); /* 2n alignment address */
/* Use basic rate */
ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
pDes->TxRate = ctmp1;
pr_debug("Tx rate =%x\n", ctmp1);
pT01->T01_modulation_type = (ctmp1%3) ? 0 : 1;
for (i = 0; i < 2; i++) {
if (i == 1)
ctmp1 = ctmpf;
pMds->TxRate[pDes->Descriptor_ID][i] = ctmp1; /* back up the tx rate and fall-back rate */
if (ctmp1 == 108)
ctmp2 = 7;
else if (ctmp1 == 96)
ctmp2 = 6; /* Rate convert for USB */
else if (ctmp1 == 72)
ctmp2 = 5;
else if (ctmp1 == 48)
ctmp2 = 4;
else if (ctmp1 == 36)
ctmp2 = 3;
else if (ctmp1 == 24)
ctmp2 = 2;
else if (ctmp1 == 18)
ctmp2 = 1;
else if (ctmp1 == 12)
ctmp2 = 0;
else if (ctmp1 == 22)
ctmp2 = 3;
else if (ctmp1 == 11)
ctmp2 = 2;
else if (ctmp1 == 4)
ctmp2 = 1;
else
ctmp2 = 0; /* if( ctmp1 == 2 ) or default */
if (i == 0)
pT01->T01_transmit_rate = ctmp2;
else
pT01->T01_fall_back_rate = ctmp2;
}
/*
* Set preamble type
*/
if ((pT01->T01_modulation_type == 0) && (pT01->T01_transmit_rate == 0)) /* RATE_1M */
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
}
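/*
 * Editorial sketch (not in the driver): the if/else ladder above maps a
 * rate given in 500kbps units to the USB descriptor's rate code. An
 * equivalent table form, assuming the same OFDM/DSSS pairs:
 */
static const struct { u8 rate; u8 code; } mds_rate_code_map[] = {
/* OFDM */ {108, 7}, {96, 6}, {72, 5}, {48, 4}, {36, 3}, {24, 2}, {18, 1}, {12, 0},
/* DSSS */ {22, 3}, {11, 2}, {4, 1}, {2, 0},
};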
static void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
{
desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
desc->buffer_address[desc->InternalUsed] = adapter->sMlmeFrame.pMMPDU;
desc->buffer_size[desc->InternalUsed] = adapter->sMlmeFrame.len;
desc->buffer_total_size += adapter->sMlmeFrame.len;
desc->buffer_number++;
desc->Type = adapter->sMlmeFrame.DataType;
}
static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
{
int i;
/* Reclaim the data buffer */
for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
if (pData == (s8 *)&(adapter->sMlmeFrame.TxMMPDU[i]))
break;
}
if (adapter->sMlmeFrame.TxMMPDUInUse[i])
adapter->sMlmeFrame.TxMMPDUInUse[i] = false;
else {
/* Something wrong
PD43 Add debug code here??? */
}
}
static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
{
/* Reclaim the data buffer */
adapter->sMlmeFrame.len = 0;
MLMEfreeMMPDUBuffer(adapter, adapter->sMlmeFrame.pMMPDU);
/* Return resource */
adapter->sMlmeFrame.IsInUsed = PACKET_FREE_TO_USE;
}
void
Mds_Tx(struct wbsoft_priv *adapter)
{
struct hw_data *pHwData = &adapter->sHwData;
struct wb35_mds *pMds = &adapter->Mds;
struct wb35_descriptor TxDes;
struct wb35_descriptor *pTxDes = &TxDes;
u8 *XmitBufAddress;
u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
unsigned char BufferFilled = false;
if (pMds->TxPause)
return;
if (!hal_driver_init_OK(pHwData))
return;
/* Only one thread can be run here */
if (atomic_inc_return(&pMds->TxThreadCount) != 1)
goto cleanup;
/* Start to fill the data */
do {
FillIndex = pMds->TxFillIndex;
if (pMds->TxOwner[FillIndex]) { /* Is owned by software 0:Yes 1:No */
pr_debug("[Mds_Tx] Tx Owner is H/W.\n");
break;
}
XmitBufAddress = pMds->pTxBuffer + (MAX_USB_TX_BUFFER * FillIndex); /* Get buffer */
XmitBufSize = 0;
FillCount = 0;
do {
PacketSize = adapter->sMlmeFrame.len;
if (!PacketSize)
break;
/* For Check the buffer resource */
FragmentThreshold = CURRENT_FRAGMENT_THRESHOLD;
/* 931130.5.b */
FragmentCount = PacketSize/FragmentThreshold + 1;
stmp = PacketSize + FragmentCount*32 + 8; /* 931130.5.c 8:MIC */
if ((XmitBufSize + stmp) >= MAX_USB_TX_BUFFER) {
printk("[Mds_Tx] Excess max tx buffer.\n");
break; /* buffer is not enough */
}
/*
* Start transmitting
*/
BufferFilled = true;
/* Leaves first u8 intact */
memset((u8 *)pTxDes + 1, 0, sizeof(struct wb35_descriptor) - 1);
TxDesIndex = pMds->TxDesIndex; /* Get the current ID */
pTxDes->Descriptor_ID = TxDesIndex;
pMds->TxDesFrom[TxDesIndex] = 2; /* Record which source this descriptor came from */
pMds->TxDesIndex++;
pMds->TxDesIndex %= MAX_USB_TX_DESCRIPTOR;
MLME_GetNextPacket(adapter, pTxDes);
/* Copy header. 8byte USB + 24byte 802.11Hdr. Set TxRate, Preamble type */
Mds_HeaderCopy(adapter, pTxDes, XmitBufAddress);
/* For speed up Key setting */
if (pTxDes->EapFix) {
pr_debug("35: EPA 4th frame detected. Size = %d\n", PacketSize);
pHwData->IsKeyPreSet = 1;
}
/* Copy (fragment) frame body, and set USB, 802.11 hdr flag */
CurrentSize = Mds_BodyCopy(adapter, pTxDes, XmitBufAddress);
/* Set RTS/CTS and Normal duration field into buffer */
Mds_DurationSet(adapter, pTxDes, XmitBufAddress);
/* Shift to the next address */
XmitBufSize += CurrentSize;
XmitBufAddress += CurrentSize;
/* Get packet to transmit completed, 1:TESTSTA 2:MLME 3: Ndis data */
MLME_SendComplete(adapter, 0, true);
/* Software TSC count 20060214 */
pMds->TxTsc++;
if (pMds->TxTsc == 0)
pMds->TxTsc_2++;
FillCount++; /* 20060928 */
} while (HAL_USB_MODE_BURST(pHwData)); /* End of multiple-MSDU copy loop. false = single, true = multiple sending */
/* Move to the next one, if necessary */
if (BufferFilled) {
/* size setting */
pMds->TxBufferSize[FillIndex] = XmitBufSize;
/* 20060928 set Tx count */
pMds->TxCountInBuffer[FillIndex] = FillCount;
/* Set owner flag */
pMds->TxOwner[FillIndex] = 1;
pMds->TxFillIndex++;
pMds->TxFillIndex %= MAX_USB_TX_BUFFER_NUMBER;
BufferFilled = false;
} else
break;
if (!PacketSize) /* No more pk for transmitting */
break;
} while (true);
/*
* Start to send by lower module
*/
if (!pHwData->IsKeyPreSet)
Wb35Tx_start(adapter);
cleanup:
atomic_dec(&pMds->TxThreadCount);
}
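/*
 * Editorial note: atomic_inc_return() above acts as a try-lock -- only the
 * caller that raises TxThreadCount from 0 to 1 fills buffers; every other
 * concurrent caller simply jumps to cleanup and decrements again.
 */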
void
Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pT02)
{
struct wb35_mds *pMds = &adapter->Mds;
struct hw_data *pHwData = &adapter->sHwData;
u8 PacketId = (u8)pT02->T02_Tx_PktID;
unsigned char SendOK = true;
u8 RetryCount, TxRate;
if (pT02->T02_IgnoreResult) /* don't care about the result */
return;
if (pT02->T02_IsLastMpdu) {
/* TODO: DTO -- get the retry count and fragment count */
/* Tx rate */
TxRate = pMds->TxRate[PacketId][0];
RetryCount = (u8)pT02->T02_MPDU_Cnt;
if (pT02->value & FLAG_ERROR_TX_MASK) {
SendOK = false;
if (pT02->T02_transmit_abort || pT02->T02_out_of_MaxTxMSDULiftTime) {
/* retry error */
pHwData->dto_tx_retry_count += (RetryCount+1);
/* [for tx debug] */
if (RetryCount < 7)
pHwData->tx_retry_count[RetryCount] += RetryCount;
else
pHwData->tx_retry_count[7] += RetryCount;
pr_debug("dto_tx_retry_count =%d\n", pHwData->dto_tx_retry_count);
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
pHwData->dto_tx_frag_count += (RetryCount+1);
/* [for tx debug] */
if (pT02->T02_transmit_abort_due_to_TBTT)
pHwData->tx_TBTT_start_count++;
if (pT02->T02_transmit_without_encryption_due_to_wep_on_false)
pHwData->tx_WepOn_false_count++;
if (pT02->T02_discard_due_to_null_wep_key)
pHwData->tx_Null_key_count++;
} else {
if (pT02->T02_effective_transmission_rate)
pHwData->tx_ETR_count++;
MTO_SetTxCount(adapter, TxRate, RetryCount);
}
/* Clear send result buffer */
pMds->TxResult[PacketId] = 0;
} else
pMds->TxResult[PacketId] |= ((u16)(pT02->value & 0x0ffff));
}
| gpl-2.0 |
SudeepDuhoon/FirSt-kernel | arch/x86/kernel/cpu/transmeta.c | 11190 | 2945 | #include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "cpu.h"
static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
{
u32 xlvl;
/* Transmeta-defined flags: level 0x80860001 */
xlvl = cpuid_eax(0x80860000);
if ((xlvl & 0xffff0000) == 0x80860000) {
if (xlvl >= 0x80860001)
c->x86_capability[2] = cpuid_edx(0x80860001);
}
}
static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
{
unsigned int cap_mask, uk, max, dummy;
unsigned int cms_rev1, cms_rev2;
unsigned int cpu_rev, cpu_freq = 0, cpu_flags, new_cpu_rev;
char cpu_info[65];
early_init_transmeta(c);
cpu_detect_cache_sizes(c);
/* Print CMS and CPU revision */
max = cpuid_eax(0x80860000);
cpu_rev = 0;
if (max >= 0x80860001) {
cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
if (cpu_rev != 0x02000000) {
printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
(cpu_rev >> 24) & 0xff,
(cpu_rev >> 16) & 0xff,
(cpu_rev >> 8) & 0xff,
cpu_rev & 0xff,
cpu_freq);
}
}
if (max >= 0x80860002) {
cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
if (cpu_rev == 0x02000000) {
printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
new_cpu_rev, cpu_freq);
}
printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
(cms_rev1 >> 24) & 0xff,
(cms_rev1 >> 16) & 0xff,
(cms_rev1 >> 8) & 0xff,
cms_rev1 & 0xff,
cms_rev2);
}
if (max >= 0x80860006) {
cpuid(0x80860003,
(void *)&cpu_info[0],
(void *)&cpu_info[4],
(void *)&cpu_info[8],
(void *)&cpu_info[12]);
cpuid(0x80860004,
(void *)&cpu_info[16],
(void *)&cpu_info[20],
(void *)&cpu_info[24],
(void *)&cpu_info[28]);
cpuid(0x80860005,
(void *)&cpu_info[32],
(void *)&cpu_info[36],
(void *)&cpu_info[40],
(void *)&cpu_info[44]);
cpuid(0x80860006,
(void *)&cpu_info[48],
(void *)&cpu_info[52],
(void *)&cpu_info[56],
(void *)&cpu_info[60]);
cpu_info[64] = '\0';
printk(KERN_INFO "CPU: %s\n", cpu_info);
}
/* Unhide possibly hidden capability flags */
rdmsr(0x80860004, cap_mask, uk);
wrmsr(0x80860004, ~0, uk);
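/* With every mask bit set, CPUID leaf 1 reports the full feature set;
 * capture it, then restore the original mask below. */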
c->x86_capability[0] = cpuid_edx(0x00000001);
wrmsr(0x80860004, cap_mask, uk);
/* All Transmeta CPUs have a constant TSC */
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
#ifdef CONFIG_SYSCTL
/*
* randomize_va_space slows us down enormously;
* it probably triggers retranslation of x86->native bytecode
*/
randomize_va_space = 0;
#endif
}
static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
.c_vendor = "Transmeta",
.c_ident = { "GenuineTMx86", "TransmetaCPU" },
.c_early_init = early_init_transmeta,
.c_init = init_transmeta,
.c_x86_vendor = X86_VENDOR_TRANSMETA,
};
cpu_dev_register(transmeta_cpu_dev);
| gpl-2.0 |
schqiushui/kernel_kk444_sense_m8ace | arch/sh/boards/mach-highlander/irq-r7780rp.c | 13238 | 1586 | /*
* Renesas Solutions Highlander R7780RP-1 Support.
*
* Copyright (C) 2002 Atom Create Engineering Co., Ltd.
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2008 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/highlander.h>
enum {
UNUSED = 0,
/* board specific interrupt sources */
AX88796, /* Ethernet controller */
PSW, /* Push Switch */
CF, /* Compact Flash */
PCI_A,
PCI_B,
PCI_C,
PCI_D,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(PCI_A, 65), /* dirty: overwrite cpu vectors for pci */
INTC_IRQ(PCI_B, 66),
INTC_IRQ(PCI_C, 67),
INTC_IRQ(PCI_D, 68),
INTC_IRQ(CF, IRQ_CF),
INTC_IRQ(PSW, IRQ_PSW),
INTC_IRQ(AX88796, IRQ_AX88796),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xa5000000, 0, 16, /* IRLMSK */
{ PCI_A, PCI_B, PCI_C, PCI_D, CF, 0, 0, 0,
0, 0, 0, 0, 0, 0, PSW, AX88796 } },
};
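/* Translation table from the encoded IRL value to a Linux IRQ number;
 * 0 marks entries with no interrupt assigned. */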
static unsigned char irl2irq[HL_NR_IRL] __initdata = {
65, 66, 67, 68,
IRQ_CF, 0, 0, 0,
0, 0, 0, 0,
IRQ_AX88796, IRQ_PSW
};
static DECLARE_INTC_DESC(intc_desc, "r7780rp", vectors,
NULL, mask_registers, NULL, NULL);
unsigned char * __init highlander_plat_irq_setup(void)
{
if (__raw_readw(0xa5000600)) {
printk(KERN_INFO "Using r7780rp interrupt controller.\n");
register_intc_controller(&intc_desc);
return irl2irq;
}
return NULL;
}
| gpl-2.0 |
abhisit/TechNexion-linux | drivers/hid/hid-prodikeys.c | 951 | 21074 | /*
* HID driver for the Prodikeys PC-MIDI Keyboard
* providing midi & extra multimedia keys functionality
*
* Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
*
* Controls for Octave Shift Up/Down, Channel, and
* Sustain Duration available via sysfs.
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/hid.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
#include "hid-ids.h"
#define pk_debug(format, arg...) \
pr_debug("hid-prodikeys: " format "\n" , ## arg)
#define pk_error(format, arg...) \
pr_err("hid-prodikeys: " format "\n" , ## arg)
struct pcmidi_snd;
struct pk_device {
unsigned long quirks;
struct hid_device *hdev;
struct pcmidi_snd *pm; /* pcmidi device context */
};
struct pcmidi_sustain {
unsigned long in_use;
struct pcmidi_snd *pm;
struct timer_list timer;
unsigned char status;
unsigned char note;
unsigned char velocity;
};
#define PCMIDI_SUSTAINED_MAX 32
struct pcmidi_snd {
struct pk_device *pk;
unsigned short ifnum;
struct hid_report *pcmidi_report6;
struct input_dev *input_ep82;
unsigned short midi_mode;
unsigned short midi_sustain_mode;
unsigned short midi_sustain;
unsigned short midi_channel;
short midi_octave;
struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX];
unsigned short fn_state;
unsigned short last_key[24];
spinlock_t rawmidi_in_lock;
struct snd_card *card;
struct snd_rawmidi *rwmidi;
struct snd_rawmidi_substream *in_substream;
struct snd_rawmidi_substream *out_substream;
unsigned long in_triggered;
unsigned long out_active;
};
#define PK_QUIRK_NOGET 0x00010000
#define PCMIDI_MIDDLE_C 60
#define PCMIDI_CHANNEL_MIN 0
#define PCMIDI_CHANNEL_MAX 15
#define PCMIDI_OCTAVE_MIN (-2)
#define PCMIDI_OCTAVE_MAX 2
#define PCMIDI_SUSTAIN_MIN 0
#define PCMIDI_SUSTAIN_MAX 5000
static const char shortname[] = "PC-MIDI";
static const char longname[] = "Prodikeys PC-MIDI Keyboard";
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");
/* Output routine for the sysfs channel file */
static ssize_t show_channel(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);
return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
}
/* Input routine for the sysfs channel file */
static ssize_t store_channel(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
unsigned channel = 0;
if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
dbg_hid("pcmidi sysfs write channel=%u\n", channel);
pk->pm->midi_channel = channel;
return strlen(buf);
}
return -EINVAL;
}
static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel,
store_channel);
static struct device_attribute *sysfs_device_attr_channel = {
&dev_attr_channel,
};
/* Output routine for the sysfs sustain file */
static ssize_t show_sustain(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);
return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
}
/* Input routine for the sysfs sustain file */
static ssize_t store_sustain(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
unsigned sustain = 0;
if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
pk->pm->midi_sustain = sustain;
pk->pm->midi_sustain_mode =
(0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
return strlen(buf);
}
return -EINVAL;
}
static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain,
store_sustain);
static struct device_attribute *sysfs_device_attr_sustain = {
&dev_attr_sustain,
};
/* Output routine for the sysfs octave file */
static ssize_t show_octave(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);
return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
}
/* Input routine for the sysfs octave file */
static ssize_t store_octave(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hdev = container_of(dev, struct hid_device, dev);
struct pk_device *pk = hid_get_drvdata(hdev);
int octave = 0;
if (sscanf(buf, "%d", &octave) > 0 &&
octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
dbg_hid("pcmidi sysfs write octave=%d\n", octave);
pk->pm->midi_octave = octave;
return strlen(buf);
}
return -EINVAL;
}
static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave,
store_octave);
static struct device_attribute *sysfs_device_attr_octave = {
&dev_attr_octave,
};
static void pcmidi_send_note(struct pcmidi_snd *pm,
unsigned char status, unsigned char note, unsigned char velocity)
{
unsigned long flags;
unsigned char buffer[3];
buffer[0] = status;
buffer[1] = note;
buffer[2] = velocity;
spin_lock_irqsave(&pm->rawmidi_in_lock, flags);
if (!pm->in_substream)
goto drop_note;
if (!test_bit(pm->in_substream->number, &pm->in_triggered))
goto drop_note;
snd_rawmidi_receive(pm->in_substream, buffer, 3);
drop_note:
spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);
return;
}
static void pcmidi_sustained_note_release(unsigned long data)
{
struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
}
static void init_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
pms = &pm->sustained_notes[i];
pms->in_use = 0;
pms->pm = pm;
setup_timer(&pms->timer, pcmidi_sustained_note_release,
(unsigned long)pms);
}
}
static void stop_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
pms = &pm->sustained_notes[i];
pms->in_use = 1;
del_timer_sync(&pms->timer);
}
}
static int pcmidi_get_output_report(struct pcmidi_snd *pm)
{
struct hid_device *hdev = pm->pk->hdev;
struct hid_report *report;
list_for_each_entry(report,
&hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
if (!(6 == report->id))
continue;
if (report->maxfield < 1) {
hid_err(hdev, "output report is empty\n");
break;
}
if (report->field[0]->report_count != 2) {
hid_err(hdev, "field count too low\n");
break;
}
pm->pcmidi_report6 = report;
return 0;
}
/* should never get here */
return -ENODEV;
}
static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
{
struct hid_device *hdev = pm->pk->hdev;
struct hid_report *report = pm->pcmidi_report6;
report->field[0]->value[0] = 0x01;
report->field[0]->value[1] = state;
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}
static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
{
u32 bit_mask;
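/* Assemble the 24-bit key bitmap from the three payload bytes. */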
bit_mask = data[1];
bit_mask = (bit_mask << 8) | data[2];
bit_mask = (bit_mask << 8) | data[3];
dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
/*KEY_MAIL or octave down*/
if (pm->midi_mode && bit_mask == 0x004000) {
/* octave down */
pm->midi_octave--;
if (pm->midi_octave < -2)
pm->midi_octave = -2;
dbg_hid("pcmidi mode: %d octave: %d\n",
pm->midi_mode, pm->midi_octave);
return 1;
}
/*KEY_WWW or sustain*/
else if (pm->midi_mode && bit_mask == 0x000004) {
/* sustain on/off*/
pm->midi_sustain_mode ^= 0x1;
return 1;
}
return 0; /* continue key processing */
}
static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
{
struct pcmidi_sustain *pms;
unsigned i, j;
unsigned char status, note, velocity;
unsigned num_notes = (size-1)/2;
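/* The report body carries (note, velocity) pairs after the report ID;
 * each pair is translated into a 3-byte MIDI note-on/note-off message. */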
for (j = 0; j < num_notes; j++) {
note = data[j*2+1];
velocity = data[j*2+2];
if (note < 0x81) { /* note on */
status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
note = note - 0x54 + PCMIDI_MIDDLE_C +
(pm->midi_octave * 12);
if (0 == velocity)
velocity = 1; /* force note on */
} else { /* note off */
status = 128 + pm->midi_channel; /* 1000nnnn */
note = note - 0x94 + PCMIDI_MIDDLE_C +
(pm->midi_octave*12);
if (pm->midi_sustain_mode) {
for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
pms = &pm->sustained_notes[i];
if (!pms->in_use) {
pms->status = status;
pms->note = note;
pms->velocity = velocity;
pms->in_use = 1;
mod_timer(&pms->timer,
jiffies +
msecs_to_jiffies(pm->midi_sustain));
return 1;
}
}
}
}
pcmidi_send_note(pm, status, note, velocity);
}
return 1;
}
static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
{
unsigned key;
u32 bit_mask;
u32 bit_index;
bit_mask = data[1];
bit_mask = (bit_mask << 8) | data[2];
bit_mask = (bit_mask << 8) | data[3];
/* break keys */
for (bit_index = 0; bit_index < 24; bit_index++) {
key = pm->last_key[bit_index];
if (!((0x01 << bit_index) & bit_mask)) {
input_event(pm->input_ep82, EV_KEY,
pm->last_key[bit_index], 0);
pm->last_key[bit_index] = 0;
}
}
/* make keys */
for (bit_index = 0; bit_index < 24; bit_index++) {
key = 0;
switch ((0x01 << bit_index) & bit_mask) {
case 0x000010: /* Fn lock*/
pm->fn_state ^= 0x000010;
if (pm->fn_state)
pcmidi_submit_output_report(pm, 0xc5);
else
pcmidi_submit_output_report(pm, 0xc6);
continue;
case 0x020000: /* midi launcher..send a key (qwerty) or not? */
pcmidi_submit_output_report(pm, 0xc1);
pm->midi_mode ^= 0x01;
dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
continue;
case 0x100000: /* KEY_MESSENGER or octave up */
dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
if (pm->midi_mode) {
pm->midi_octave++;
if (pm->midi_octave > 2)
pm->midi_octave = 2;
dbg_hid("pcmidi mode: %d octave: %d\n",
pm->midi_mode, pm->midi_octave);
continue;
} else
key = KEY_MESSENGER;
break;
case 0x400000:
key = KEY_CALENDAR;
break;
case 0x080000:
key = KEY_ADDRESSBOOK;
break;
case 0x040000:
key = KEY_DOCUMENTS;
break;
case 0x800000:
key = KEY_WORDPROCESSOR;
break;
case 0x200000:
key = KEY_SPREADSHEET;
break;
case 0x010000:
key = KEY_COFFEE;
break;
case 0x000100:
key = KEY_HELP;
break;
case 0x000200:
key = KEY_SEND;
break;
case 0x000400:
key = KEY_REPLY;
break;
case 0x000800:
key = KEY_FORWARDMAIL;
break;
case 0x001000:
key = KEY_NEW;
break;
case 0x002000:
key = KEY_OPEN;
break;
case 0x004000:
key = KEY_CLOSE;
break;
case 0x008000:
key = KEY_SAVE;
break;
case 0x000001:
key = KEY_UNDO;
break;
case 0x000002:
key = KEY_REDO;
break;
case 0x000004:
key = KEY_SPELLCHECK;
break;
case 0x000008:
key = KEY_PRINT;
break;
}
if (key) {
input_event(pm->input_ep82, EV_KEY, key, 1);
pm->last_key[bit_index] = key;
}
}
return 1;
}
static int pcmidi_handle_report(
struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
{
int ret = 0;
switch (report_id) {
case 0x01: /* midi keys (qwerty)*/
ret = pcmidi_handle_report1(pm, data);
break;
case 0x03: /* midi keyboard (musical)*/
ret = pcmidi_handle_report3(pm, data, size);
break;
case 0x04: /* multimedia/midi keys (qwerty)*/
ret = pcmidi_handle_report4(pm, data);
break;
}
return ret;
}
static void pcmidi_setup_extra_keys(
struct pcmidi_snd *pm, struct input_dev *input)
{
/* reassigned functionality for N/A keys
MY PICTURES => KEY_WORDPROCESSOR
MY MUSIC => KEY_SPREADSHEET
*/
unsigned int keys[] = {
KEY_FN,
KEY_MESSENGER, KEY_CALENDAR,
KEY_ADDRESSBOOK, KEY_DOCUMENTS,
KEY_WORDPROCESSOR,
KEY_SPREADSHEET,
KEY_COFFEE,
KEY_HELP, KEY_SEND,
KEY_REPLY, KEY_FORWARDMAIL,
KEY_NEW, KEY_OPEN,
KEY_CLOSE, KEY_SAVE,
KEY_UNDO, KEY_REDO,
KEY_SPELLCHECK, KEY_PRINT,
0
};
unsigned int *pkeys = &keys[0];
unsigned short i;
if (pm->ifnum != 1) /* only set up ONCE for interface 1 */
return;
pm->input_ep82 = input;
for (i = 0; i < 24; i++)
pm->last_key[i] = 0;
while (*pkeys != 0) {
set_bit(*pkeys, pm->input_ep82->keybit);
++pkeys;
}
}
static int pcmidi_set_operational(struct pcmidi_snd *pm)
{
if (pm->ifnum != 1)
return 0; /* only set up ONCE for interface 1 */
pcmidi_get_output_report(pm);
pcmidi_submit_output_report(pm, 0xc1);
return 0;
}
static int pcmidi_snd_free(struct snd_device *dev)
{
return 0;
}
static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
{
struct pcmidi_snd *pm = substream->rmidi->private_data;
dbg_hid("pcmidi in open\n");
pm->in_substream = substream;
return 0;
}
static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
{
dbg_hid("pcmidi in close\n");
return 0;
}
static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct pcmidi_snd *pm = substream->rmidi->private_data;
dbg_hid("pcmidi in trigger %d\n", up);
pm->in_triggered = up;
}
static struct snd_rawmidi_ops pcmidi_in_ops = {
.open = pcmidi_in_open,
.close = pcmidi_in_close,
.trigger = pcmidi_in_trigger
};
static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
{
static int dev;
struct snd_card *card;
struct snd_rawmidi *rwmidi;
int err;
static struct snd_device_ops ops = {
.dev_free = pcmidi_snd_free,
};
if (pm->ifnum != 1)
return 0; /* only set up midi device ONCE for interface 1 */
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
dev++;
return -ENOENT;
}
/* Setup sound card */
err = snd_card_new(&pm->pk->hdev->dev, index[dev], id[dev],
THIS_MODULE, 0, &card);
if (err < 0) {
pk_error("failed to create pc-midi sound card\n");
err = -ENOMEM;
goto fail;
}
pm->card = card;
/* Setup sound device */
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
if (err < 0) {
pk_error("failed to create pc-midi sound device: error %d\n",
err);
goto fail;
}
strncpy(card->driver, shortname, sizeof(card->driver));
strncpy(card->shortname, shortname, sizeof(card->shortname));
strncpy(card->longname, longname, sizeof(card->longname));
/* Set up rawmidi */
err = snd_rawmidi_new(card, card->shortname, 0,
0, 1, &rwmidi);
if (err < 0) {
pk_error("failed to create pc-midi rawmidi device: error %d\n",
err);
goto fail;
}
pm->rwmidi = rwmidi;
strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
rwmidi->private_data = pm;
snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
&pcmidi_in_ops);
/* create sysfs variables */
err = device_create_file(&pm->pk->hdev->dev,
sysfs_device_attr_channel);
if (err < 0) {
pk_error("failed to create sysfs attribute channel: error %d\n",
err);
goto fail;
}
err = device_create_file(&pm->pk->hdev->dev,
sysfs_device_attr_sustain);
if (err < 0) {
pk_error("failed to create sysfs attribute sustain: error %d\n",
err);
goto fail_attr_sustain;
}
err = device_create_file(&pm->pk->hdev->dev,
sysfs_device_attr_octave);
if (err < 0) {
pk_error("failed to create sysfs attribute octave: error %d\n",
err);
goto fail_attr_octave;
}
spin_lock_init(&pm->rawmidi_in_lock);
init_sustain_timers(pm);
pcmidi_set_operational(pm);
/* register it */
err = snd_card_register(card);
if (err < 0) {
pk_error("failed to register pc-midi sound card: error %d\n",
err);
goto fail_register;
}
dbg_hid("pcmidi_snd_initialise finished ok\n");
return 0;
fail_register:
stop_sustain_timers(pm);
device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
fail_attr_octave:
device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
fail_attr_sustain:
device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
fail:
if (pm->card) {
snd_card_free(pm->card);
pm->card = NULL;
}
return err;
}
static int pcmidi_snd_terminate(struct pcmidi_snd *pm)
{
if (pm->card) {
stop_sustain_timers(pm);
device_remove_file(&pm->pk->hdev->dev,
sysfs_device_attr_channel);
device_remove_file(&pm->pk->hdev->dev,
sysfs_device_attr_sustain);
device_remove_file(&pm->pk->hdev->dev,
sysfs_device_attr_octave);
snd_card_disconnect(pm->card);
snd_card_free_when_closed(pm->card);
}
return 0;
}
/*
* The report descriptor the PC-MIDI keyboard supplies for report id 4 is wrong.
*/
static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize == 178 &&
rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
rdesc[113] == 0xff) {
hid_info(hdev,
"fixing up pc-midi keyboard report descriptor\n");
rdesc[144] = 0x18; /* report 4: was 0x10 report count */
}
return rdesc;
}
static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct pk_device *pk = hid_get_drvdata(hdev);
struct pcmidi_snd *pm;
pm = pk->pm;
if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
1 == pm->ifnum) {
pcmidi_setup_extra_keys(pm, hi->input);
return 0;
}
return 0;
}
static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct pk_device *pk = hid_get_drvdata(hdev);
int ret = 0;
if (1 == pk->pm->ifnum) {
if (report->id == data[0])
switch (report->id) {
case 0x01: /* midi keys (qwerty)*/
case 0x03: /* midi keyboard (musical)*/
case 0x04: /* extra/midi keys (qwerty)*/
ret = pcmidi_handle_report(pk->pm,
report->id, data, size);
break;
}
}
return ret;
}
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
unsigned long quirks = id->driver_data;
struct pk_device *pk;
struct pcmidi_snd *pm = NULL;
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
if (pk == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
return -ENOMEM;
}
pk->hdev = hdev;
pm = kzalloc(sizeof(*pm), GFP_KERNEL);
if (pm == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
ret = -ENOMEM;
goto err_free_pk;
}
pm->pk = pk;
pk->pm = pm;
pm->ifnum = ifnum;
hid_set_drvdata(hdev, pk);
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "hid parse failed\n");
goto err_free;
}
if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
hdev->quirks |= HID_QUIRK_NOGET;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
goto err_free;
}
ret = pcmidi_snd_initialise(pm);
if (ret < 0)
goto err_stop;
return 0;
err_stop:
hid_hw_stop(hdev);
err_free:
kfree(pm);
err_free_pk:
kfree(pk);
return ret;
}
static void pk_remove(struct hid_device *hdev)
{
struct pk_device *pk = hid_get_drvdata(hdev);
struct pcmidi_snd *pm;
pm = pk->pm;
if (pm) {
pcmidi_snd_terminate(pm);
kfree(pm);
}
hid_hw_stop(hdev);
kfree(pk);
}
static const struct hid_device_id pk_devices[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
USB_DEVICE_ID_PRODIKEYS_PCMIDI),
.driver_data = PK_QUIRK_NOGET},
{ }
};
MODULE_DEVICE_TABLE(hid, pk_devices);
static struct hid_driver pk_driver = {
.name = "prodikeys",
.id_table = pk_devices,
.report_fixup = pk_report_fixup,
.input_mapping = pk_input_mapping,
.raw_event = pk_raw_event,
.probe = pk_probe,
.remove = pk_remove,
};
module_hid_driver(pk_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
paulburton/axdroid-kernel | drivers/scsi/sgiwd93.c | 1207 | 8335 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
* Copyright (C) 2001 Florian Lohoff (flo@rfc822.org)
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* (In all truth, Jed Schimmel wrote all this code.)
*/
#undef DEBUG
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/wd.h>
#include "scsi.h"
#include "wd33c93.h"
struct ip22_hostdata {
struct WD33C93_hostdata wh;
dma_addr_t dma;
void *cpu;
struct device *dev;
};
#define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata))
struct hpc_chunk {
struct hpc_dma_desc desc;
u32 _padding; /* align to quadword boundary */
};
/* space for hpc dma descriptors */
#define HPC_DMA_SIZE PAGE_SIZE
#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
{
struct Scsi_Host * host = dev_id;
unsigned long flags;
spin_lock_irqsave(host->host_lock, flags);
wd33c93_intr(host);
spin_unlock_irqrestore(host->host_lock, flags);
return IRQ_HANDLED;
}
static inline
void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
{
unsigned long len = cmd->SCp.this_residual;
void *addr = cmd->SCp.ptr;
dma_addr_t physaddr;
unsigned long count;
struct hpc_chunk *hcp;
physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
cmd->SCp.dma_handle = physaddr;
hcp = hd->cpu;
while (len) {
/*
* cntinfo can technically hold values up to 16383, but without
* extra magic only chunks of 8192 bytes work reliably
*/
count = len > 8192 ? 8192 : len;
hcp->desc.pbuf = physaddr;
hcp->desc.cntinfo = count;
hcp++;
len -= count;
physaddr += count;
}
/*
* To make sure, if we trip an HPC bug, that we transfer every single
* byte, we tag on an extra zero length dma descriptor at the end of
* the chain.
*/
hcp->desc.pbuf = 0;
hcp->desc.cntinfo = HPCDMA_EOX;
dma_cache_sync(hd->dev, hd->cpu,
(unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
DMA_TO_DEVICE);
}
static int dma_setup(struct scsi_cmnd *cmd, int datainp)
{
struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
struct hpc3_scsiregs *hregs =
(struct hpc3_scsiregs *) cmd->device->host->base;
pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
hdata->wh.dma_dir = datainp;
/*
* wd33c93 shouldn't pass us bogus dma_setups, but it does:-( The
* other wd33c93 drivers deal with it the same way (which isn't that
* obvious). IMHO a better fix would be not to do these dma setups
* in the first place.
*/
if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
return 1;
fill_hpc_entries(hdata, cmd, datainp);
pr_debug(" HPCGO\n");
/* Start up the HPC. */
hregs->ndptr = hdata->dma;
if (datainp)
hregs->ctrl = HPC3_SCTRL_ACTIVE;
else
hregs->ctrl = HPC3_SCTRL_ACTIVE | HPC3_SCTRL_DIR;
return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
struct ip22_hostdata *hdata = host_to_hostdata(instance);
struct hpc3_scsiregs *hregs;
if (!SCpnt)
return;
if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
return;
hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
pr_debug("dma_stop: status<%d> ", status);
/* First stop the HPC and flush its FIFO. */
if (hdata->wh.dma_dir) {
hregs->ctrl |= HPC3_SCTRL_FLUSH;
while (hregs->ctrl & HPC3_SCTRL_ACTIVE)
barrier();
}
hregs->ctrl = 0;
dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
SCpnt->SCp.this_residual,
DMA_DIR(hdata->wh.dma_dir));
pr_debug("\n");
}
void sgiwd93_reset(unsigned long base)
{
struct hpc3_scsiregs *hregs = (struct hpc3_scsiregs *) base;
hregs->ctrl = HPC3_SCTRL_CRESET;
udelay(50);
hregs->ctrl = 0;
}
EXPORT_SYMBOL_GPL(sgiwd93_reset);
static inline void init_hpc_chain(struct ip22_hostdata *hdata)
{
struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
dma_addr_t dma = hdata->dma;
unsigned long start, end;
start = (unsigned long) hcp;
end = start + HPC_DMA_SIZE;
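/* Chain every descriptor to its successor; after the loop the last
 * entry is rewound to point back at the first, closing the ring. */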
while (start < end) {
hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk));
hcp->desc.cntinfo = HPCDMA_EOX;
hcp++;
dma += sizeof(struct hpc_chunk);
start += sizeof(struct hpc_chunk);
}
hcp--;
hcp->desc.pnext = hdata->dma;
}
static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
{
/* FIXME perform bus-specific reset */
/* FIXME 2: kill this function, and let midlayer fallback
to the same result, calling wd33c93_host_reset() */
spin_lock_irq(cmd->device->host->host_lock);
wd33c93_host_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
return SUCCESS;
}
/*
* Kludge alert - the SCSI code calls the abort and reset method with int
* arguments not with pointers. So this is going to blow up beautifully
* on 64-bit systems with memory outside the compat address spaces.
*/
static struct scsi_host_template sgiwd93_template = {
.module = THIS_MODULE,
.proc_name = "SGIWD93",
.name = "SGI WD93",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
.eh_bus_reset_handler = sgiwd93_bus_reset,
.eh_host_reset_handler = wd33c93_host_reset,
.can_queue = 16,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 8,
.use_clustering = DISABLE_CLUSTERING,
};
static int __devinit sgiwd93_probe(struct platform_device *pdev)
{
struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
unsigned char *wdregs = pd->wdregs;
struct hpc3_scsiregs *hregs = pd->hregs;
struct ip22_hostdata *hdata;
struct Scsi_Host *host;
wd33c93_regs regs;
unsigned int unit = pd->unit;
unsigned int irq = pd->irq;
int err;
host = scsi_host_alloc(&sgiwd93_template, sizeof(struct ip22_hostdata));
if (!host) {
err = -ENOMEM;
goto out;
}
host->base = (unsigned long) hregs;
host->irq = irq;
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
&hdata->dma, GFP_KERNEL);
if (!hdata->cpu) {
printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
"host %d buffer.\n", unit);
err = -ENOMEM;
goto out_put;
}
init_hpc_chain(hdata);
regs.SASR = wdregs + 3;
regs.SCMD = wdregs + 7;
hdata->wh.no_sync = 0;
hdata->wh.fast = 1;
hdata->wh.dma_mode = CTRL_BURST;
wd33c93_init(host, regs, dma_setup, dma_stop, WD33C93_FS_MHZ(20));
err = request_irq(irq, sgiwd93_intr, 0, "SGI WD93", host);
if (err) {
printk(KERN_WARNING "sgiwd93: Could not register irq %d "
"for host %d.\n", irq, unit);
goto out_free;
}
platform_set_drvdata(pdev, host);
err = scsi_add_host(host, NULL);
if (err)
goto out_irq;
scsi_scan_host(host);
return 0;
out_irq:
free_irq(irq, host);
out_free:
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
out_put:
scsi_host_put(host);
out:
return err;
}
static int __exit sgiwd93_remove(struct platform_device *pdev)
{
struct Scsi_Host *host = platform_get_drvdata(pdev);
struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata;
struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
scsi_remove_host(host);
free_irq(pd->irq, host);
dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
scsi_host_put(host);
return 0;
}
static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
.remove = __devexit_p(sgiwd93_remove),
.driver = {
.name = "sgiwd93",
.owner = THIS_MODULE,
}
};
static int __init sgiwd93_module_init(void)
{
return platform_driver_register(&sgiwd93_driver);
}
static void __exit sgiwd93_module_exit(void)
{
return platform_driver_unregister(&sgiwd93_driver);
}
module_init(sgiwd93_module_init);
module_exit(sgiwd93_module_exit);
MODULE_DESCRIPTION("SGI WD33C93 driver");
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiwd93");
| gpl-2.0 |
AospPlus/android_kernel_msm | drivers/gpio/gpio-rcar.c | 1719 | 10519 | /*
* Renesas R-Car GPIO Support
*
* Copyright (C) 2013 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/gpio-rcar.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
struct gpio_rcar_priv {
void __iomem *base;
spinlock_t lock;
struct gpio_rcar_config config;
struct platform_device *pdev;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
struct irq_domain *irq_domain;
};
#define IOINTSEL 0x00
#define INOUTSEL 0x04
#define OUTDT 0x08
#define INDT 0x0c
#define INTDT 0x10
#define INTCLR 0x14
#define INTMSK 0x18
#define MSKCLR 0x1c
#define POSNEG 0x20
#define EDGLEVEL 0x24
#define FILONOFF 0x28
static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs)
{
return ioread32(p->base + offs);
}
static inline void gpio_rcar_write(struct gpio_rcar_priv *p, int offs,
u32 value)
{
iowrite32(value, p->base + offs);
}
static void gpio_rcar_modify_bit(struct gpio_rcar_priv *p, int offs,
int bit, bool value)
{
u32 tmp = gpio_rcar_read(p, offs);
if (value)
tmp |= BIT(bit);
else
tmp &= ~BIT(bit);
gpio_rcar_write(p, offs, tmp);
}
static void gpio_rcar_irq_disable(struct irq_data *d)
{
struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
gpio_rcar_write(p, INTMSK, ~BIT(irqd_to_hwirq(d)));
}
static void gpio_rcar_irq_enable(struct irq_data *d)
{
struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
gpio_rcar_write(p, MSKCLR, BIT(irqd_to_hwirq(d)));
}
static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
unsigned int hwirq,
bool active_high_rising_edge,
bool level_trigger)
{
unsigned long flags;
/* follow steps in the GPIO documentation for
* "Setting Edge-Sensitive Interrupt Input Mode" and
* "Setting Level-Sensitive Interrupt Input Mode"
*/
spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
/* Configure edge or level trigger in EDGLEVEL */
gpio_rcar_modify_bit(p, EDGLEVEL, hwirq, !level_trigger);
/* Select "Interrupt Input Mode" in IOINTSEL */
gpio_rcar_modify_bit(p, IOINTSEL, hwirq, true);
/* Write INTCLR in case of edge trigger */
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_rcar_priv *p = irq_data_get_irq_chip_data(d);
unsigned int hwirq = irqd_to_hwirq(d);
dev_dbg(&p->pdev->dev, "sense irq = %d, type = %d\n", hwirq, type);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_HIGH:
gpio_rcar_config_interrupt_input_mode(p, hwirq, true, true);
break;
case IRQ_TYPE_LEVEL_LOW:
gpio_rcar_config_interrupt_input_mode(p, hwirq, false, true);
break;
case IRQ_TYPE_EDGE_RISING:
gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false);
break;
case IRQ_TYPE_EDGE_FALLING:
gpio_rcar_config_interrupt_input_mode(p, hwirq, false, false);
break;
default:
return -EINVAL;
}
return 0;
}
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
u32 pending;
unsigned int offset, irqs_handled = 0;
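/* Drain INTDT: each set bit is a pending GPIO interrupt. Ack it in
 * INTCLR, then dispatch the virtual IRQ mapped to that line. */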
while ((pending = gpio_rcar_read(p, INTDT))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
irqs_handled++;
}
return irqs_handled ? IRQ_HANDLED : IRQ_NONE;
}
static inline struct gpio_rcar_priv *gpio_to_priv(struct gpio_chip *chip)
{
return container_of(chip, struct gpio_rcar_priv, gpio_chip);
}
static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
unsigned int gpio,
bool output)
{
struct gpio_rcar_priv *p = gpio_to_priv(chip);
unsigned long flags;
/* follow steps in the GPIO documentation for
* "Setting General Output Mode" and
* "Setting General Input Mode"
*/
spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
/* Select "General Input/Output Mode" in IOINTSEL */
gpio_rcar_modify_bit(p, IOINTSEL, gpio, false);
/* Select Input Mode or Output Mode in INOUTSEL */
gpio_rcar_modify_bit(p, INOUTSEL, gpio, output);
spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
return pinctrl_request_gpio(chip->base + offset);
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
pinctrl_free_gpio(chip->base + offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
{
gpio_rcar_config_general_input_output_mode(chip, offset, false);
return 0;
}
static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
{
return (int)(gpio_rcar_read(gpio_to_priv(chip), INDT) & BIT(offset));
}
static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_rcar_priv *p = gpio_to_priv(chip);
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
/* write GPIO value to output before selecting output mode of pin */
gpio_rcar_set(chip, offset, value);
gpio_rcar_config_general_input_output_mode(chip, offset, true);
return 0;
}
static int gpio_rcar_to_irq(struct gpio_chip *chip, unsigned offset)
{
return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
}
static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct gpio_rcar_priv *p = h->host_data;
dev_dbg(&p->pdev->dev, "map hw irq = %d, virq = %d\n", (int)hw, virq);
irq_set_chip_data(virq, h->host_data);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
set_irq_flags(virq, IRQF_VALID); /* kill me now */
return 0;
}
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
};
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_config *pdata = pdev->dev.platform_data;
struct gpio_rcar_priv *p;
struct resource *io, *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
const char *name = dev_name(&pdev->dev);
int ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
ret = -ENOMEM;
goto err0;
}
/* deal with driver instance configuration */
if (pdata)
p->config = *pdata;
p->pdev = pdev;
platform_set_drvdata(pdev, p);
spin_lock_init(&p->lock);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!io || !irq) {
dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
ret = -EINVAL;
goto err0;
}
p->base = devm_ioremap_nocache(&pdev->dev, io->start,
resource_size(io));
if (!p->base) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
ret = -ENXIO;
goto err0;
}
gpio_chip = &p->gpio_chip;
gpio_chip->request = gpio_rcar_request;
gpio_chip->free = gpio_rcar_free;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
gpio_chip->set = gpio_rcar_set;
gpio_chip->to_irq = gpio_rcar_to_irq;
gpio_chip->label = name;
gpio_chip->owner = THIS_MODULE;
gpio_chip->base = p->config.gpio_base;
gpio_chip->ngpio = p->config.number_of_pins;
irq_chip = &p->irq_chip;
irq_chip->name = name;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_enable = gpio_rcar_irq_enable;
irq_chip->irq_disable = gpio_rcar_irq_disable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED;
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
p->config.number_of_pins,
p->config.irq_base,
&gpio_rcar_irq_domain_ops, p);
if (!p->irq_domain) {
ret = -ENXIO;
dev_err(&pdev->dev, "cannot initialize irq domain\n");
goto err0;
}
if (devm_request_irq(&pdev->dev, irq->start,
gpio_rcar_irq_handler, IRQF_SHARED, name, p)) {
dev_err(&pdev->dev, "failed to request IRQ\n");
ret = -ENOENT;
goto err1;
}
ret = gpiochip_add(gpio_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add GPIO controller\n");
goto err1;
}
dev_info(&pdev->dev, "driving %d GPIOs\n", p->config.number_of_pins);
/* warn in case of mismatch if irq base is specified */
if (p->config.irq_base) {
ret = irq_find_mapping(p->irq_domain, 0);
if (p->config.irq_base != ret)
dev_warn(&pdev->dev, "irq base mismatch (%u/%u)\n",
p->config.irq_base, ret);
}
ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0,
gpio_chip->base, gpio_chip->ngpio);
if (ret < 0)
dev_warn(&pdev->dev, "failed to add pin range\n");
return 0;
err1:
irq_domain_remove(p->irq_domain);
err0:
return ret;
}
static int gpio_rcar_remove(struct platform_device *pdev)
{
struct gpio_rcar_priv *p = platform_get_drvdata(pdev);
int ret;
ret = gpiochip_remove(&p->gpio_chip);
if (ret)
return ret;
irq_domain_remove(p->irq_domain);
return 0;
}
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
.remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
}
};
module_platform_driver(gpio_rcar_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas R-Car GPIO Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
talnoah/m7-sense | arch/arm/kernel/patch.c | 2999 | 1609 | #include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
#include "patch.h"
struct patch {
void *addr;
unsigned int insn;
};
void __kprobes __patch_text(void *addr, unsigned int insn)
{
bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
int size;
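/* On Thumb-2 kernels an instruction may be a 16-bit opcode or a
 * 32-bit opcode whose halves straddle a word boundary; the latter
 * must be written as two halfword stores. */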
if (thumb2 && __opcode_is_thumb16(insn)) {
*(u16 *)addr = __opcode_to_mem_thumb16(insn);
size = sizeof(u16);
} else if (thumb2 && ((uintptr_t)addr & 2)) {
u16 first = __opcode_thumb32_first(insn);
u16 second = __opcode_thumb32_second(insn);
u16 *addrh = addr;
addrh[0] = __opcode_to_mem_thumb16(first);
addrh[1] = __opcode_to_mem_thumb16(second);
size = sizeof(u32);
} else {
if (thumb2)
insn = __opcode_to_mem_thumb32(insn);
else
insn = __opcode_to_mem_arm(insn);
*(u32 *)addr = insn;
size = sizeof(u32);
}
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
}
static int __kprobes patch_text_stop_machine(void *data)
{
struct patch *patch = data;
__patch_text(patch->addr, patch->insn);
return 0;
}
void __kprobes patch_text(void *addr, unsigned int insn)
{
struct patch patch = {
.addr = addr,
.insn = insn,
};
if (cache_ops_need_broadcast()) {
stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
} else {
bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
&& __opcode_is_thumb32(insn)
&& ((uintptr_t)addr & 2);
if (straddles_word)
stop_machine(patch_text_stop_machine, &patch, NULL);
else
__patch_text(addr, insn);
}
}
| gpl-2.0 |
percy-g2/android_kernel_sony_msm8226 | drivers/video/vt8623fb.c | 4791 | 27301 | /*
* linux/drivers/video/vt8623fb.c - fbdev driver for
* integrated graphic core in VIA VT8623 [CLE266] chipset
*
* Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Code is based on s3fb, some parts are from David Boucher's viafb
* (http://davesdomain.org.uk/viafb/)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/svga.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h> /* Why should an fb driver call console functions? Because of console_lock() */
#include <video/vga.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
struct vt8623fb_info {
char __iomem *mmio_base;
int mtrr_reg;
struct vgastate state;
struct mutex open_lock;
unsigned int ref_count;
u32 pseudo_palette[16];
};
/* ------------------------------------------------------------------------- */
static const struct svga_fb_format vt8623fb_formats[] = {
{ 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
{ 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
{ 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
{ 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
{16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
{32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
SVGA_FORMAT_END
};
static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
60000, 300000, 14318};
/* CRT timing register sets */
static struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
static struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
static struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
static struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
static struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
static struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
static struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
static struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
static struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
static struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
static struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
static struct svga_timing_regs vt8623_timing_regs = {
vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
};
/* ------------------------------------------------------------------------- */
/* Module parameters */
static char *mode_option = "640x480-8@60";
#ifdef CONFIG_MTRR
static int mtrr = 1;
#endif
MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
module_param(mode_option, charp, 0644);
MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
module_param_named(mode, mode_option, charp, 0);
MODULE_PARM_DESC(mode, "Default video mode e.g. '640x480-8@60' (deprecated)");
#ifdef CONFIG_MTRR
module_param(mtrr, int, 0444);
MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
#endif
/* ------------------------------------------------------------------------- */
static void vt8623fb_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor)
{
struct vt8623fb_info *par = info->par;
svga_tilecursor(par->state.vgabase, info, cursor);
}
static struct fb_tile_ops vt8623fb_tile_ops = {
.fb_settile = svga_settile,
.fb_tilecopy = svga_tilecopy,
.fb_tilefill = svga_tilefill,
.fb_tileblit = svga_tileblit,
.fb_tilecursor = vt8623fb_tilecursor,
.fb_get_tilemax = svga_get_tilemax,
};
/* ------------------------------------------------------------------------- */
/* image data is MSB-first, fb structure is MSB-first too */
static inline u32 expand_color(u32 c)
{
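/* Scatter the four colour bits so that bit n lands in byte n, then
 * multiply by 0xFF to widen each bit into a full 0x00/0xFF byte. */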
return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
}
/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
{
u32 fg = expand_color(image->fg_color);
u32 bg = expand_color(image->bg_color);
const u8 *src1, *src;
u8 __iomem *dst1;
u32 __iomem *dst;
u32 val;
int x, y;
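/* Each source byte covers eight pixels; multiplying by 0x01010101
 * replicates it into all four interleaved planes before fg/bg are
 * selected per bit. */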
src1 = image->data;
dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ ((image->dx / 8) * 4);
for (y = 0; y < image->height; y++) {
src = src1;
dst = (u32 __iomem *) dst1;
for (x = 0; x < image->width; x += 8) {
val = *(src++) * 0x01010101;
val = (val & fg) | (~val & bg);
fb_writel(val, dst++);
}
src1 += image->width / 8;
dst1 += info->fix.line_length;
}
}
/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
u32 fg = expand_color(rect->color);
u8 __iomem *dst1;
u32 __iomem *dst;
int x, y;
dst1 = info->screen_base + (rect->dy * info->fix.line_length)
+ ((rect->dx / 8) * 4);
for (y = 0; y < rect->height; y++) {
dst = (u32 __iomem *) dst1;
for (x = 0; x < rect->width; x += 8) {
fb_writel(fg, dst++);
}
dst1 += info->fix.line_length;
}
}
/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
static inline u32 expand_pixel(u32 c)
{
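/* Route each source bit to its own nibble (pixel pairs land
 * high-nibble-first within each byte), then multiply by 0xF to
 * widen every bit into a full nibble. */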
return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
}
/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
{
u32 fg = image->fg_color * 0x11111111;
u32 bg = image->bg_color * 0x11111111;
const u8 *src1, *src;
u8 __iomem *dst1;
u32 __iomem *dst;
u32 val;
int x, y;
src1 = image->data;
dst1 = info->screen_base + (image->dy * info->fix.line_length)
+ ((image->dx / 8) * 4);
for (y = 0; y < image->height; y++) {
src = src1;
dst = (u32 __iomem *) dst1;
for (x = 0; x < image->width; x += 8) {
val = expand_pixel(*(src++));
val = (val & fg) | (~val & bg);
fb_writel(val, dst++);
}
src1 += image->width / 8;
dst1 += info->fix.line_length;
}
}
static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
&& ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
vt8623fb_iplan_imageblit(info, image);
else
vt8623fb_cfb4_imageblit(info, image);
} else
cfb_imageblit(info, image);
}
static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
if ((info->var.bits_per_pixel == 4)
&& ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
&& (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
vt8623fb_iplan_fillrect(info, rect);
else
cfb_fillrect(info, rect);
}
/* ------------------------------------------------------------------------- */
static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
{
struct vt8623fb_info *par = info->par;
u16 m, n, r;
u8 regval;
int rv;
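/* pixclock is in picoseconds per pixel, so 1000000000 / pixclock is
 * the requested dot clock in kHz for the PLL search. */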
rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
if (rv < 0) {
printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
return;
}
/* Set VGA misc register */
regval = vga_r(par->state.vgabase, VGA_MIS_R);
vga_w(par->state.vgabase, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
/* Set clock registers */
vga_wseq(par->state.vgabase, 0x46, (n | (r << 6)));
vga_wseq(par->state.vgabase, 0x47, m);
udelay(1000);
/* PLL reset */
svga_wseq_mask(par->state.vgabase, 0x40, 0x02, 0x02);
svga_wseq_mask(par->state.vgabase, 0x40, 0x00, 0x02);
}
static int vt8623fb_open(struct fb_info *info, int user)
{
struct vt8623fb_info *par = info->par;
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
void __iomem *vgabase = par->state.vgabase;
memset(&(par->state), 0, sizeof(struct vgastate));
par->state.vgabase = vgabase;
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
par->state.num_crtc = 0xA2;
par->state.num_seq = 0x50;
save_vga(&(par->state));
}
par->ref_count++;
mutex_unlock(&(par->open_lock));
return 0;
}
static int vt8623fb_release(struct fb_info *info, int user)
{
struct vt8623fb_info *par = info->par;
mutex_lock(&(par->open_lock));
if (par->ref_count == 0) {
mutex_unlock(&(par->open_lock));
return -EINVAL;
}
if (par->ref_count == 1)
restore_vga(&(par->state));
par->ref_count--;
mutex_unlock(&(par->open_lock));
return 0;
}
static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
int rv, mem, step;
/* Find appropriate format */
rv = svga_match_format (vt8623fb_formats, var, NULL);
if (rv < 0)
{
printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
return rv;
}
/* Do not allow the real resolution to exceed the virtual one */
if (var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres > var->yres_virtual)
var->yres_virtual = var->yres;
/* Round up xres_virtual to have proper alignment of lines */
step = vt8623fb_formats[rv].xresstep - 1;
var->xres_virtual = (var->xres_virtual+step) & ~step;
/* Check whether there is enough framebuffer memory */
mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
if (mem > info->screen_size)
{
printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested , %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
return -EINVAL;
}
/* Text mode is limited to 256 kB of memory */
if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
{
printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
return -EINVAL;
}
rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
if (rv < 0)
{
printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
return rv;
}
/* Interlaced mode not supported */
if (var->vmode & FB_VMODE_INTERLACED)
return -EINVAL;
return 0;
}
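/*
 * Memory-check sketch: an 800x600 virtual resolution at 8 bpp needs
 * mem = ((8 * 800) >> 3) * 600 = 480000 bytes (~469 kB), which must
 * not exceed info->screen_size for check_var() to succeed.
 */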
static int vt8623fb_set_par(struct fb_info *info)
{
u32 mode, offset_value, fetch_value, screen_size;
struct vt8623fb_info *par = info->par;
u32 bpp = info->var.bits_per_pixel;
if (bpp != 0) {
info->fix.ypanstep = 1;
info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
info->flags &= ~FBINFO_MISC_TILEBLITTING;
info->tileops = NULL;
/* 4 bpp supports 8-pixel-wide tiles only, any tile width otherwise */
info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
info->pixmap.blit_y = ~(u32)0;
offset_value = (info->var.xres_virtual * bpp) / 64;
fetch_value = ((info->var.xres * bpp) / 128) + 4;
if (bpp == 4)
fetch_value = (info->var.xres / 8) + 8; /* + 0 is OK */
screen_size = info->var.yres_virtual * info->fix.line_length;
} else {
info->fix.ypanstep = 16;
info->fix.line_length = 0;
info->flags |= FBINFO_MISC_TILEBLITTING;
info->tileops = &vt8623fb_tile_ops;
/* supports 8x16 tiles only */
info->pixmap.blit_x = 1 << (8 - 1);
info->pixmap.blit_y = 1 << (16 - 1);
offset_value = info->var.xres_virtual / 16;
fetch_value = (info->var.xres / 8) + 8;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
}
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
/* Unlock registers */
svga_wseq_mask(par->state.vgabase, 0x10, 0x01, 0x01);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x80);
svga_wcrt_mask(par->state.vgabase, 0x47, 0x00, 0x01);
/* Device, screen and sync off */
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
svga_wcrt_mask(par->state.vgabase, 0x17, 0x00, 0x80);
/* Set default values */
svga_set_default_gfx_regs(par->state.vgabase);
svga_set_default_atc_regs(par->state.vgabase);
svga_set_default_seq_regs(par->state.vgabase);
svga_set_default_crt_regs(par->state.vgabase);
svga_wcrt_multi(par->state.vgabase, vt8623_line_compare_regs, 0xFFFFFFFF);
svga_wcrt_multi(par->state.vgabase, vt8623_start_address_regs, 0);
svga_wcrt_multi(par->state.vgabase, vt8623_offset_regs, offset_value);
svga_wseq_multi(par->state.vgabase, vt8623_fetch_count_regs, fetch_value);
/* Clear H/V Skew */
svga_wcrt_mask(par->state.vgabase, 0x03, 0x00, 0x60);
svga_wcrt_mask(par->state.vgabase, 0x05, 0x00, 0x60);
if (info->var.vmode & FB_VMODE_DOUBLE)
svga_wcrt_mask(par->state.vgabase, 0x09, 0x80, 0x80);
else
svga_wcrt_mask(par->state.vgabase, 0x09, 0x00, 0x80);
svga_wseq_mask(par->state.vgabase, 0x1E, 0xF0, 0xF0); // DI/DVP bus
svga_wseq_mask(par->state.vgabase, 0x2A, 0x0F, 0x0F); // DI/DVP bus
svga_wseq_mask(par->state.vgabase, 0x16, 0x08, 0xBF); // FIFO read threshold
vga_wseq(par->state.vgabase, 0x17, 0x1F); // FIFO depth
vga_wseq(par->state.vgabase, 0x18, 0x4E);
svga_wseq_mask(par->state.vgabase, 0x1A, 0x08, 0x08); // enable MMIO ?
vga_wcrt(par->state.vgabase, 0x32, 0x00);
vga_wcrt(par->state.vgabase, 0x34, 0x00);
vga_wcrt(par->state.vgabase, 0x6A, 0x80);
vga_wcrt(par->state.vgabase, 0x6A, 0xC0);
vga_wgfx(par->state.vgabase, 0x20, 0x00);
vga_wgfx(par->state.vgabase, 0x21, 0x00);
vga_wgfx(par->state.vgabase, 0x22, 0x00);
/* Set SR15 according to number of bits per pixel */
mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
switch (mode) {
case 0:
pr_debug("fb%d: text mode\n", info->node);
svga_set_textmode_vga_regs(par->state.vgabase);
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x60, 0x70);
break;
case 1:
pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
vga_wgfx(par->state.vgabase, VGA_GFX_MODE, 0x40);
svga_wseq_mask(par->state.vgabase, 0x15, 0x20, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 2:
pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x15, 0x00, 0xFE);
svga_wcrt_mask(par->state.vgabase, 0x11, 0x00, 0x70);
break;
case 3:
pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x15, 0x22, 0xFE);
break;
case 4:
pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x15, 0xB6, 0xFE);
break;
case 5:
pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
svga_wseq_mask(par->state.vgabase, 0x15, 0xAE, 0xFE);
break;
default:
printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
return (-EINVAL);
}
vt8623_set_pixclock(info, info->var.pixclock);
svga_set_timings(par->state.vgabase, &vt8623_timing_regs, &(info->var), 1, 1,
(info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
1, info->node);
memset_io(info->screen_base, 0x00, screen_size);
/* Device and screen back on */
svga_wcrt_mask(par->state.vgabase, 0x17, 0x80, 0x80);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
return 0;
}
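/*
 * Register-value sketch for the graphics branch of set_par(), assuming
 * an 800x600 mode at 16 bpp:
 *   line_length  = 800 * 16 / 8          = 1600 bytes
 *   offset_value = (800 * 16) / 64       = 200 (pitch in 8-byte units)
 *   fetch_value  = (800 * 16) / 128 + 4  = 104
 *   screen_size  = 600 * 1600            = 960000 bytes, the region
 *                                          cleared by memset_io()
 */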
static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *fb)
{
switch (fb->var.bits_per_pixel) {
case 0:
case 4:
if (regno >= 16)
return -EINVAL;
outb(0x0F, VGA_PEL_MSK);
outb(regno, VGA_PEL_IW);
outb(red >> 10, VGA_PEL_D);
outb(green >> 10, VGA_PEL_D);
outb(blue >> 10, VGA_PEL_D);
break;
case 8:
if (regno >= 256)
return -EINVAL;
outb(0xFF, VGA_PEL_MSK);
outb(regno, VGA_PEL_IW);
outb(red >> 10, VGA_PEL_D);
outb(green >> 10, VGA_PEL_D);
outb(blue >> 10, VGA_PEL_D);
break;
case 16:
if (regno >= 16)
return 0;
if (fb->var.green.length == 5)
((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
else if (fb->var.green.length == 6)
((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
else
return -EINVAL;
break;
case 24:
case 32:
if (regno >= 16)
return 0;
/* ((transp & 0xFF00) << 16) */
((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
(green & 0xFF00) | ((blue & 0xFF00) >> 8);
break;
default:
return -EINVAL;
}
return 0;
}
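/*
 * Packing sketch for the 16 bpp truecolor branch: fbdev components are
 * 16-bit, so pure red (red = 0xFFFF, green = blue = 0) with a 6-bit
 * green field packs to (0xFFFF & 0xF800) = 0xF800, the usual RGB 5:6:5
 * layout in the pseudo-palette.
 */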
static int vt8623fb_blank(int blank_mode, struct fb_info *info)
{
struct vt8623fb_info *par = info->par;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
pr_debug("fb%d: unblank\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x00, 0x20);
break;
case FB_BLANK_NORMAL:
pr_debug("fb%d: blank\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x00, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_HSYNC_SUSPEND:
pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x10, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_VSYNC_SUSPEND:
pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x20, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
case FB_BLANK_POWERDOWN:
pr_debug("fb%d: DPMS off (no sync)\n", info->node);
svga_wcrt_mask(par->state.vgabase, 0x36, 0x30, 0x30);
svga_wseq_mask(par->state.vgabase, 0x01, 0x20, 0x20);
break;
}
return 0;
}
static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct vt8623fb_info *par = info->par;
unsigned int offset;
/* Calculate the offset */
if (info->var.bits_per_pixel == 0) {
offset = (var->yoffset / 16) * info->var.xres_virtual
+ var->xoffset;
offset = offset >> 3;
} else {
offset = (var->yoffset * info->fix.line_length) +
(var->xoffset * info->var.bits_per_pixel / 8);
offset = offset >> ((info->var.bits_per_pixel == 4) ? 2 : 1);
}
/* Set the offset */
svga_wcrt_multi(par->state.vgabase, vt8623_start_address_regs, offset);
return 0;
}
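/*
 * Panning sketch: at 8 bpp with line_length = 800, yoffset = 2 and
 * xoffset = 0 give offset = (2 * 800 + 0) >> 1 = 800, since the start
 * address is apparently programmed in 2-byte units for packed modes.
 */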
/* ------------------------------------------------------------------------- */
/* Frame buffer operations */
static struct fb_ops vt8623fb_ops = {
.owner = THIS_MODULE,
.fb_open = vt8623fb_open,
.fb_release = vt8623fb_release,
.fb_check_var = vt8623fb_check_var,
.fb_set_par = vt8623fb_set_par,
.fb_setcolreg = vt8623fb_setcolreg,
.fb_blank = vt8623fb_blank,
.fb_pan_display = vt8623fb_pan_display,
.fb_fillrect = vt8623fb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = vt8623fb_imageblit,
.fb_get_caps = svga_get_caps,
};
/* PCI probe */
static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct pci_bus_region bus_reg;
struct resource vga_res;
struct fb_info *info;
struct vt8623fb_info *par;
unsigned int memsize1, memsize2;
int rc;
/* Ignore secondary VGA device because there is no VGA arbitration */
if (! svga_primary_device(dev)) {
dev_info(&(dev->dev), "ignoring secondary device\n");
return -ENODEV;
}
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
if (! info) {
dev_err(&(dev->dev), "cannot allocate memory\n");
return -ENOMEM;
}
par = info->par;
mutex_init(&par->open_lock);
info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
info->fbops = &vt8623fb_ops;
/* Prepare PCI device */
rc = pci_enable_device(dev);
if (rc < 0) {
dev_err(info->device, "cannot enable PCI device\n");
goto err_enable_device;
}
rc = pci_request_regions(dev, "vt8623fb");
if (rc < 0) {
dev_err(info->device, "cannot reserve framebuffer region\n");
goto err_request_regions;
}
info->fix.smem_start = pci_resource_start(dev, 0);
info->fix.smem_len = pci_resource_len(dev, 0);
info->fix.mmio_start = pci_resource_start(dev, 1);
info->fix.mmio_len = pci_resource_len(dev, 1);
/* Map physical IO memory address into kernel space */
info->screen_base = pci_iomap(dev, 0, 0);
if (! info->screen_base) {
rc = -ENOMEM;
dev_err(info->device, "iomap for framebuffer failed\n");
goto err_iomap_1;
}
par->mmio_base = pci_iomap(dev, 1, 0);
if (! par->mmio_base) {
rc = -ENOMEM;
dev_err(info->device, "iomap for MMIO failed\n");
goto err_iomap_2;
}
bus_reg.start = 0;
bus_reg.end = 64 * 1024;
vga_res.flags = IORESOURCE_IO;
pcibios_bus_to_resource(dev, &vga_res, &bus_reg);
par->state.vgabase = (void __iomem *) vga_res.start;
/* Find out how much physical memory there is on the card */
memsize1 = (vga_rseq(par->state.vgabase, 0x34) + 1) >> 1;
memsize2 = vga_rseq(par->state.vgabase, 0x39) << 2;
if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
info->screen_size = memsize1 << 20;
else {
dev_err(info->device, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
info->screen_size = 16 << 20;
}
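/*
 * Detection sketch, following the arithmetic above: a 32 MB board
 * would read SR34 = 0x3F ((0x3F + 1) >> 1 = 32) and SR39 = 0x08
 * (0x08 << 2 = 32); the values agree and lie in [16, 64], so
 * screen_size becomes 32 << 20.
 */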
info->fix.smem_len = info->screen_size;
strcpy(info->fix.id, "VIA VT8623");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.ypanstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->pseudo_palette = (void*)par->pseudo_palette;
/* Prepare startup mode */
kparam_block_sysfs_write(mode_option);
rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
kparam_unblock_sysfs_write(mode_option);
if (! ((rc == 1) || (rc == 2))) {
rc = -EINVAL;
dev_err(info->device, "mode %s not found\n", mode_option);
goto err_find_mode;
}
rc = fb_alloc_cmap(&info->cmap, 256, 0);
if (rc < 0) {
dev_err(info->device, "cannot allocate colormap\n");
goto err_alloc_cmap;
}
rc = register_framebuffer(info);
if (rc < 0) {
dev_err(info->device, "cannot register framebugger\n");
goto err_reg_fb;
}
printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
pci_name(dev), info->fix.smem_len >> 20);
/* Record a reference to the driver data */
pci_set_drvdata(dev, info);
#ifdef CONFIG_MTRR
if (mtrr) {
par->mtrr_reg = -1;
par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
}
#endif
return 0;
/* Error handling */
err_reg_fb:
fb_dealloc_cmap(&info->cmap);
err_alloc_cmap:
err_find_mode:
pci_iounmap(dev, par->mmio_base);
err_iomap_2:
pci_iounmap(dev, info->screen_base);
err_iomap_1:
pci_release_regions(dev);
err_request_regions:
/* pci_disable_device(dev); */
err_enable_device:
framebuffer_release(info);
return rc;
}
/* PCI remove */
static void __devexit vt8623_pci_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
if (info) {
struct vt8623fb_info *par = info->par;
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
#endif
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
pci_iounmap(dev, info->screen_base);
pci_iounmap(dev, par->mmio_base);
pci_release_regions(dev);
/* pci_disable_device(dev); */
pci_set_drvdata(dev, NULL);
framebuffer_release(info);
}
}
#ifdef CONFIG_PM
/* PCI suspend */
static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
dev_info(info->device, "suspend\n");
console_lock();
mutex_lock(&(par->open_lock));
if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
fb_set_suspend(info, 1);
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
/* PCI resume */
static int vt8623_pci_resume(struct pci_dev* dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct vt8623fb_info *par = info->par;
dev_info(info->device, "resume\n");
console_lock();
mutex_lock(&(par->open_lock));
if (par->ref_count == 0)
goto fail;
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
if (pci_enable_device(dev))
goto fail;
pci_set_master(dev);
vt8623fb_set_par(info);
fb_set_suspend(info, 0);
fail:
mutex_unlock(&(par->open_lock));
console_unlock();
return 0;
}
#else
#define vt8623_pci_suspend NULL
#define vt8623_pci_resume NULL
#endif /* CONFIG_PM */
/* List of boards that we are trying to support */
static struct pci_device_id vt8623_devices[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
{0, 0, 0, 0, 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vt8623_devices);
static struct pci_driver vt8623fb_pci_driver = {
.name = "vt8623fb",
.id_table = vt8623_devices,
.probe = vt8623_pci_probe,
.remove = __devexit_p(vt8623_pci_remove),
.suspend = vt8623_pci_suspend,
.resume = vt8623_pci_resume,
};
/* Cleanup */
static void __exit vt8623fb_cleanup(void)
{
pr_debug("vt8623fb: cleaning up\n");
pci_unregister_driver(&vt8623fb_pci_driver);
}
/* Driver Initialisation */
static int __init vt8623fb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("vt8623fb", &option))
return -ENODEV;
if (option && *option)
mode_option = option;
#endif
pr_debug("vt8623fb: initializing\n");
return pci_register_driver(&vt8623fb_pci_driver);
}
/* ------------------------------------------------------------------------- */
/* Modularization */
module_init(vt8623fb_init);
module_exit(vt8623fb_cleanup);
| gpl-2.0 |
visi0nary/android_kernel_alps_k05ts_a | drivers/ide/cy82c693.c | 4791 | 6402 | /*
* Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
* Copyright (C) 2007-2011 Bartlomiej Zolnierkiewicz
*
* CYPRESS CY82C693 chipset IDE controller
*
* The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <asm/io.h>
#define DRV_NAME "cy82c693"
/*
* NOTE: the value for busmaster timeout is tricky and I got it by
* trial and error! Using too low a value will cause DMA timeouts and
* hurt IDE performance, while using too high a value will cause
* audio playback to scatter.
* If you know a better value or how to calc it, please let me know.
*/
/* twice the value written in cy82c693ub datasheet */
#define BUSMASTER_TIMEOUT 0x50
/*
* the value above was tested on my machine and it seems to work okay
*/
/* here are the offset definitions for the registers */
#define CY82_IDE_CMDREG 0x04
#define CY82_IDE_ADDRSETUP 0x48
#define CY82_IDE_MASTER_IOR 0x4C
#define CY82_IDE_MASTER_IOW 0x4D
#define CY82_IDE_SLAVE_IOR 0x4E
#define CY82_IDE_SLAVE_IOW 0x4F
#define CY82_IDE_MASTER_8BIT 0x50
#define CY82_IDE_SLAVE_8BIT 0x51
#define CY82_INDEX_PORT 0x22
#define CY82_DATA_PORT 0x23
#define CY82_INDEX_CHANNEL0 0x30
#define CY82_INDEX_CHANNEL1 0x31
#define CY82_INDEX_TIMEOUT 0x32
/*
* set the DMA mode for a specific channel of the CY82C693
*/
static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
const u8 mode = drive->dma_mode;
u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
data = (mode & 3) | (single << 2);
outb(index, CY82_INDEX_PORT);
outb(data, CY82_DATA_PORT);
/*
* note: below we set the value for Bus Master IDE TimeOut Register
* I'm not absolutely sure what this does, but it solved my problem
* with IDE DMA and sound, so I now can play sound and work with
* my IDE driver at the same time :-)
*
* If you know the correct (best) value for this register please
* let me know - ASK
*/
data = BUSMASTER_TIMEOUT;
outb(CY82_INDEX_TIMEOUT, CY82_INDEX_PORT);
outb(data, CY82_DATA_PORT);
}
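/*
 * Encoding sketch for the index/data write above: a single-word DMA
 * mode such as XFER_SW_DMA_2 (0x12) gives single = 1 and
 * data = (0x12 & 3) | (1 << 2) = 0x06; multi-word XFER_MW_DMA_2 (0x22)
 * gives single = 0 and data = 0x02.
 */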
static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
const unsigned long T = 1000000 / bus_speed;
unsigned int addrCtrl;
struct ide_timing t;
u8 time_16, time_8;
/* select primary or secondary channel */
if (drive->dn > 1) { /* drive is on the secondary channel */
dev = pci_get_slot(dev->bus, dev->devfn+1);
if (!dev) {
printk(KERN_ERR "%s: tune_drive: "
"Cannot find secondary interface!\n",
drive->name);
return;
}
}
ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
time_16 = clamp_val(t.recover - 1, 0, 15) |
(clamp_val(t.active - 1, 0, 15) << 4);
time_8 = clamp_val(t.act8b - 1, 0, 15) |
(clamp_val(t.rec8b - 1, 0, 15) << 4);
/* now let's write the clocks registers */
if ((drive->dn & 1) == 0) {
/*
* set master drive
* address setup control register
* is 32 bit !!!
*/
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF);
addrCtrl |= clamp_val(t.setup - 1, 0, 15);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
} else {
/*
* set slave drive
* address setup control register
* is 32 bit !!!
*/
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF0);
addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
if (drive->dn > 1)
pci_dev_put(dev);
}
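/*
 * Timing sketch, assuming the default 33 MHz PCI clock: T becomes
 * 1000000 / 33 = 30303 ps per cycle. ide_timing_compute() quantizes
 * the PIO mode's timings into multiples of T, and the clamp_val()
 * calls above squeeze the resulting counts into the chip's 4-bit
 * fields.
 */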
static void init_iops_cy82c693(ide_hwif_t *hwif)
{
static ide_hwif_t *primary;
struct pci_dev *dev = to_pci_dev(hwif->dev);
if (PCI_FUNC(dev->devfn) == 1)
primary = hwif;
else {
hwif->mate = primary;
hwif->channel = 1;
}
}
static const struct ide_port_ops cy82c693_port_ops = {
.set_pio_mode = cy82c693_set_pio_mode,
.set_dma_mode = cy82c693_set_dma_mode,
};
static const struct ide_port_info cy82c693_chipset = {
.name = DRV_NAME,
.init_iops = init_iops_cy82c693,
.port_ops = &cy82c693_port_ops,
.host_flags = IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.swdma_mask = ATA_SWDMA2,
.mwdma_mask = ATA_MWDMA2,
};
static int cy82c693_init_one(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct pci_dev *dev2;
int ret = -ENODEV;
/* The CY82C693 is more than just an IDE controller.
Function 1 is the primary IDE channel, function 2 the secondary one. */
if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
PCI_FUNC(dev->devfn) == 1) {
dev2 = pci_get_slot(dev->bus, dev->devfn + 1);
ret = ide_pci_init_two(dev, dev2, &cy82c693_chipset, NULL);
if (ret)
pci_dev_put(dev2);
}
return ret;
}
static void cy82c693_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
ide_pci_remove(dev);
pci_dev_put(dev2);
}
static const struct pci_device_id cy82c693_pci_tbl[] = {
{ PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, cy82c693_pci_tbl);
static struct pci_driver cy82c693_pci_driver = {
.name = "Cypress_IDE",
.id_table = cy82c693_pci_tbl,
.probe = cy82c693_init_one,
.remove = cy82c693_remove,
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init cy82c693_ide_init(void)
{
return ide_pci_register_driver(&cy82c693_pci_driver);
}
static void __exit cy82c693_ide_exit(void)
{
pci_unregister_driver(&cy82c693_pci_driver);
}
module_init(cy82c693_ide_init);
module_exit(cy82c693_ide_exit);
MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bigbiff/android_kernel_samsung_n900 | fs/exofs/file.c | 8375 | 2586 | /*
* Copyright (C) 2005, 2006
* Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
* Copyrights for code taken from ext2:
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/inode.c
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is part of exofs.
*
* exofs is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation. Since it is based on ext2, and the only
* valid version of GPL for the Linux kernel is version 2, the only valid
* version of GPL for exofs is version 2.
*
* exofs is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with exofs; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "exofs.h"
static int exofs_release_file(struct inode *inode, struct file *filp)
{
return 0;
}
/* exofs_file_fsync - flush the inode to disk
*
* Note, in exofs all metadata is written as part of the inode,
* regardless of the datasync flag. The writeout is synchronous.
*/
static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = filp->f_mapping->host;
int ret;
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
ret = sync_inode_metadata(filp->f_mapping->host, 1);
mutex_unlock(&inode->i_mutex);
return ret;
}
static int exofs_flush(struct file *file, fl_owner_t id)
{
int ret = vfs_fsync(file, 0);
/* TODO: Flush the OSD target */
return ret;
}
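/*
 * Call-path sketch: vfs_fsync(file, 0) above is shorthand for
 * vfs_fsync_range(file, 0, LLONG_MAX, 0), so exofs_flush() ends up in
 * exofs_file_fsync() over the whole file, flushing the data pages and
 * the synchronously written inode metadata.
 */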
const struct file_operations exofs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.open = generic_file_open,
.release = exofs_release_file,
.fsync = exofs_file_fsync,
.flush = exofs_flush,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
const struct inode_operations exofs_file_inode_operations = {
.setattr = exofs_setattr,
};
| gpl-2.0 |